diff --git a/.github/labeler.yml b/.github/labeler.yml index 8d28891d09e..9bbb7116f7e 100644 --- a/.github/labeler.yml +++ b/.github/labeler.yml @@ -207,6 +207,11 @@ MIPS: - librz/arch/p/analysis/*_mips_* - librz/bp/p/bp_mips.c +Hexagon: + - librz/arch/isa/hexagon/**/* + - librz/arch/p/asm/*_hexagon_* + - librz/arch/p/analysis/*_hexagon_* + PPC: - librz/arch/is*/ppc/**/* - librz/arch/p/asm/*_ppc_* diff --git a/librz/arch/isa/hexagon/hexagon.c b/librz/arch/isa/hexagon/hexagon.c index 010dc611868..571ed4a8577 100644 --- a/librz/arch/isa/hexagon/hexagon.c +++ b/librz/arch/isa/hexagon/hexagon.c @@ -1,9 +1,9 @@ // SPDX-FileCopyrightText: 2021 Rot127 // SPDX-License-Identifier: LGPL-3.0-only -// LLVM commit: 96e220e6886868d6663d966ecc396befffc355e7 -// LLVM commit date: 2022-01-05 11:01:52 +0000 (ISO 8601 format) -// Date of code generation: 2022-07-19 05:08:09-04:00 +// LLVM commit: b6f51787f6c8e77143f0aef6b58ddc7c55741d5c +// LLVM commit date: 2023-11-15 07:10:59 -0800 (ISO 8601 format) +// Date of code generation: 2024-03-16 06:22:39-05:00 //======================================== // The following code is generated. // Do not edit. Repository of code generator: @@ -17,851 +17,400 @@ #include #include #include -#include "hexagon.h" -#include "hexagon_insn.h" -#include "hexagon_arch.h" +#include +#include +#include +#include -char *hex_get_ctr_regs(int opcode_reg, bool get_alias) { - switch (opcode_reg) { - default: return ""; - case HEX_REG_CTR_REGS_C1: - return get_alias ? "LC0" : "C1"; - case HEX_REG_CTR_REGS_C0: - return get_alias ? "SA0" : "C0"; - case HEX_REG_CTR_REGS_C3: - return get_alias ? "LC1" : "C3"; - case HEX_REG_CTR_REGS_C2: - return get_alias ? "SA1" : "C2"; - case HEX_REG_CTR_REGS_C4: - return get_alias ? "P3:0" : "C4"; - case HEX_REG_CTR_REGS_C5: - return get_alias ? "C5" : "C5"; - case HEX_REG_CTR_REGS_C9: - return get_alias ? "PC" : "C9"; - case HEX_REG_CTR_REGS_C10: - return get_alias ? 
"UGP" : "C10"; - case HEX_REG_CTR_REGS_C11: - return get_alias ? "GP" : "C11"; - case HEX_REG_CTR_REGS_C12: - return get_alias ? "CS0" : "C12"; - case HEX_REG_CTR_REGS_C13: - return get_alias ? "CS1" : "C13"; - case HEX_REG_CTR_REGS_C14: - return get_alias ? "UPCYCLELO" : "C14"; - case HEX_REG_CTR_REGS_C15: - return get_alias ? "UPCYCLEHI" : "C15"; - case HEX_REG_CTR_REGS_C16: - return get_alias ? "FRAMELIMIT" : "C16"; - case HEX_REG_CTR_REGS_C17: - return get_alias ? "FRAMEKEY" : "C17"; - case HEX_REG_CTR_REGS_C18: - return get_alias ? "PKTCOUNTLO" : "C18"; - case HEX_REG_CTR_REGS_C19: - return get_alias ? "PKTCOUNTHI" : "C19"; - case HEX_REG_CTR_REGS_C30: - return get_alias ? "UTIMERLO" : "C30"; - case HEX_REG_CTR_REGS_C31: - return get_alias ? "UTIMERHI" : "C31"; - case HEX_REG_CTR_REGS_C6: - return get_alias ? "M0" : "C6"; - case HEX_REG_CTR_REGS_C7: - return get_alias ? "M1" : "C7"; - case HEX_REG_CTR_REGS_C8: - return get_alias ? "USR" : "C8"; +RZ_API ut32 hex_resolve_reg_enum_id(HexRegClass class, ut32 reg_num) { + switch (class) { + default: + return reg_num; + case HEX_REG_CLASS_GENERAL_DOUBLE_LOW8_REGS: { + reg_num = reg_num << 1; + if (reg_num > 6) { // HEX_REG_D3 == 6 + reg_num = (reg_num & 0x7) | 0x10; + } + return reg_num; + } + case HEX_REG_CLASS_GENERAL_SUB_REGS: { + if (reg_num > 7) { // HEX_REG_R7 == 7 + reg_num = (reg_num & 0x7) | 0x10; + } + return reg_num; + } + case HEX_REG_CLASS_MOD_REGS: { + reg_num |= 6; + + return reg_num; + } } + rz_warn_if_reached(); + return UT32_MAX; } -char *hex_get_ctr_regs64(int opcode_reg, bool get_alias) { - switch (opcode_reg) { - default: return ""; - case HEX_REG_CTR_REGS64_C1_0: - return get_alias ? "LC0:SA0" : "C1:0"; - case HEX_REG_CTR_REGS64_C3_2: - return get_alias ? "LC1:SA1" : "C3:2"; - case HEX_REG_CTR_REGS64_C5_4: - return "C5:4"; - case HEX_REG_CTR_REGS64_C7_6: - return get_alias ? 
"M1:0" : "C7:6"; - case HEX_REG_CTR_REGS64_C9_8: - return "C9:8"; - case HEX_REG_CTR_REGS64_C11_10: - return "C11:10"; - case HEX_REG_CTR_REGS64_C13_12: - return get_alias ? "CS1:0" : "C13:12"; - case HEX_REG_CTR_REGS64_C15_14: - return get_alias ? "UPCYCLE" : "C15:14"; - case HEX_REG_CTR_REGS64_C17_16: - return "C17:16"; - case HEX_REG_CTR_REGS64_C19_18: - return get_alias ? "PKTCOUNT" : "C19:18"; - case HEX_REG_CTR_REGS64_C31_30: - return get_alias ? "UTIMER" : "C31:30"; +const char *hex_get_ctr_regs(int reg_num, bool get_alias, bool get_new, bool reg_num_is_enum) { + if (reg_num >= ARRAY_LEN(hexagon_ctrregs_lt_v69)) { + RZ_LOG_INFO("%s: Index out of range during register name lookup: i = %d\n", "hex_get_ctr_regs", reg_num); + return NULL; + } + const char *name; + const HexRegNames rn = hexagon_ctrregs_lt_v69[reg_num]; + if (get_alias) { + name = get_new ? rn.alias_tmp : rn.alias; + } else { + name = get_new ? rn.name_tmp : rn.name; } + if (!name) { + RZ_LOG_INFO("%s: No register name present at index: %d\n", "hex_get_ctr_regs", reg_num); + return NULL; + } + return name; } -char *hex_get_double_regs(int opcode_reg, bool get_alias) { - switch (opcode_reg) { - default: return ""; - case HEX_REG_DOUBLE_REGS_R1_0: - return "R1:0"; - case HEX_REG_DOUBLE_REGS_R3_2: - return "R3:2"; - case HEX_REG_DOUBLE_REGS_R5_4: - return "R5:4"; - case HEX_REG_DOUBLE_REGS_R7_6: - return "R7:6"; - case HEX_REG_DOUBLE_REGS_R9_8: - return "R9:8"; - case HEX_REG_DOUBLE_REGS_R13_12: - return "R13:12"; - case HEX_REG_DOUBLE_REGS_R15_14: - return "R15:14"; - case HEX_REG_DOUBLE_REGS_R17_16: - return "R17:16"; - case HEX_REG_DOUBLE_REGS_R19_18: - return "R19:18"; - case HEX_REG_DOUBLE_REGS_R21_20: - return "R21:20"; - case HEX_REG_DOUBLE_REGS_R23_22: - return "R23:22"; - case HEX_REG_DOUBLE_REGS_R25_24: - return "R25:24"; - case HEX_REG_DOUBLE_REGS_R27_26: - return "R27:26"; - case HEX_REG_DOUBLE_REGS_R11_10: - return "R11:10"; - case HEX_REG_DOUBLE_REGS_R29_28: - return "R29:28"; - case 
HEX_REG_DOUBLE_REGS_R31_30: - return get_alias ? "LR:FP" : "R31:30"; +const char *hex_get_ctr_regs64(int reg_num, bool get_alias, bool get_new, bool reg_num_is_enum) { + if (reg_num >= ARRAY_LEN(hexagon_ctrregs64_lt_v69)) { + RZ_LOG_INFO("%s: Index out of range during register name lookup: i = %d\n", "hex_get_ctr_regs64", reg_num); + return NULL; } + const char *name; + const HexRegNames rn = hexagon_ctrregs64_lt_v69[reg_num]; + if (get_alias) { + name = get_new ? rn.alias_tmp : rn.alias; + } else { + name = get_new ? rn.name_tmp : rn.name; + } + if (!name) { + RZ_LOG_INFO("%s: No register name present at index: %d\n", "hex_get_ctr_regs64", reg_num); + return NULL; + } + return name; } -char *hex_get_general_double_low8_regs(int opcode_reg, bool get_alias) { - opcode_reg = opcode_reg << 1; - if (opcode_reg > 6) { // HEX_REG_D3 == 6 - opcode_reg = (opcode_reg & 0x7) | 0x10; - } - switch (opcode_reg) { - default: return ""; - case HEX_REG_GENERAL_DOUBLE_LOW8_REGS_R23_22: - return "R23:22"; - case HEX_REG_GENERAL_DOUBLE_LOW8_REGS_R21_20: - return "R21:20"; - case HEX_REG_GENERAL_DOUBLE_LOW8_REGS_R19_18: - return "R19:18"; - case HEX_REG_GENERAL_DOUBLE_LOW8_REGS_R17_16: - return "R17:16"; - case HEX_REG_GENERAL_DOUBLE_LOW8_REGS_R7_6: - return "R7:6"; - case HEX_REG_GENERAL_DOUBLE_LOW8_REGS_R5_4: - return "R5:4"; - case HEX_REG_GENERAL_DOUBLE_LOW8_REGS_R3_2: - return "R3:2"; - case HEX_REG_GENERAL_DOUBLE_LOW8_REGS_R1_0: - return "R1:0"; +const char *hex_get_double_regs(int reg_num, bool get_alias, bool get_new, bool reg_num_is_enum) { + if (reg_num >= ARRAY_LEN(hexagon_doubleregs_lt_v69)) { + RZ_LOG_INFO("%s: Index out of range during register name lookup: i = %d\n", "hex_get_double_regs", reg_num); + return NULL; + } + const char *name; + const HexRegNames rn = hexagon_doubleregs_lt_v69[reg_num]; + if (get_alias) { + name = get_new ? rn.alias_tmp : rn.alias; + } else { + name = get_new ? 
rn.name_tmp : rn.name; + } + if (!name) { + RZ_LOG_INFO("%s: No register name present at index: %d\n", "hex_get_double_regs", reg_num); + return NULL; } + return name; } -char *hex_get_general_sub_regs(int opcode_reg, bool get_alias) { - if (opcode_reg > 7) { // HEX_REG_R7 == 7 - opcode_reg = (opcode_reg & 0x7) | 0x10; - } - switch (opcode_reg) { - default: return ""; - case HEX_REG_GENERAL_SUB_REGS_R23: - return "R23"; - case HEX_REG_GENERAL_SUB_REGS_R22: - return "R22"; - case HEX_REG_GENERAL_SUB_REGS_R21: - return "R21"; - case HEX_REG_GENERAL_SUB_REGS_R20: - return "R20"; - case HEX_REG_GENERAL_SUB_REGS_R19: - return "R19"; - case HEX_REG_GENERAL_SUB_REGS_R18: - return "R18"; - case HEX_REG_GENERAL_SUB_REGS_R17: - return "R17"; - case HEX_REG_GENERAL_SUB_REGS_R16: - return "R16"; - case HEX_REG_GENERAL_SUB_REGS_R7: - return "R7"; - case HEX_REG_GENERAL_SUB_REGS_R6: - return "R6"; - case HEX_REG_GENERAL_SUB_REGS_R5: - return "R5"; - case HEX_REG_GENERAL_SUB_REGS_R4: - return "R4"; - case HEX_REG_GENERAL_SUB_REGS_R3: - return "R3"; - case HEX_REG_GENERAL_SUB_REGS_R2: - return "R2"; - case HEX_REG_GENERAL_SUB_REGS_R1: - return "R1"; - case HEX_REG_GENERAL_SUB_REGS_R0: - return "R0"; +const char *hex_get_general_double_low8_regs(int reg_num, bool get_alias, bool get_new, bool reg_num_is_enum) { + reg_num = hex_resolve_reg_enum_id(HEX_REG_CLASS_GENERAL_DOUBLE_LOW8_REGS, reg_num); + if (reg_num >= ARRAY_LEN(hexagon_generaldoublelow8regs_lt_v69)) { + RZ_LOG_INFO("%s: Index out of range during register name lookup: i = %d\n", "hex_get_general_double_low8_regs", reg_num); + return NULL; + } + const char *name; + const HexRegNames rn = hexagon_generaldoublelow8regs_lt_v69[reg_num]; + if (get_alias) { + name = get_new ? rn.alias_tmp : rn.alias; + } else { + name = get_new ? 
rn.name_tmp : rn.name; + } + if (!name) { + RZ_LOG_INFO("%s: No register name present at index: %d\n", "hex_get_general_double_low8_regs", reg_num); + return NULL; } + return name; } -char *hex_get_guest_regs(int opcode_reg, bool get_alias) { - switch (opcode_reg) { - default: return ""; - case HEX_REG_GUEST_REGS_G0: - return get_alias ? "GELR" : "G0"; - case HEX_REG_GUEST_REGS_G1: - return get_alias ? "GSR" : "G1"; - case HEX_REG_GUEST_REGS_G2: - return get_alias ? "GOSP" : "G2"; - case HEX_REG_GUEST_REGS_G3: - return get_alias ? "GBADVA" : "G3"; - case HEX_REG_GUEST_REGS_G4: - return "G4"; - case HEX_REG_GUEST_REGS_G5: - return "G5"; - case HEX_REG_GUEST_REGS_G6: - return "G6"; - case HEX_REG_GUEST_REGS_G7: - return "G7"; - case HEX_REG_GUEST_REGS_G8: - return "G8"; - case HEX_REG_GUEST_REGS_G9: - return "G9"; - case HEX_REG_GUEST_REGS_G10: - return "G10"; - case HEX_REG_GUEST_REGS_G11: - return "G11"; - case HEX_REG_GUEST_REGS_G12: - return "G12"; - case HEX_REG_GUEST_REGS_G13: - return "G13"; - case HEX_REG_GUEST_REGS_G14: - return "G14"; - case HEX_REG_GUEST_REGS_G15: - return "G15"; - case HEX_REG_GUEST_REGS_G16: - return get_alias ? "GPMUCNT4" : "G16"; - case HEX_REG_GUEST_REGS_G17: - return get_alias ? "GPMUCNT5" : "G17"; - case HEX_REG_GUEST_REGS_G18: - return get_alias ? "GPMUCNT6" : "G18"; - case HEX_REG_GUEST_REGS_G19: - return get_alias ? "GPMUCNT7" : "G19"; - case HEX_REG_GUEST_REGS_G20: - return "G20"; - case HEX_REG_GUEST_REGS_G21: - return "G21"; - case HEX_REG_GUEST_REGS_G22: - return "G22"; - case HEX_REG_GUEST_REGS_G23: - return "G23"; - case HEX_REG_GUEST_REGS_G24: - return get_alias ? "GPCYCLELO" : "G24"; - case HEX_REG_GUEST_REGS_G25: - return get_alias ? "GPCYCLEHI" : "G25"; - case HEX_REG_GUEST_REGS_G26: - return get_alias ? "GPMUCNT0" : "G26"; - case HEX_REG_GUEST_REGS_G27: - return get_alias ? "GPMUCNT1" : "G27"; - case HEX_REG_GUEST_REGS_G28: - return get_alias ? "GPMUCNT2" : "G28"; - case HEX_REG_GUEST_REGS_G29: - return get_alias ? 
"GPMUCNT3" : "G29"; - case HEX_REG_GUEST_REGS_G30: - return "G30"; - case HEX_REG_GUEST_REGS_G31: - return "G31"; +const char *hex_get_general_sub_regs(int reg_num, bool get_alias, bool get_new, bool reg_num_is_enum) { + reg_num = hex_resolve_reg_enum_id(HEX_REG_CLASS_GENERAL_SUB_REGS, reg_num); + if (reg_num >= ARRAY_LEN(hexagon_generalsubregs_lt_v69)) { + RZ_LOG_INFO("%s: Index out of range during register name lookup: i = %d\n", "hex_get_general_sub_regs", reg_num); + return NULL; + } + const char *name; + const HexRegNames rn = hexagon_generalsubregs_lt_v69[reg_num]; + if (get_alias) { + name = get_new ? rn.alias_tmp : rn.alias; + } else { + name = get_new ? rn.name_tmp : rn.name; } + if (!name) { + RZ_LOG_INFO("%s: No register name present at index: %d\n", "hex_get_general_sub_regs", reg_num); + return NULL; + } + return name; } -char *hex_get_guest_regs64(int opcode_reg, bool get_alias) { - switch (opcode_reg) { - default: return ""; - case HEX_REG_GUEST_REGS64_G1_0: - return "G1:0"; - case HEX_REG_GUEST_REGS64_G3_2: - return "G3:2"; - case HEX_REG_GUEST_REGS64_G5_4: - return "G5:4"; - case HEX_REG_GUEST_REGS64_G7_6: - return "G7:6"; - case HEX_REG_GUEST_REGS64_G9_8: - return "G9:8"; - case HEX_REG_GUEST_REGS64_G11_10: - return "G11:10"; - case HEX_REG_GUEST_REGS64_G13_12: - return "G13:12"; - case HEX_REG_GUEST_REGS64_G15_14: - return "G15:14"; - case HEX_REG_GUEST_REGS64_G17_16: - return "G17:16"; - case HEX_REG_GUEST_REGS64_G19_18: - return "G19:18"; - case HEX_REG_GUEST_REGS64_G21_20: - return "G21:20"; - case HEX_REG_GUEST_REGS64_G23_22: - return "G23:22"; - case HEX_REG_GUEST_REGS64_G25_24: - return "G25:24"; - case HEX_REG_GUEST_REGS64_G27_26: - return "G27:26"; - case HEX_REG_GUEST_REGS64_G29_28: - return "G29:28"; - case HEX_REG_GUEST_REGS64_G31_30: - return "G31:30"; +const char *hex_get_guest_regs(int reg_num, bool get_alias, bool get_new, bool reg_num_is_enum) { + if (reg_num >= ARRAY_LEN(hexagon_guestregs_lt_v69)) { + RZ_LOG_INFO("%s: Index out 
of range during register name lookup: i = %d\n", "hex_get_guest_regs", reg_num); + return NULL; + } + const char *name; + const HexRegNames rn = hexagon_guestregs_lt_v69[reg_num]; + if (get_alias) { + name = get_new ? rn.alias_tmp : rn.alias; + } else { + name = get_new ? rn.name_tmp : rn.name; } + if (!name) { + RZ_LOG_INFO("%s: No register name present at index: %d\n", "hex_get_guest_regs", reg_num); + return NULL; + } + return name; } -char *hex_get_hvx_qr(int opcode_reg, bool get_alias) { - switch (opcode_reg) { - default: return ""; - case HEX_REG_HVX_QR_Q0: - return "Q0"; - case HEX_REG_HVX_QR_Q1: - return "Q1"; - case HEX_REG_HVX_QR_Q2: - return "Q2"; - case HEX_REG_HVX_QR_Q3: - return "Q3"; +const char *hex_get_guest_regs64(int reg_num, bool get_alias, bool get_new, bool reg_num_is_enum) { + if (reg_num >= ARRAY_LEN(hexagon_guestregs64_lt_v69)) { + RZ_LOG_INFO("%s: Index out of range during register name lookup: i = %d\n", "hex_get_guest_regs64", reg_num); + return NULL; } + const char *name; + const HexRegNames rn = hexagon_guestregs64_lt_v69[reg_num]; + if (get_alias) { + name = get_new ? rn.alias_tmp : rn.alias; + } else { + name = get_new ? 
rn.name_tmp : rn.name; + } + if (!name) { + RZ_LOG_INFO("%s: No register name present at index: %d\n", "hex_get_guest_regs64", reg_num); + return NULL; + } + return name; } -char *hex_get_hvx_vqr(int opcode_reg, bool get_alias) { - switch (opcode_reg) { - default: return ""; - case HEX_REG_HVX_VQR_V3_0: - return "V3:0"; - case HEX_REG_HVX_VQR_V7_4: - return "V7:4"; - case HEX_REG_HVX_VQR_V11_8: - return "V11:8"; - case HEX_REG_HVX_VQR_V15_12: - return "V15:12"; - case HEX_REG_HVX_VQR_V19_16: - return "V19:16"; - case HEX_REG_HVX_VQR_V23_20: - return "V23:20"; - case HEX_REG_HVX_VQR_V27_24: - return "V27:24"; - case HEX_REG_HVX_VQR_V31_28: - return "V31:28"; +const char *hex_get_hvx_qr(int reg_num, bool get_alias, bool get_new, bool reg_num_is_enum) { + if (reg_num >= ARRAY_LEN(hexagon_hvxqr_lt_v69)) { + RZ_LOG_INFO("%s: Index out of range during register name lookup: i = %d\n", "hex_get_hvx_qr", reg_num); + return NULL; + } + const char *name; + const HexRegNames rn = hexagon_hvxqr_lt_v69[reg_num]; + if (get_alias) { + name = get_new ? rn.alias_tmp : rn.alias; + } else { + name = get_new ? 
rn.name_tmp : rn.name; } + if (!name) { + RZ_LOG_INFO("%s: No register name present at index: %d\n", "hex_get_hvx_qr", reg_num); + return NULL; + } + return name; } -char *hex_get_hvx_vr(int opcode_reg, bool get_alias) { - switch (opcode_reg) { - default: return ""; - case HEX_REG_HVX_VR_V0: - return "V0"; - case HEX_REG_HVX_VR_V1: - return "V1"; - case HEX_REG_HVX_VR_V2: - return "V2"; - case HEX_REG_HVX_VR_V3: - return "V3"; - case HEX_REG_HVX_VR_V4: - return "V4"; - case HEX_REG_HVX_VR_V5: - return "V5"; - case HEX_REG_HVX_VR_V6: - return "V6"; - case HEX_REG_HVX_VR_V7: - return "V7"; - case HEX_REG_HVX_VR_V8: - return "V8"; - case HEX_REG_HVX_VR_V9: - return "V9"; - case HEX_REG_HVX_VR_V10: - return "V10"; - case HEX_REG_HVX_VR_V11: - return "V11"; - case HEX_REG_HVX_VR_V12: - return "V12"; - case HEX_REG_HVX_VR_V13: - return "V13"; - case HEX_REG_HVX_VR_V14: - return "V14"; - case HEX_REG_HVX_VR_V15: - return "V15"; - case HEX_REG_HVX_VR_V16: - return "V16"; - case HEX_REG_HVX_VR_V17: - return "V17"; - case HEX_REG_HVX_VR_V18: - return "V18"; - case HEX_REG_HVX_VR_V19: - return "V19"; - case HEX_REG_HVX_VR_V20: - return "V20"; - case HEX_REG_HVX_VR_V21: - return "V21"; - case HEX_REG_HVX_VR_V22: - return "V22"; - case HEX_REG_HVX_VR_V23: - return "V23"; - case HEX_REG_HVX_VR_V24: - return "V24"; - case HEX_REG_HVX_VR_V25: - return "V25"; - case HEX_REG_HVX_VR_V26: - return "V26"; - case HEX_REG_HVX_VR_V27: - return "V27"; - case HEX_REG_HVX_VR_V28: - return "V28"; - case HEX_REG_HVX_VR_V29: - return "V29"; - case HEX_REG_HVX_VR_V30: - return "V30"; - case HEX_REG_HVX_VR_V31: - return "V31"; +const char *hex_get_hvx_vqr(int reg_num, bool get_alias, bool get_new, bool reg_num_is_enum) { + if (reg_num >= ARRAY_LEN(hexagon_hvxvqr_lt_v69)) { + RZ_LOG_INFO("%s: Index out of range during register name lookup: i = %d\n", "hex_get_hvx_vqr", reg_num); + return NULL; + } + const char *name; + const HexRegNames rn = hexagon_hvxvqr_lt_v69[reg_num]; + if (get_alias) { + 
name = get_new ? rn.alias_tmp : rn.alias; + } else { + name = get_new ? rn.name_tmp : rn.name; } + if (!name) { + RZ_LOG_INFO("%s: No register name present at index: %d\n", "hex_get_hvx_vqr", reg_num); + return NULL; + } + return name; } -char *hex_get_hvx_wr(int opcode_reg, bool get_alias) { - switch (opcode_reg) { - default: return ""; - case HEX_REG_HVX_WR_V1_0: - return "V1:0"; - case HEX_REG_HVX_WR_V3_2: - return "V3:2"; - case HEX_REG_HVX_WR_V5_4: - return "V5:4"; - case HEX_REG_HVX_WR_V7_6: - return "V7:6"; - case HEX_REG_HVX_WR_V9_8: - return "V9:8"; - case HEX_REG_HVX_WR_V11_10: - return "V11:10"; - case HEX_REG_HVX_WR_V13_12: - return "V13:12"; - case HEX_REG_HVX_WR_V15_14: - return "V15:14"; - case HEX_REG_HVX_WR_V17_16: - return "V17:16"; - case HEX_REG_HVX_WR_V19_18: - return "V19:18"; - case HEX_REG_HVX_WR_V21_20: - return "V21:20"; - case HEX_REG_HVX_WR_V23_22: - return "V23:22"; - case HEX_REG_HVX_WR_V25_24: - return "V25:24"; - case HEX_REG_HVX_WR_V27_26: - return "V27:26"; - case HEX_REG_HVX_WR_V29_28: - return "V29:28"; - case HEX_REG_HVX_WR_V31_30: - return "V31:30"; +const char *hex_get_hvx_vr(int reg_num, bool get_alias, bool get_new, bool reg_num_is_enum) { + if (reg_num >= ARRAY_LEN(hexagon_hvxvr_lt_v69)) { + RZ_LOG_INFO("%s: Index out of range during register name lookup: i = %d\n", "hex_get_hvx_vr", reg_num); + return NULL; } + const char *name; + const HexRegNames rn = hexagon_hvxvr_lt_v69[reg_num]; + if (get_alias) { + name = get_new ? rn.alias_tmp : rn.alias; + } else { + name = get_new ? 
rn.name_tmp : rn.name; + } + if (!name) { + RZ_LOG_INFO("%s: No register name present at index: %d\n", "hex_get_hvx_vr", reg_num); + return NULL; + } + return name; } -char *hex_get_int_regs(int opcode_reg, bool get_alias) { - switch (opcode_reg) { - default: return ""; - case HEX_REG_INT_REGS_R0: - return "R0"; - case HEX_REG_INT_REGS_R1: - return "R1"; - case HEX_REG_INT_REGS_R2: - return "R2"; - case HEX_REG_INT_REGS_R3: - return "R3"; - case HEX_REG_INT_REGS_R4: - return "R4"; - case HEX_REG_INT_REGS_R5: - return "R5"; - case HEX_REG_INT_REGS_R6: - return "R6"; - case HEX_REG_INT_REGS_R7: - return "R7"; - case HEX_REG_INT_REGS_R8: - return "R8"; - case HEX_REG_INT_REGS_R9: - return "R9"; - case HEX_REG_INT_REGS_R12: - return "R12"; - case HEX_REG_INT_REGS_R13: - return "R13"; - case HEX_REG_INT_REGS_R14: - return "R14"; - case HEX_REG_INT_REGS_R15: - return "R15"; - case HEX_REG_INT_REGS_R16: - return "R16"; - case HEX_REG_INT_REGS_R17: - return "R17"; - case HEX_REG_INT_REGS_R18: - return "R18"; - case HEX_REG_INT_REGS_R19: - return "R19"; - case HEX_REG_INT_REGS_R20: - return "R20"; - case HEX_REG_INT_REGS_R21: - return "R21"; - case HEX_REG_INT_REGS_R22: - return "R22"; - case HEX_REG_INT_REGS_R23: - return "R23"; - case HEX_REG_INT_REGS_R24: - return "R24"; - case HEX_REG_INT_REGS_R25: - return "R25"; - case HEX_REG_INT_REGS_R26: - return "R26"; - case HEX_REG_INT_REGS_R27: - return "R27"; - case HEX_REG_INT_REGS_R28: - return "R28"; - case HEX_REG_INT_REGS_R10: - return "R10"; - case HEX_REG_INT_REGS_R11: - return "R11"; - case HEX_REG_INT_REGS_R29: - return get_alias ? "SP" : "R29"; - case HEX_REG_INT_REGS_R30: - return get_alias ? "FP" : "R30"; - case HEX_REG_INT_REGS_R31: - return get_alias ? 
"LR" : "R31"; +const char *hex_get_hvx_wr(int reg_num, bool get_alias, bool get_new, bool reg_num_is_enum) { + if (reg_num >= ARRAY_LEN(hexagon_hvxwr_lt_v69)) { + RZ_LOG_INFO("%s: Index out of range during register name lookup: i = %d\n", "hex_get_hvx_wr", reg_num); + return NULL; + } + const char *name; + const HexRegNames rn = hexagon_hvxwr_lt_v69[reg_num]; + if (get_alias) { + name = get_new ? rn.alias_tmp : rn.alias; + } else { + name = get_new ? rn.name_tmp : rn.name; } + if (!name) { + RZ_LOG_INFO("%s: No register name present at index: %d\n", "hex_get_hvx_wr", reg_num); + return NULL; + } + return name; } -char *hex_get_int_regs_low8(int opcode_reg, bool get_alias) { - switch (opcode_reg) { - default: return ""; - case HEX_REG_INT_REGS_LOW8_R7: - return "R7"; - case HEX_REG_INT_REGS_LOW8_R6: - return "R6"; - case HEX_REG_INT_REGS_LOW8_R5: - return "R5"; - case HEX_REG_INT_REGS_LOW8_R4: - return "R4"; - case HEX_REG_INT_REGS_LOW8_R3: - return "R3"; - case HEX_REG_INT_REGS_LOW8_R2: - return "R2"; - case HEX_REG_INT_REGS_LOW8_R1: - return "R1"; - case HEX_REG_INT_REGS_LOW8_R0: - return "R0"; +const char *hex_get_int_regs(int reg_num, bool get_alias, bool get_new, bool reg_num_is_enum) { + if (reg_num >= ARRAY_LEN(hexagon_intregs_lt_v69)) { + RZ_LOG_INFO("%s: Index out of range during register name lookup: i = %d\n", "hex_get_int_regs", reg_num); + return NULL; } + const char *name; + const HexRegNames rn = hexagon_intregs_lt_v69[reg_num]; + if (get_alias) { + name = get_new ? rn.alias_tmp : rn.alias; + } else { + name = get_new ? rn.name_tmp : rn.name; + } + if (!name) { + RZ_LOG_INFO("%s: No register name present at index: %d\n", "hex_get_int_regs", reg_num); + return NULL; + } + return name; } -char *hex_get_mod_regs(int opcode_reg, bool get_alias) { - opcode_reg |= 6; - switch (opcode_reg) { - default: return ""; - case HEX_REG_MOD_REGS_C6: - return get_alias ? "M0" : "C6"; - case HEX_REG_MOD_REGS_C7: - return get_alias ? 
"M1" : "C7"; +const char *hex_get_int_regs_low8(int reg_num, bool get_alias, bool get_new, bool reg_num_is_enum) { + if (reg_num >= ARRAY_LEN(hexagon_intregslow8_lt_v69)) { + RZ_LOG_INFO("%s: Index out of range during register name lookup: i = %d\n", "hex_get_int_regs_low8", reg_num); + return NULL; + } + const char *name; + const HexRegNames rn = hexagon_intregslow8_lt_v69[reg_num]; + if (get_alias) { + name = get_new ? rn.alias_tmp : rn.alias; + } else { + name = get_new ? rn.name_tmp : rn.name; } + if (!name) { + RZ_LOG_INFO("%s: No register name present at index: %d\n", "hex_get_int_regs_low8", reg_num); + return NULL; + } + return name; } -char *hex_get_pred_regs(int opcode_reg, bool get_alias) { - switch (opcode_reg) { - default: return ""; - case HEX_REG_PRED_REGS_P0: - return "P0"; - case HEX_REG_PRED_REGS_P1: - return "P1"; - case HEX_REG_PRED_REGS_P2: - return "P2"; - case HEX_REG_PRED_REGS_P3: - return "P3"; +const char *hex_get_mod_regs(int reg_num, bool get_alias, bool get_new, bool reg_num_is_enum) { + reg_num = hex_resolve_reg_enum_id(HEX_REG_CLASS_MOD_REGS, reg_num); + if (reg_num >= ARRAY_LEN(hexagon_modregs_lt_v69)) { + RZ_LOG_INFO("%s: Index out of range during register name lookup: i = %d\n", "hex_get_mod_regs", reg_num); + return NULL; + } + const char *name; + const HexRegNames rn = hexagon_modregs_lt_v69[reg_num]; + if (get_alias) { + name = get_new ? rn.alias_tmp : rn.alias; + } else { + name = get_new ? rn.name_tmp : rn.name; + } + if (!name) { + RZ_LOG_INFO("%s: No register name present at index: %d\n", "hex_get_mod_regs", reg_num); + return NULL; } + return name; } -char *hex_get_sys_regs(int opcode_reg, bool get_alias) { - switch (opcode_reg) { - default: return ""; - case HEX_REG_SYS_REGS_S0: - return get_alias ? "SGP0" : "S0"; - case HEX_REG_SYS_REGS_S1: - return get_alias ? "SGP1" : "S1"; - case HEX_REG_SYS_REGS_S2: - return get_alias ? "STID" : "S2"; - case HEX_REG_SYS_REGS_S3: - return get_alias ? 
"ELR" : "S3"; - case HEX_REG_SYS_REGS_S4: - return get_alias ? "BADVA0" : "S4"; - case HEX_REG_SYS_REGS_S5: - return get_alias ? "BADVA1" : "S5"; - case HEX_REG_SYS_REGS_S6: - return get_alias ? "SSR" : "S6"; - case HEX_REG_SYS_REGS_S7: - return get_alias ? "CCR" : "S7"; - case HEX_REG_SYS_REGS_S8: - return get_alias ? "HTID" : "S8"; - case HEX_REG_SYS_REGS_S9: - return get_alias ? "BADVA" : "S9"; - case HEX_REG_SYS_REGS_S10: - return get_alias ? "IMASK" : "S10"; - case HEX_REG_SYS_REGS_S11: - return "S11"; - case HEX_REG_SYS_REGS_S12: - return "S12"; - case HEX_REG_SYS_REGS_S13: - return "S13"; - case HEX_REG_SYS_REGS_S14: - return "S14"; - case HEX_REG_SYS_REGS_S15: - return "S15"; - case HEX_REG_SYS_REGS_S19: - return get_alias ? "S19" : "S19"; - case HEX_REG_SYS_REGS_S23: - return "S23"; - case HEX_REG_SYS_REGS_S25: - return "S25"; - case HEX_REG_SYS_REGS_S16: - return get_alias ? "EVB" : "S16"; - case HEX_REG_SYS_REGS_S17: - return get_alias ? "MODECTL" : "S17"; - case HEX_REG_SYS_REGS_S18: - return get_alias ? "SYSCFG" : "S18"; - case HEX_REG_SYS_REGS_S20: - return get_alias ? "S20" : "S20"; - case HEX_REG_SYS_REGS_S21: - return get_alias ? "VID" : "S21"; - case HEX_REG_SYS_REGS_S22: - return get_alias ? "S22" : "S22"; - case HEX_REG_SYS_REGS_S24: - return "S24"; - case HEX_REG_SYS_REGS_S26: - return "S26"; - case HEX_REG_SYS_REGS_S27: - return get_alias ? "CFGBASE" : "S27"; - case HEX_REG_SYS_REGS_S28: - return get_alias ? "DIAG" : "S28"; - case HEX_REG_SYS_REGS_S29: - return get_alias ? "REV" : "S29"; - case HEX_REG_SYS_REGS_S31: - return get_alias ? "PCYCLEHI" : "S31"; - case HEX_REG_SYS_REGS_S30: - return get_alias ? "PCYCLELO" : "S30"; - case HEX_REG_SYS_REGS_S32: - return get_alias ? "ISDBST" : "S32"; - case HEX_REG_SYS_REGS_S33: - return get_alias ? "ISDBCFG0" : "S33"; - case HEX_REG_SYS_REGS_S34: - return get_alias ? "ISDBCFG1" : "S34"; - case HEX_REG_SYS_REGS_S35: - return "S35"; - case HEX_REG_SYS_REGS_S36: - return get_alias ? 
"BRKPTPC0" : "S36"; - case HEX_REG_SYS_REGS_S37: - return get_alias ? "BRKPTCFG0" : "S37"; - case HEX_REG_SYS_REGS_S38: - return get_alias ? "BRKPTPC1" : "S38"; - case HEX_REG_SYS_REGS_S39: - return get_alias ? "BRKPTCFG1" : "S39"; - case HEX_REG_SYS_REGS_S40: - return get_alias ? "ISDBMBXIN" : "S40"; - case HEX_REG_SYS_REGS_S41: - return get_alias ? "ISDBMBXOUT" : "S41"; - case HEX_REG_SYS_REGS_S42: - return get_alias ? "ISDBEN" : "S42"; - case HEX_REG_SYS_REGS_S43: - return get_alias ? "ISDBGPR" : "S43"; - case HEX_REG_SYS_REGS_S44: - return "S44"; - case HEX_REG_SYS_REGS_S45: - return "S45"; - case HEX_REG_SYS_REGS_S46: - return "S46"; - case HEX_REG_SYS_REGS_S47: - return "S47"; - case HEX_REG_SYS_REGS_S48: - return get_alias ? "PMUCNT0" : "S48"; - case HEX_REG_SYS_REGS_S49: - return get_alias ? "PMUCNT1" : "S49"; - case HEX_REG_SYS_REGS_S50: - return get_alias ? "PMUCNT2" : "S50"; - case HEX_REG_SYS_REGS_S51: - return get_alias ? "PMUCNT3" : "S51"; - case HEX_REG_SYS_REGS_S52: - return get_alias ? "PMUEVTCFG" : "S52"; - case HEX_REG_SYS_REGS_S53: - return get_alias ? 
"PMUCFG" : "S53"; - case HEX_REG_SYS_REGS_S54: - return "S54"; - case HEX_REG_SYS_REGS_S55: - return "S55"; - case HEX_REG_SYS_REGS_S56: - return "S56"; - case HEX_REG_SYS_REGS_S57: - return "S57"; - case HEX_REG_SYS_REGS_S58: - return "S58"; - case HEX_REG_SYS_REGS_S59: - return "S59"; - case HEX_REG_SYS_REGS_S60: - return "S60"; - case HEX_REG_SYS_REGS_S61: - return "S61"; - case HEX_REG_SYS_REGS_S62: - return "S62"; - case HEX_REG_SYS_REGS_S63: - return "S63"; - case HEX_REG_SYS_REGS_S64: - return "S64"; - case HEX_REG_SYS_REGS_S65: - return "S65"; - case HEX_REG_SYS_REGS_S66: - return "S66"; - case HEX_REG_SYS_REGS_S67: - return "S67"; - case HEX_REG_SYS_REGS_S68: - return "S68"; - case HEX_REG_SYS_REGS_S69: - return "S69"; - case HEX_REG_SYS_REGS_S70: - return "S70"; - case HEX_REG_SYS_REGS_S71: - return "S71"; - case HEX_REG_SYS_REGS_S72: - return "S72"; - case HEX_REG_SYS_REGS_S73: - return "S73"; - case HEX_REG_SYS_REGS_S74: - return "S74"; - case HEX_REG_SYS_REGS_S75: - return "S75"; - case HEX_REG_SYS_REGS_S76: - return "S76"; - case HEX_REG_SYS_REGS_S77: - return "S77"; - case HEX_REG_SYS_REGS_S78: - return "S78"; - case HEX_REG_SYS_REGS_S79: - return "S79"; - case HEX_REG_SYS_REGS_S80: - return "S80"; +const char *hex_get_pred_regs(int reg_num, bool get_alias, bool get_new, bool reg_num_is_enum) { + if (reg_num >= ARRAY_LEN(hexagon_predregs_lt_v69)) { + RZ_LOG_INFO("%s: Index out of range during register name lookup: i = %d\n", "hex_get_pred_regs", reg_num); + return NULL; + } + const char *name; + const HexRegNames rn = hexagon_predregs_lt_v69[reg_num]; + if (get_alias) { + name = get_new ? rn.alias_tmp : rn.alias; + } else { + name = get_new ? 
rn.name_tmp : rn.name; + } + if (!name) { + RZ_LOG_INFO("%s: No register name present at index: %d\n", "hex_get_pred_regs", reg_num); + return NULL; + } + return name; +} + +const char *hex_get_sys_regs(int reg_num, bool get_alias, bool get_new, bool reg_num_is_enum) { + if (reg_num >= ARRAY_LEN(hexagon_sysregs_lt_v69)) { + RZ_LOG_INFO("%s: Index out of range during register name lookup: i = %d\n", "hex_get_sys_regs", reg_num); + return NULL; + } + const char *name; + const HexRegNames rn = hexagon_sysregs_lt_v69[reg_num]; + if (get_alias) { + name = get_new ? rn.alias_tmp : rn.alias; + } else { + name = get_new ? rn.name_tmp : rn.name; } + if (!name) { + RZ_LOG_INFO("%s: No register name present at index: %d\n", "hex_get_sys_regs", reg_num); + return NULL; + } + return name; } -char *hex_get_sys_regs64(int opcode_reg, bool get_alias) { - switch (opcode_reg) { - default: return ""; - case HEX_REG_SYS_REGS64_S1_0: - return get_alias ? "SGP1:0" : "S1:0"; - case HEX_REG_SYS_REGS64_S3_2: - return "S3:2"; - case HEX_REG_SYS_REGS64_S5_4: - return get_alias ? "BADVA1:0" : "S5:4"; - case HEX_REG_SYS_REGS64_S7_6: - return get_alias ? "CCR:SSR" : "S7:6"; - case HEX_REG_SYS_REGS64_S9_8: - return "S9:8"; - case HEX_REG_SYS_REGS64_S11_10: - return "S11:10"; - case HEX_REG_SYS_REGS64_S13_12: - return "S13:12"; - case HEX_REG_SYS_REGS64_S15_14: - return "S15:14"; - case HEX_REG_SYS_REGS64_S17_16: - return "S17:16"; - case HEX_REG_SYS_REGS64_S19_18: - return "S19:18"; - case HEX_REG_SYS_REGS64_S21_20: - return "S21:20"; - case HEX_REG_SYS_REGS64_S23_22: - return "S23:22"; - case HEX_REG_SYS_REGS64_S25_24: - return "S25:24"; - case HEX_REG_SYS_REGS64_S27_26: - return "S27:26"; - case HEX_REG_SYS_REGS64_S29_28: - return "S29:28"; - case HEX_REG_SYS_REGS64_S31_30: - return get_alias ? 
"PCYCLE" : "S31:30"; - case HEX_REG_SYS_REGS64_S33_32: - return "S33:32"; - case HEX_REG_SYS_REGS64_S35_34: - return "S35:34"; - case HEX_REG_SYS_REGS64_S37_36: - return "S37:36"; - case HEX_REG_SYS_REGS64_S39_38: - return "S39:38"; - case HEX_REG_SYS_REGS64_S41_40: - return "S41:40"; - case HEX_REG_SYS_REGS64_S43_42: - return "S43:42"; - case HEX_REG_SYS_REGS64_S45_44: - return "S45:44"; - case HEX_REG_SYS_REGS64_S47_46: - return "S47:46"; - case HEX_REG_SYS_REGS64_S49_48: - return "S49:48"; - case HEX_REG_SYS_REGS64_S51_50: - return "S51:50"; - case HEX_REG_SYS_REGS64_S53_52: - return "S53:52"; - case HEX_REG_SYS_REGS64_S55_54: - return "S55:54"; - case HEX_REG_SYS_REGS64_S57_56: - return "S57:56"; - case HEX_REG_SYS_REGS64_S59_58: - return "S59:58"; - case HEX_REG_SYS_REGS64_S61_60: - return "S61:60"; - case HEX_REG_SYS_REGS64_S63_62: - return "S63:62"; - case HEX_REG_SYS_REGS64_S65_64: - return "S65:64"; - case HEX_REG_SYS_REGS64_S67_66: - return "S67:66"; - case HEX_REG_SYS_REGS64_S69_68: - return "S69:68"; - case HEX_REG_SYS_REGS64_S71_70: - return "S71:70"; - case HEX_REG_SYS_REGS64_S73_72: - return "S73:72"; - case HEX_REG_SYS_REGS64_S75_74: - return "S75:74"; - case HEX_REG_SYS_REGS64_S77_76: - return "S77:76"; - case HEX_REG_SYS_REGS64_S79_78: - return "S79:78"; +const char *hex_get_sys_regs64(int reg_num, bool get_alias, bool get_new, bool reg_num_is_enum) { + if (reg_num >= ARRAY_LEN(hexagon_sysregs64_lt_v69)) { + RZ_LOG_INFO("%s: Index out of range during register name lookup: i = %d\n", "hex_get_sys_regs64", reg_num); + return NULL; } + const char *name; + const HexRegNames rn = hexagon_sysregs64_lt_v69[reg_num]; + if (get_alias) { + name = get_new ? rn.alias_tmp : rn.alias; + } else { + name = get_new ? 
rn.name_tmp : rn.name; + } + if (!name) { + RZ_LOG_INFO("%s: No register name present at index: %d\n", "hex_get_sys_regs64", reg_num); + return NULL; + } + return name; } -char *hex_get_reg_in_class(HexRegClass cls, int opcode_reg, bool get_alias) { + +RZ_API const char *hex_get_reg_in_class(HexRegClass cls, int reg_num, bool get_alias, bool get_new, bool reg_num_is_enum) { switch (cls) { case HEX_REG_CLASS_CTR_REGS: - return hex_get_ctr_regs(opcode_reg, get_alias); + return hex_get_ctr_regs(reg_num, get_alias, get_new, reg_num_is_enum); case HEX_REG_CLASS_CTR_REGS64: - return hex_get_ctr_regs64(opcode_reg, get_alias); + return hex_get_ctr_regs64(reg_num, get_alias, get_new, reg_num_is_enum); case HEX_REG_CLASS_DOUBLE_REGS: - return hex_get_double_regs(opcode_reg, get_alias); + return hex_get_double_regs(reg_num, get_alias, get_new, reg_num_is_enum); case HEX_REG_CLASS_GENERAL_DOUBLE_LOW8_REGS: - return hex_get_general_double_low8_regs(opcode_reg, get_alias); + return hex_get_general_double_low8_regs(reg_num, get_alias, get_new, reg_num_is_enum); case HEX_REG_CLASS_GENERAL_SUB_REGS: - return hex_get_general_sub_regs(opcode_reg, get_alias); + return hex_get_general_sub_regs(reg_num, get_alias, get_new, reg_num_is_enum); case HEX_REG_CLASS_GUEST_REGS: - return hex_get_guest_regs(opcode_reg, get_alias); + return hex_get_guest_regs(reg_num, get_alias, get_new, reg_num_is_enum); case HEX_REG_CLASS_GUEST_REGS64: - return hex_get_guest_regs64(opcode_reg, get_alias); + return hex_get_guest_regs64(reg_num, get_alias, get_new, reg_num_is_enum); case HEX_REG_CLASS_HVX_QR: - return hex_get_hvx_qr(opcode_reg, get_alias); + return hex_get_hvx_qr(reg_num, get_alias, get_new, reg_num_is_enum); case HEX_REG_CLASS_HVX_VQR: - return hex_get_hvx_vqr(opcode_reg, get_alias); + return hex_get_hvx_vqr(reg_num, get_alias, get_new, reg_num_is_enum); case HEX_REG_CLASS_HVX_VR: - return hex_get_hvx_vr(opcode_reg, get_alias); + return hex_get_hvx_vr(reg_num, get_alias, get_new, 
reg_num_is_enum); case HEX_REG_CLASS_HVX_WR: - return hex_get_hvx_wr(opcode_reg, get_alias); + return hex_get_hvx_wr(reg_num, get_alias, get_new, reg_num_is_enum); case HEX_REG_CLASS_INT_REGS: - return hex_get_int_regs(opcode_reg, get_alias); + return hex_get_int_regs(reg_num, get_alias, get_new, reg_num_is_enum); case HEX_REG_CLASS_INT_REGS_LOW8: - return hex_get_int_regs_low8(opcode_reg, get_alias); + return hex_get_int_regs_low8(reg_num, get_alias, get_new, reg_num_is_enum); case HEX_REG_CLASS_MOD_REGS: - return hex_get_mod_regs(opcode_reg, get_alias); + return hex_get_mod_regs(reg_num, get_alias, get_new, reg_num_is_enum); case HEX_REG_CLASS_PRED_REGS: - return hex_get_pred_regs(opcode_reg, get_alias); + return hex_get_pred_regs(reg_num, get_alias, get_new, reg_num_is_enum); case HEX_REG_CLASS_SYS_REGS: - return hex_get_sys_regs(opcode_reg, get_alias); + return hex_get_sys_regs(reg_num, get_alias, get_new, reg_num_is_enum); case HEX_REG_CLASS_SYS_REGS64: - return hex_get_sys_regs64(opcode_reg, get_alias); + return hex_get_sys_regs64(reg_num, get_alias, get_new, reg_num_is_enum); default: return NULL; } @@ -922,3 +471,61 @@ int resolve_n_register(const int reg_num, const ut32 addr, const HexPkt *p) { } return UT32_MAX; } + +/** + * \brief Returns a HexOp of the given register number and class. + * + * \param reg_num The register number as in the name. + * \param reg_class The HexRegClass this register belongs to. + * \param tmp_reg Flag if the register is a .new register. + * + * \return A setup HexOp. Currently the HexOp.attr field is *not* set! + */ +RZ_API const HexOp hex_explicit_to_op(ut32 reg_num, HexRegClass reg_class, bool tmp_reg) { + HexOp op = { 0 }; + op.type = HEX_OP_TYPE_REG; + op.class = reg_class; + op.op.reg = reg_num; + // TODO: Add attributes? + return op; +} + +/** + * \brief Returns a HexOp of the given register alias. + * + * \param alias The alias to get the HexOp for. + * \param tmp_reg Flag if the alias is referring to the .new register. 
+ * + * \return A setup HexOp. Currently the HexOp.attr field is *not* set! + */ +RZ_API const HexOp hex_alias_to_op(HexRegAlias alias, bool tmp_reg) { + HexOp op = { 0 }; + if (alias >= ARRAY_LEN(hex_alias_reg_lt_v69)) { + rz_warn_if_reached(); + return op; + } + op.type = HEX_OP_TYPE_REG; + op.class = hex_alias_reg_lt_v69[alias].cls; + op.op.reg = hex_alias_reg_lt_v69[alias].reg_enum; + // TODO: Add attributes? + return op; +} + +/** + * \brief Returns the real register name for a register alias. + * + * \param alias The register alias. + * \param tmp_reg If true, return the tmp (.new) real register name. + * \return const char * The corresponding register name. Or NULL on error. + */ +RZ_API const char *hex_alias_to_reg(HexRegAlias alias, bool tmp_reg) { + if (alias >= ARRAY_LEN(hex_alias_reg_lt_v69)) { + return NULL; + } + HexRegClass reg_class = hex_alias_reg_lt_v69[alias].cls; + int reg_enum = hex_alias_reg_lt_v69[alias].reg_enum; + if (alias == HEX_REG_ALIAS_PC) { + return "PC"; + } + return hex_get_reg_in_class(reg_class, reg_enum, false, tmp_reg, true); +} diff --git a/librz/arch/isa/hexagon/hexagon.h b/librz/arch/isa/hexagon/hexagon.h index c8f6e7e5bce..35c907ed924 100644 --- a/librz/arch/isa/hexagon/hexagon.h +++ b/librz/arch/isa/hexagon/hexagon.h @@ -1,9 +1,9 @@ // SPDX-FileCopyrightText: 2021 Rot127 // SPDX-License-Identifier: LGPL-3.0-only -// LLVM commit: 96e220e6886868d6663d966ecc396befffc355e7 -// LLVM commit date: 2022-01-05 11:01:52 +0000 (ISO 8601 format) -// Date of code generation: 2022-09-12 14:26:04-04:00 +// LLVM commit: b6f51787f6c8e77143f0aef6b58ddc7c55741d5c +// LLVM commit date: 2023-11-15 07:10:59 -0800 (ISO 8601 format) +// Date of code generation: 2024-03-16 06:22:39-05:00 //======================================== // The following code is generated. // Do not edit. 
Repository of code generator: @@ -17,13 +17,43 @@ #include #include #include -#include "hexagon_insn.h" +#include -#define HEX_MAX_OPERANDS 6 -#define HEX_PARSE_BITS_MASK 0xc000 +#define HEX_INSN_SIZE 4 +#define HEX_MAX_INSN_PER_PKT 4 + +#define HEX_PC_ALIGNMENT 0x4 + +#define HEX_PRED_WIDTH 8 +#define HEX_GPR_WIDTH 32 +#define HEX_GPR64_WIDTH 64 +#define HEX_CTR_WIDTH 32 +#define HEX_CTR64_WIDTH 64 + +#define HEX_INVALID_INSN_0 0x00000000 +#define HEX_INVALID_INSN_F 0xffffffff #define MAX_CONST_EXT 512 #define HEXAGON_STATE_PKTS 8 +#define ARRAY_LEN(a) (sizeof(a) / sizeof((a)[0])) + +#define ALIAS2OP(alias, is_new) hex_alias_to_op(alias, is_new) +#define EXPLICIT2OP(num, class, is_new) hex_explicit_to_op(num, class, is_new) +#define NREG2OP(bundle, isa_id) hex_nreg_to_op(bundle, isa_id) +#define HEX_MAX_OPERANDS 6 +#define HEX_PARSE_BITS_MASK 0xc000 + +typedef struct { + const char *name; + const char *alias; + const char *name_tmp; + const char *alias_tmp; +} HexRegNames; + +typedef struct { + ut32 /* Reg class */ cls; + ut32 /* Reg Enum */ reg_enum; +} HexRegAliasMapping; typedef enum { HEX_OP_TYPE_IMM, @@ -65,15 +95,55 @@ typedef struct { } HexPktInfo; typedef struct { - ut8 type; + ut8 /* HexOpType */ type; ///< Operand type: Immediate or register + ut8 class; ///< Equivalent to: HexRegClass (for registers) OR HexOpTemplateFlag (for immediate values). + char isa_id; ///< The identifier character in the ISA of this instruction: 'd' for Rdd, I for Ii etc. 0x0 if not known. union { - ut8 reg; // + additional Hi or Lo selector // + additional shift // + additional :brev // - st64 imm; - } op; - HexOpAttr attr; - ut8 shift; + ut8 reg; ///< Register number. E.g. 3 for R3 etc. + st64 imm; ///< Immediate value. + } op; ///< Actual value of the operand. + HexOpAttr attr; ///< Attributes of the operand. + ut8 shift; ///< Number of bits to shift the bits in the opcode to retrieve the operand value. 
} HexOp; +typedef RzILOpEffect *(*HexILOpGetter)(void /* HexInsnPktBundle */ *); + +typedef enum { + HEX_IL_INSN_ATTR_INVALID = 0, ///< Operation was not set or implemented. + HEX_IL_INSN_ATTR_NONE = 1 << 0, ///< Nothing special about this operation. + HEX_IL_INSN_ATTR_COND = 1 << 1, ///< Executes differently if a certain condition is met. + HEX_IL_INSN_ATTR_SUB = 1 << 2, ///< Operation is a sub-instruction. + HEX_IL_INSN_ATTR_BRANCH = 1 << 3, ///< Operation contains a branch. + HEX_IL_INSN_ATTR_MEM_READ = 1 << 4, ///< Operation reads from the memory. + HEX_IL_INSN_ATTR_MEM_WRITE = 1 << 5, ///< Operation writes to the memory. + HEX_IL_INSN_ATTR_NEW = 1 << 6, ///< Operation reads a .new value. + HEX_IL_INSN_ATTR_WPRED = 1 << 7, ///< Operation writes a predicate register. + HEX_IL_INSN_ATTR_WRITE_P0 = 1 << 8, ///< Writes predicate register P0 + HEX_IL_INSN_ATTR_WRITE_P1 = 1 << 9, ///< Writes predicate register P1 + HEX_IL_INSN_ATTR_WRITE_P2 = 1 << 10, ///< Writes predicate register P2 + HEX_IL_INSN_ATTR_WRITE_P3 = 1 << 11, ///< Writes predicate register P3 +} HexILInsnAttr; + +/** + * \brief Represents a single operation of an instruction. + */ +typedef struct { + HexILOpGetter get_il_op; ///< Pointer to the getter to retrieve the RzILOpEffects of this operation. + HexILInsnAttr attr; ///< Attributes to shuffle it to the correct position in the packets IL ops. + void /* HexInsn */ *hi; ///< The instruction this op belongs to. +} HexILOp; + +/** + * \brief Struct of instruction operations. Usually an instruction has only one operation + * but duplex and compound instructions can have more. + * The last op in this struct has all members set to NULL/0. + */ +typedef struct { + HexILOp op0; + HexILOp op1; + HexILOp end; +} HexILInsn; + typedef struct { bool is_sub; ///< Flag for sub-instructions. ut8 op_count; ///< The number of operands this instruction has. 
@@ -83,6 +153,9 @@ typedef struct { HexInsnID identifier; ///< The instruction identifier char text_infix[128]; ///< Textual disassembly of the instruction. HexOp ops[HEX_MAX_OPERANDS]; ///< The operands of the instructions. + HexILInsn il_insn; ///< RZIL instruction. These are not meant for execution! Use the packet ops for that. + ut8 slot; ///< The slot the instruction occupies. + RzFloatRMode fround_mode; ///< The float rounding mode of the instruction. } HexInsn; /** @@ -98,7 +171,7 @@ typedef struct { HexInsn *insn; ///< Pointer to instruction if is_duplex = false. } bin; ut32 addr; ///< Address of container. Equals address of instruction or of the high sub-instruction if this is a duplex. - ut32 opcode; ///< The instruction opcode. + ut32 bytes; ///< The instruction bytes. HexPktInfo pkt_info; ///< Packet related information. First/last instr., prefix and postfix for text etc. // Deprecated members will be removed on RzArch introduction. RZ_DEPRECATE RzAsmOp asm_op; ///< Private copy of AsmOp. Currently only of interest because it holds the utf8 flag. @@ -106,47 +179,91 @@ typedef struct { char text[296]; ///< Textual disassembly } HexInsnContainer; +#define HEX_LOG_SLOT_BIT_OFF 4 +#define HEX_LOG_SLOT_LOG_WIDTH 2 +#define HEX_LOG_SLOT_LOG_MASK 0b11 + +/** + * \brief Holds information about the execution of the packet. + */ +typedef struct { + RzBitVector *slot_cancelled; ///< Flags for cancelled slots. If bit at (1 << slot i) is set, slot i is cancelled. + RzBitVector *pred_read; ///< Predicate register (P0-P3) read, if flags set at (1 << reg_num) are set. + RzBitVector *pred_tmp_read; ///< Tmp predicate register (P0-P3) read, if flags set at (1 << reg_num) are set. + RzBitVector *pred_written; ///< Predicate register (P0-P3) written, if flags (3:0) are set at (1 << pred_num). + ///< The bits[11:4] are used to indicate the last slot which wrote to the predicate (2bit each). 
+ ///< Details are necessary because, if instructions in different slots + ///< write to the same predicate, the result is ANDed. + RzBitVector *gpr_read; ///< GPR register (R0-R31) read, if flags set at (1 << reg_num) are set. + RzBitVector *gpr_tmp_read; ///< Tmp GPR register (R0-R31) read, if flags set at (1 << reg_num) are set. + RzBitVector *gpr_written; ///< GPR register (R0-R31) written, if flags set at (1 << reg_num) are set. + RzBitVector *ctr_read; ///< Control register (C0-C31) read, if flags set at (1 << reg_num) are set. + RzBitVector *ctr_tmp_read; ///< Tmp control register (C0-C31) read, if flags set at (1 << reg_num) are set. + RzBitVector *ctr_written; ///< Control register (C0-C31) written, if flags set at (1 << reg_num) are set. +} HexILExecData; + /** * \brief Represents an Hexagon instruction packet. * We do not assign instructions to slots, but the order of instructions matters nonetheless. - * The layout of a packet is: + * The layout of a real packet is: * * low addr | Slot 3 * ---------+---------- * | Slot 2 * ---------+---------- - * | Slot 1 -> High Sub-Instruction is always in Slot 1 + * | Slot 1 -> High Sub-Instruction of Duplex is always in Slot 1 * ---------+---------- - * high addr| Slot 0 -> Low Sub-Instruction is always in Slot 0 + * high addr| Slot 0 -> Low Sub-Instruction of Duplex is always in Slot 0 * * Because of this order the textual disassembly of duplex instructions is: " ; ". * Also, the high sub-instruction is located at the _lower_ memory address (aligned to 4 bytes). * The low sub-instruction at . * * This said: The HexPkt.bin holds only instruction container, no instructions! - * The container holds the instructions or sub-instructions. + * The container holds a normal instruction or two sub-instructions. */ typedef struct { - RzList /**/ *bin; ///< Descending by address sorted list of instruction containers. bool last_instr_present; ///< Has an instruction the parsing bits 0b11 set (is last instruction). 
bool is_valid; ///< Is it a valid packet? Do we know which instruction is the first? + bool is_eob; ///< Is this packet the end of a code block? E.g. contains unconditional jmp. + HexLoopAttr hw_loop; ///< If the packet is the end of a hardware loop, it stores here from which one. ut32 hw_loop0_addr; ///< Start address of hardware loop 0 ut32 hw_loop1_addr; ///< Start address of hardware loop 1 - ut64 last_access; ///< Last time accessed in milliseconds ut32 pkt_addr; ///< Address of the packet. Equals the address of the first instruction. - bool is_eob; ///< Is this packet the end of a code block? E.g. contains unconditional jmp. + ut64 last_access; ///< Last time accessed in milliseconds + RzList /**/ *bin; ///< Descending by address sorted list of instruction containers. + RzPVector /**/ *il_ops; ///< Pointer to RZIL ops of the packet. If empty the il ops were not shuffled into order yet. + HexILExecData il_op_stats; ///< Meta information about the IL operations executed (register read/written etc.) } HexPkt; +/** + * \brief This struct is given to the IL getter of each instruction. + * They use it for resolving register names, alias and the like. + */ +typedef struct { + const HexInsn *insn; + HexPkt *pkt; +} HexInsnPktBundle; + typedef struct { - ut32 addr; // Address of the instruction which gets the extender applied. - ut32 const_ext; // The constant extender value. + ut32 addr; ///< Address of the instruction which gets the extender applied. + ut32 const_ext; ///< The constant extender value. } HexConstExt; +/** + * \brief Flags for the debug printing about the state packet buffer. + */ +typedef enum { + HEX_BUF_ADD = 0, ///< Instruction is added to a specific packet i. + HEX_BUF_STALE = 1, ///< Instruction is written to a stale packet (overwrites old one). + HEX_BUF_NEW = 2, ///< Instruction is written to a new packet (overwrites old one). +} HexBufferAction; + /** * \brief Buffer packets for reversed instructions. 
- * */ typedef struct { + bool just_init; ///< Flag indicates if IL VM was just initialized. HexPkt pkts[HEXAGON_STATE_PKTS]; // buffered instructions RzList /**/ *const_ext_l; // Constant extender values. RzAsm rz_asm; // Copy of RzAsm struct. Holds certain flags of interesed for disassembly formatting. @@ -154,6 +271,19 @@ typedef struct { RzPVector /**/ *token_patterns; ///< PVector with token patterns. Priority ordered. } HexState; +/** + * \brief Register fields of different registers. + */ +typedef enum { + HEX_REG_FIELD_USR_LPCFG, ///< The LPCFG field of the USR register + HEX_REG_FIELD_USR_OVF, ///< The OVF field of the USR register +} HexRegField; + +typedef enum { + HEX_RF_WIDTH, + HEX_RF_OFFSET, +} HexRegFieldProperty; + typedef enum { HEX_REG_CLASS_CTR_REGS, HEX_REG_CLASS_CTR_REGS64, @@ -195,6 +325,16 @@ typedef enum { HEX_REG_CTR_REGS_C17 = 17, // framekey HEX_REG_CTR_REGS_C18 = 18, // pktcountlo HEX_REG_CTR_REGS_C19 = 19, // pktcounthi + HEX_REG_CTR_REGS_C20 = 20, // C20 + HEX_REG_CTR_REGS_C21 = 21, + HEX_REG_CTR_REGS_C22 = 22, + HEX_REG_CTR_REGS_C23 = 23, + HEX_REG_CTR_REGS_C24 = 24, + HEX_REG_CTR_REGS_C25 = 25, + HEX_REG_CTR_REGS_C26 = 26, + HEX_REG_CTR_REGS_C27 = 27, + HEX_REG_CTR_REGS_C28 = 28, + HEX_REG_CTR_REGS_C29 = 29, HEX_REG_CTR_REGS_C30 = 30, // utimerlo HEX_REG_CTR_REGS_C31 = 31, // utimerhi } HEX_CTR_REGS; // CtrRegs @@ -210,6 +350,11 @@ typedef enum { HEX_REG_CTR_REGS64_C15_14 = 14, // upcycle HEX_REG_CTR_REGS64_C17_16 = 16, HEX_REG_CTR_REGS64_C19_18 = 18, // pktcount + HEX_REG_CTR_REGS64_C21_20 = 20, + HEX_REG_CTR_REGS64_C23_22 = 22, + HEX_REG_CTR_REGS64_C25_24 = 24, + HEX_REG_CTR_REGS64_C27_26 = 26, + HEX_REG_CTR_REGS64_C29_28 = 28, HEX_REG_CTR_REGS64_C31_30 = 30, // utimer } HEX_CTR_REGS64; // CtrRegs64 @@ -573,34 +718,127 @@ typedef enum { HEX_REG_SYS_REGS64_S79_78 = 78, } HEX_SYS_REGS64; // SysRegs64 -#define BIT_MASK(len) (BIT(len) - 1) -#define BF_MASK(start, len) (BIT_MASK(len) << (start)) -#define BF_PREP(x, start, len) 
(((x)&BIT_MASK(len)) << (start)) -#define BF_GET(y, start, len) (((y) >> (start)) & BIT_MASK(len)) -#define BF_GETB(y, start, end) (BF_GET((y), (start), (end) - (start) + 1) - -char *hex_get_ctr_regs(int opcode_reg, bool get_alias); -char *hex_get_ctr_regs64(int opcode_reg, bool get_alias); -char *hex_get_double_regs(int opcode_reg, bool get_alias); -char *hex_get_general_double_low8_regs(int opcode_reg, bool get_alias); -char *hex_get_general_sub_regs(int opcode_reg, bool get_alias); -char *hex_get_guest_regs(int opcode_reg, bool get_alias); -char *hex_get_guest_regs64(int opcode_reg, bool get_alias); -char *hex_get_hvx_qr(int opcode_reg, bool get_alias); -char *hex_get_hvx_vqr(int opcode_reg, bool get_alias); -char *hex_get_hvx_vr(int opcode_reg, bool get_alias); -char *hex_get_hvx_wr(int opcode_reg, bool get_alias); -char *hex_get_int_regs(int opcode_reg, bool get_alias); -char *hex_get_int_regs_low8(int opcode_reg, bool get_alias); -char *hex_get_mod_regs(int opcode_reg, bool get_alias); -char *hex_get_pred_regs(int opcode_reg, bool get_alias); -char *hex_get_sys_regs(int opcode_reg, bool get_alias); -char *hex_get_sys_regs64(int opcode_reg, bool get_alias); -char *hex_get_reg_in_class(HexRegClass cls, int opcode_reg, bool get_alias); +typedef enum { + HEX_REG_ALIAS_SA0 = 0, + HEX_REG_ALIAS_LC0 = 1, + HEX_REG_ALIAS_SA1 = 2, + HEX_REG_ALIAS_LC1 = 3, + HEX_REG_ALIAS_P3_0 = 4, + HEX_REG_ALIAS_C5 = 5, + HEX_REG_ALIAS_M0 = 6, + HEX_REG_ALIAS_M1 = 7, + HEX_REG_ALIAS_USR = 8, + HEX_REG_ALIAS_PC = 9, + HEX_REG_ALIAS_UGP = 10, + HEX_REG_ALIAS_GP = 11, + HEX_REG_ALIAS_CS0 = 12, + HEX_REG_ALIAS_CS1 = 13, + HEX_REG_ALIAS_UPCYCLELO = 14, + HEX_REG_ALIAS_UPCYCLEHI = 15, + HEX_REG_ALIAS_FRAMELIMIT = 16, + HEX_REG_ALIAS_FRAMEKEY = 17, + HEX_REG_ALIAS_PKTCOUNTLO = 18, + HEX_REG_ALIAS_PKTCOUNTHI = 19, + HEX_REG_ALIAS_C20 = 20, + HEX_REG_ALIAS_UTIMERLO = 21, + HEX_REG_ALIAS_UTIMERHI = 22, + HEX_REG_ALIAS_LC0_SA0 = 23, + HEX_REG_ALIAS_LC1_SA1 = 24, + HEX_REG_ALIAS_M1_0 = 25, + 
HEX_REG_ALIAS_CS1_0 = 26, + HEX_REG_ALIAS_UPCYCLE = 27, + HEX_REG_ALIAS_PKTCOUNT = 28, + HEX_REG_ALIAS_UTIMER = 29, + HEX_REG_ALIAS_LR_FP = 30, + HEX_REG_ALIAS_GELR = 31, + HEX_REG_ALIAS_GSR = 32, + HEX_REG_ALIAS_GOSP = 33, + HEX_REG_ALIAS_GBADVA = 34, + HEX_REG_ALIAS_GPMUCNT4 = 35, + HEX_REG_ALIAS_GPMUCNT5 = 36, + HEX_REG_ALIAS_GPMUCNT6 = 37, + HEX_REG_ALIAS_GPMUCNT7 = 38, + HEX_REG_ALIAS_GPCYCLELO = 39, + HEX_REG_ALIAS_GPCYCLEHI = 40, + HEX_REG_ALIAS_GPMUCNT0 = 41, + HEX_REG_ALIAS_GPMUCNT1 = 42, + HEX_REG_ALIAS_GPMUCNT2 = 43, + HEX_REG_ALIAS_GPMUCNT3 = 44, + HEX_REG_ALIAS_SP = 45, + HEX_REG_ALIAS_FP = 46, + HEX_REG_ALIAS_LR = 47, + HEX_REG_ALIAS_SGP0 = 48, + HEX_REG_ALIAS_SGP1 = 49, + HEX_REG_ALIAS_STID = 50, + HEX_REG_ALIAS_ELR = 51, + HEX_REG_ALIAS_BADVA0 = 52, + HEX_REG_ALIAS_BADVA1 = 53, + HEX_REG_ALIAS_SSR = 54, + HEX_REG_ALIAS_CCR = 55, + HEX_REG_ALIAS_HTID = 56, + HEX_REG_ALIAS_BADVA = 57, + HEX_REG_ALIAS_IMASK = 58, + HEX_REG_ALIAS_EVB = 59, + HEX_REG_ALIAS_MODECTL = 60, + HEX_REG_ALIAS_SYSCFG = 61, + HEX_REG_ALIAS_S19 = 62, + HEX_REG_ALIAS_S20 = 63, + HEX_REG_ALIAS_VID = 64, + HEX_REG_ALIAS_S22 = 65, + HEX_REG_ALIAS_CFGBASE = 66, + HEX_REG_ALIAS_DIAG = 67, + HEX_REG_ALIAS_REV = 68, + HEX_REG_ALIAS_PCYCLELO = 69, + HEX_REG_ALIAS_PCYCLEHI = 70, + HEX_REG_ALIAS_ISDBST = 71, + HEX_REG_ALIAS_ISDBCFG0 = 72, + HEX_REG_ALIAS_ISDBCFG1 = 73, + HEX_REG_ALIAS_BRKPTPC0 = 74, + HEX_REG_ALIAS_BRKPTCFG0 = 75, + HEX_REG_ALIAS_BRKPTPC1 = 76, + HEX_REG_ALIAS_BRKPTCFG1 = 77, + HEX_REG_ALIAS_ISDBMBXIN = 78, + HEX_REG_ALIAS_ISDBMBXOUT = 79, + HEX_REG_ALIAS_ISDBEN = 80, + HEX_REG_ALIAS_ISDBGPR = 81, + HEX_REG_ALIAS_PMUCNT0 = 82, + HEX_REG_ALIAS_PMUCNT1 = 83, + HEX_REG_ALIAS_PMUCNT2 = 84, + HEX_REG_ALIAS_PMUCNT3 = 85, + HEX_REG_ALIAS_PMUEVTCFG = 86, + HEX_REG_ALIAS_PMUCFG = 87, + HEX_REG_ALIAS_SGP1_0 = 88, + HEX_REG_ALIAS_BADVA1_0 = 89, + HEX_REG_ALIAS_CCR_SSR = 90, + HEX_REG_ALIAS_PCYCLE = 91, +} HexRegAlias; + +RZ_API ut32 hex_resolve_reg_enum_id(HexRegClass class, ut32 
reg_num); +const char *hex_get_ctr_regs(int reg_num, bool get_alias, bool get_new, bool reg_num_is_enum); +const char *hex_get_ctr_regs64(int reg_num, bool get_alias, bool get_new, bool reg_num_is_enum); +const char *hex_get_double_regs(int reg_num, bool get_alias, bool get_new, bool reg_num_is_enum); +const char *hex_get_general_double_low8_regs(int reg_num, bool get_alias, bool get_new, bool reg_num_is_enum); +const char *hex_get_general_sub_regs(int reg_num, bool get_alias, bool get_new, bool reg_num_is_enum); +const char *hex_get_guest_regs(int reg_num, bool get_alias, bool get_new, bool reg_num_is_enum); +const char *hex_get_guest_regs64(int reg_num, bool get_alias, bool get_new, bool reg_num_is_enum); +const char *hex_get_hvx_qr(int reg_num, bool get_alias, bool get_new, bool reg_num_is_enum); +const char *hex_get_hvx_vqr(int reg_num, bool get_alias, bool get_new, bool reg_num_is_enum); +const char *hex_get_hvx_vr(int reg_num, bool get_alias, bool get_new, bool reg_num_is_enum); +const char *hex_get_hvx_wr(int reg_num, bool get_alias, bool get_new, bool reg_num_is_enum); +const char *hex_get_int_regs(int reg_num, bool get_alias, bool get_new, bool reg_num_is_enum); +const char *hex_get_int_regs_low8(int reg_num, bool get_alias, bool get_new, bool reg_num_is_enum); +const char *hex_get_mod_regs(int reg_num, bool get_alias, bool get_new, bool reg_num_is_enum); +const char *hex_get_pred_regs(int reg_num, bool get_alias, bool get_new, bool reg_num_is_enum); +const char *hex_get_sys_regs(int reg_num, bool get_alias, bool get_new, bool reg_num_is_enum); +const char *hex_get_sys_regs64(int reg_num, bool get_alias, bool get_new, bool reg_num_is_enum); +RZ_API const char *hex_get_reg_in_class(HexRegClass cls, int reg_num, bool get_alias, bool get_new, bool reg_num_is_enum); RZ_API RZ_BORROW RzConfig *hexagon_get_config(); RZ_API void hex_extend_op(HexState *state, RZ_INOUT HexOp *op, const bool set_new_extender, const ut32 addr); int resolve_n_register(const int 
reg_num, const ut32 addr, const HexPkt *p); int hexagon_disasm_instruction(HexState *state, const ut32 hi_u32, RZ_INOUT HexInsnContainer *hi, HexPkt *pkt); +RZ_API const HexOp hex_alias_to_op(HexRegAlias alias, bool tmp_reg); +RZ_API const char *hex_alias_to_reg_name(HexRegAlias alias, bool tmp_reg); +RZ_API const HexOp hex_explicit_to_op(ut32 reg_num, HexRegClass reg_class, bool tmp_reg); -#endif \ No newline at end of file +#endif diff --git a/librz/arch/isa/hexagon/hexagon_arch.c b/librz/arch/isa/hexagon/hexagon_arch.c index a061f13aff3..cc245073fad 100644 --- a/librz/arch/isa/hexagon/hexagon_arch.c +++ b/librz/arch/isa/hexagon/hexagon_arch.c @@ -1,9 +1,9 @@ // SPDX-FileCopyrightText: 2021 Rot127 // SPDX-License-Identifier: LGPL-3.0-only -// LLVM commit: 96e220e6886868d6663d966ecc396befffc355e7 -// LLVM commit date: 2022-01-05 11:01:52 +0000 (ISO 8601 format) -// Date of code generation: 2022-08-06 14:13:29-04:00 +// LLVM commit: b6f51787f6c8e77143f0aef6b58ddc7c55741d5c +// LLVM commit date: 2023-11-15 07:10:59 -0800 (ISO 8601 format) +// Date of code generation: 2024-03-16 06:22:39-05:00 //======================================== // The following code is generated. // Do not edit. Repository of code generator: @@ -12,9 +12,14 @@ #include #include #include -#include "hexagon.h" -#include "hexagon_insn.h" -#include "hexagon_arch.h" +#include +#include +#include +#include + +static inline bool is_invalid_insn_data(ut32 data) { + return data == HEX_INVALID_INSN_0 || data == HEX_INVALID_INSN_F; +} static inline bool is_last_instr(const ut8 parse_bits) { // Duplex instr. (parse bits = 0) are always the last. @@ -77,7 +82,7 @@ static inline bool is_endloop01_pkt(const ut8 pb_hi_0, const ut8 pb_hi_1) { * \param addr The address of the instruction. * \return Pointer to instruction or NULL if none was found. 
*/ -static HexInsnContainer *hex_get_hic_at_addr(HexState *state, const ut32 addr) { +RZ_API HexInsnContainer *hex_get_hic_at_addr(HexState *state, const ut32 addr) { HexPkt *p; for (ut8 i = 0; i < HEXAGON_STATE_PKTS; ++i) { p = &state->pkts[i]; @@ -86,6 +91,7 @@ static HexInsnContainer *hex_get_hic_at_addr(HexState *state, const ut32 addr) { rz_list_foreach (p->bin, iter, hic) { if (addr == hic->addr) { p->last_access = rz_time_now(); + RZ_LOG_DEBUG("===== RET buffed_pkts[%d] hic @ 0x010%x ====> \n", i, addr); return hic; } } @@ -103,6 +109,78 @@ static inline bool hic_at_addr(RZ_NONNULL const HexInsnContainer *hic, const ut3 return (hic->addr == addr) || (hic->is_duplex && sub_insn_at_addr(hic, addr)); } +/** + * \brief Gives for an ISA register character the register name. + * E.g.: If the ISA instruction uses the variable "Rd", it passes 'd' as identifier to this function. + * The function returns a concrete register name like "R3", "R10" or any other name which is associated with the id. + * + * \param hi The hexagon instruction. + * \param isa_id The ISA register character. + * \param new_reg If true it will return the .new register name ("R3_tmp", "R10_tmp" etc.) + * \return const char * The concrete register name. Or NULL on error. + */ +RZ_API const HexOp *hex_isa_to_reg(const HexInsn *hi, const char isa_id, bool new_reg) { + rz_return_val_if_fail(hi && isa_id, NULL); + const HexOp *op = NULL; + for (ut32 i = 0; i < hi->op_count; ++i) { + if ((hi->ops[i].isa_id == isa_id) && (hi->ops[i].type == HEX_OP_TYPE_REG)) { + op = &hi->ops[i]; + break; + } + } + if (!op) { + RZ_LOG_WARN("Could not find equivalent register for ISA variable \'%c\'\n", isa_id); + return NULL; + } + return op; +} + +/** + * \brief Gives for an n-register the HexOp. + * + * \param bundle The packet and instruction bundle. + * \param isa_id The ISA register character this reg is known to the instruction. + * \return HexOp The HexOp. Or {0} on error. 
+ */ +RZ_API const HexOp hex_nreg_to_op(const HexInsnPktBundle *bundle, const char isa_id) { + rz_return_val_if_fail(bundle && isa_id, (HexOp){ 0 }); + const HexInsn *hi = bundle->insn; + const HexOp *op = NULL; + for (ut32 i = 0; i < hi->op_count; ++i) { + if ((hi->ops[i].isa_id == isa_id) && (hi->ops[i].type == HEX_OP_TYPE_REG)) { + op = &hi->ops[i]; + break; + } + } + if (!op) { + RZ_LOG_WARN("Could not find equivalent register for ISA variable \'%c\'\n", isa_id); + return (HexOp){ 0 }; + } + + HexOp nop = *op; + nop.op.reg = resolve_n_register(op->op.reg, hi->addr, bundle->pkt); + + return nop; +} + +/** + * \brief Gives for a ISA immediate character the immediate value stored in the instruction. + * + * \param hi The hexagon instruction. + * \param isa_id The character which identifies the immediate. + * \return ut64 The immediate value. + */ +RZ_API ut64 hex_isa_to_imm(const HexInsn *hi, const char isa_id) { + rz_return_val_if_fail(hi && isa_id, 0); + for (ut32 i = 0; i < hi->op_count; ++i) { + if (hi->ops[i].isa_id == isa_id && (hi->ops[i].type == HEX_OP_TYPE_IMM)) { + return hi->ops[i].op.imm; + } + } + RZ_LOG_WARN("No immediate operand for \'%c\' found.\n", isa_id); + return 0; +} + /** * \brief Returns the index of an addr in a given packet. * @@ -135,6 +213,7 @@ static void hex_clear_pkt(RZ_NONNULL HexPkt *p) { p->is_valid = false; p->last_access = 0; rz_list_purge(p->bin); + rz_pvector_clear(p->il_ops); } /** @@ -149,9 +228,11 @@ static HexPkt *hex_get_stale_pkt(HexState *state) { for (ut8 i = 0; i < HEXAGON_STATE_PKTS; ++i) { if (state->pkts[i].last_access < oldest) { + oldest = state->pkts[i].last_access; stale_state_pkt = &state->pkts[i]; } } + hex_clear_pkt(stale_state_pkt); return stale_state_pkt; } @@ -162,7 +243,7 @@ static HexPkt *hex_get_stale_pkt(HexState *state) { * \param addr The address of an instruction. * \return HexPkt* The packet to which this address belongs to or NULL if no packet was found. 
*/ -static HexPkt *hex_get_pkt(HexState *state, const ut32 addr) { +RZ_API HexPkt *hex_get_pkt(RZ_BORROW HexState *state, const ut32 addr) { HexPkt *p = NULL; HexInsnContainer *hic = NULL; RzListIter *iter = NULL; @@ -170,6 +251,7 @@ p = &state->pkts[i]; rz_list_foreach (p->bin, iter, hic) { if (hic_at_addr(hic, addr)) { + p->last_access = rz_time_now(); return p; } } @@ -192,30 +274,23 @@ RZ_API void hex_insn_free(RZ_NULLABLE HexInsn *i) { * \param i The instruction container to be freed. */ RZ_API void hex_insn_container_free(RZ_NULLABLE HexInsnContainer *c) { - if (c && c->is_duplex) { + if (c) { + // bin is a union. Just free all of them. hex_insn_free(c->bin.sub[0]); hex_insn_free(c->bin.sub[1]); - } else if (c) { - hex_insn_free(c->bin.insn); } free(c); } /** - * \brief Copies one instruction container to another. + * \brief Moves one instruction container to another. * * \param dest The destination insruction container. * \param src The source instruction container. */ -RZ_API void hex_copy_insn_container(RZ_OUT HexInsnContainer *dest, const HexInsnContainer *src) { +RZ_API void hex_move_insn_container(RZ_OUT HexInsnContainer *dest, const HexInsnContainer *src) { rz_return_if_fail(dest && src); - memcpy(dest, src, sizeof(HexInsnContainer)); - if (src->is_duplex) { - memcpy(dest->bin.sub[0], src->bin.sub[0], sizeof(HexInsn)); - memcpy(dest->bin.sub[1], src->bin.sub[1], sizeof(HexInsn)); - } else { - memcpy(dest->bin.insn, src->bin.insn, sizeof(HexInsn)); - } + memmove(dest, src, sizeof(HexInsnContainer)); } /** @@ -247,11 +322,19 @@ static ut8 get_state_pkt_index(HexState *state, const HexPkt *p) { /** * \brief Initializes each packet of the state once. + * Note that this state is not thread safe. + * It requires RzArch for this. * - * \return The initialized state of the plugins. + * \param reset Reset the state to NULL. Assumes it was freed before. 
+ * + * \return The initialized state of the plugins or NULL if \p reset = true. */ -RZ_API HexState *hexagon_get_state() { +RZ_API HexState *hexagon_state(bool reset) { static HexState *state = NULL; + if (reset) { + state = NULL; + return NULL; + } if (state) { return state; } @@ -262,12 +345,14 @@ RZ_API HexState *hexagon_get_state() { } for (int i = 0; i < HEXAGON_STATE_PKTS; ++i) { state->pkts[i].bin = rz_list_newf((RzListFree)hex_insn_container_free); + state->pkts[i].il_ops = rz_pvector_new(NULL); if (!state->pkts[i].bin) { RZ_LOG_FATAL("Could not initialize instruction list!"); } hex_clear_pkt(&(state->pkts[i])); } state->const_ext_l = rz_list_newf((RzListFree)hex_const_ext_free); + state->token_patterns = NULL; return state; } @@ -416,14 +501,14 @@ void hex_set_hic_text(RZ_INOUT HexInsnContainer *hic) { * \brief Sets the packet related information in an instruction. * * \param hi The instruction. - * \param p The packet the instruction belongs to. + * \param pkt The packet the instruction belongs to. * \param k The index of the instruction within the packet. */ -static void hex_set_pkt_info(const RzAsm *rz_asm, RZ_INOUT HexInsnContainer *hic, const HexPkt *p, const ut8 k, const bool update_text) { - rz_return_if_fail(hic && p); +static void hex_set_pkt_info(const RzAsm *rz_asm, RZ_INOUT HexInsnContainer *hic, const HexPkt *pkt, const ut8 k, const bool update_text) { + rz_return_if_fail(hic && pkt); bool is_first = (k == 0); HexPktInfo *hi_pi = &hic->pkt_info; - HexState *state = hexagon_get_state(); + HexState *state = hexagon_state(false); bool sdk_form = rz_config_get_b(state->cfg, "plugins.hexagon.sdk"); strncpy(hi_pi->text_postfix, "", 16); @@ -431,7 +516,7 @@ static void hex_set_pkt_info(const RzAsm *rz_asm, RZ_INOUT HexInsnContainer *hic if (is_first && is_last_instr(hic->parse_bits)) { // Single instruction packet. 
hi_pi->first_insn = true; hi_pi->last_insn = true; - if (p->is_valid) { + if (pkt->is_valid) { strncpy(hi_pi->text_prefix, get_pkt_indicator(rz_asm->utf8, sdk_form, true, SINGLE_IN_PKT), 8); if (sdk_form) { strncpy(hi_pi->text_postfix, get_pkt_indicator(rz_asm->utf8, sdk_form, false, SINGLE_IN_PKT), 8); @@ -442,7 +527,7 @@ static void hex_set_pkt_info(const RzAsm *rz_asm, RZ_INOUT HexInsnContainer *hic } else if (is_first) { hi_pi->first_insn = true; hi_pi->last_insn = false; - if (p->is_valid) { + if (pkt->is_valid) { strncpy(hi_pi->text_prefix, get_pkt_indicator(rz_asm->utf8, sdk_form, true, FIRST_IN_PKT), 8); } else { strncpy(hi_pi->text_prefix, HEX_PKT_UNK, 8); @@ -450,13 +535,13 @@ static void hex_set_pkt_info(const RzAsm *rz_asm, RZ_INOUT HexInsnContainer *hic } else if (is_last_instr(hic->parse_bits)) { hi_pi->first_insn = false; hi_pi->last_insn = true; - if (p->is_valid) { + if (pkt->is_valid) { strncpy(hi_pi->text_prefix, get_pkt_indicator(rz_asm->utf8, sdk_form, true, LAST_IN_PKT), 8); if (sdk_form) { strncpy(hi_pi->text_postfix, get_pkt_indicator(rz_asm->utf8, sdk_form, false, LAST_IN_PKT), 8); } - switch (hex_get_loop_flag(p)) { + switch (hex_get_loop_flag(pkt)) { default: break; case HEX_LOOP_01: @@ -475,7 +560,7 @@ static void hex_set_pkt_info(const RzAsm *rz_asm, RZ_INOUT HexInsnContainer *hic } else { hi_pi->first_insn = false; hi_pi->last_insn = false; - if (p->is_valid) { + if (pkt->is_valid) { strncpy(hi_pi->text_prefix, get_pkt_indicator(rz_asm->utf8, sdk_form, true, MID_IN_PKT), 8); } else { strncpy(hi_pi->text_prefix, HEX_PKT_UNK, 8); @@ -516,13 +601,42 @@ RZ_API HexLoopAttr hex_get_loop_flag(const HexPkt *p) { } /** - * \brief Sets the packet after pkt to valid and updates its textual assembly. + * \brief Sets the given packet to valid and updates the packet information of + * each instruction in it. + * + * \param state The to operate on. + * \param pkt The packet to set to valid. 
+ */ +static void make_packet_valid(RZ_BORROW HexState *state, RZ_BORROW HexPkt *pkt) { + rz_return_if_fail(state && pkt); + pkt->is_valid = true; + HexInsnContainer *hi = NULL; + RzListIter *it = NULL; + ut8 i = 0; + ut8 slot = 0; + rz_list_foreach (pkt->bin, it, hi) { + hex_set_pkt_info(&state->rz_asm, hi, pkt, i, true); + if (hi->is_duplex) { + hi->bin.sub[0]->slot = 0; + hi->bin.sub[1]->slot = 1; + slot = 2; + } else { + hi->bin.insn->slot = slot; + ++slot; + } + ++i; + } + pkt->last_access = rz_time_now(); +} + +/** + * \brief Sets the packet after \p pkt to valid and updates its textual assembly. * * \param state The state to operate on. * \param pkt The packet which predecessor will be updated. */ static void make_next_packet_valid(HexState *state, const HexPkt *pkt) { - HexInsnContainer *tmp = rz_list_last(pkt->bin); + HexInsnContainer *tmp = rz_list_get_n(pkt->bin, 0); if (!tmp) { return; } @@ -535,15 +649,7 @@ static void make_next_packet_valid(HexState *state, const HexPkt *pkt) { if (p->is_valid) { break; } - p->is_valid = true; - HexInsnContainer *hi = NULL; - RzListIter *it = NULL; - ut8 k = 0; - rz_list_foreach (p->bin, it, hi) { - hex_set_pkt_info(&state->rz_asm, hi, p, k, true); - ++k; - } - p->last_access = rz_time_now(); + make_packet_valid(state, p); break; } } @@ -559,6 +665,7 @@ RZ_API HexInsn *hexagon_alloc_instr() { if (!hi) { RZ_LOG_FATAL("Could not allocate memory for new instruction.\n"); } + hi->fround_mode = RZ_FLOAT_RMODE_RNE; return hi; } @@ -580,31 +687,32 @@ RZ_API HexInsnContainer *hexagon_alloc_instr_container() { * * \param state The state to operate on. * \param new_hic The instruction container to copy. - * \param p The packet in which will hold the instruction container. + * \param pkt The packet in which will hold the instruction container. * \param k The index of the instruction container in the packet. * \return HexInsnContainer* Pointer to the copied instruction container on the heap. 
*/ -static HexInsnContainer *hex_add_to_pkt(HexState *state, const HexInsnContainer *new_hic, RZ_INOUT HexPkt *p, const ut8 k) { +static HexInsnContainer *hex_add_to_pkt(HexState *state, const HexInsnContainer *new_hic, RZ_INOUT HexPkt *pkt, const ut8 k) { if (k > 3) { RZ_LOG_FATAL("Instruction could not be set! A packet can only hold four instructions but k=%d.", k); } HexInsnContainer *hic = hexagon_alloc_instr_container(); - hex_copy_insn_container(hic, new_hic); - rz_list_insert(p->bin, k, hic); + hex_move_insn_container(hic, new_hic); + rz_list_del_n(pkt->bin, k); + rz_list_insert(pkt->bin, k, hic); if (k == 0) { - p->pkt_addr = hic->addr; + pkt->pkt_addr = hic->addr; } - p->last_instr_present |= is_last_instr(hic->parse_bits); - ut32 p_l = rz_list_length(p->bin); - hex_set_pkt_info(&state->rz_asm, hic, p, k, false); + pkt->last_instr_present |= is_last_instr(hic->parse_bits); + ut32 p_l = rz_list_length(pkt->bin); + hex_set_pkt_info(&state->rz_asm, hic, pkt, k, false); if (k == 0 && p_l > 1) { // Update the instruction which was previously the first one. - hex_set_pkt_info(&state->rz_asm, rz_list_get_n(p->bin, 1), p, 1, true); + hex_set_pkt_info(&state->rz_asm, rz_list_get_n(pkt->bin, 1), pkt, 1, true); } - p->last_access = rz_time_now(); - if (p->last_instr_present) { - make_next_packet_valid(state, p); + pkt->last_access = rz_time_now(); + if (pkt->last_instr_present) { + make_next_packet_valid(state, pkt); } return hic; } @@ -614,26 +722,27 @@ static HexInsnContainer *hex_add_to_pkt(HexState *state, const HexInsnContainer * * \param state The state to operate on. * \param new_hic The instruction container to copy. - * \param p The old packet which attributes are copied to the new one. - * \param new_p The new packet will hold the instruction container. + * \param pkt The old packet which attributes are copied to the new one. + * \param new_pkt The new packet will hold the instruction container. 
* \return HexInsnContainer* Pointer to the copied instruction container on the heap. */ -static HexInsnContainer *hex_to_new_pkt(HexState *state, const HexInsnContainer *new_hic, const HexPkt *p, RZ_INOUT HexPkt *new_p) { - hex_clear_pkt(new_p); +static HexInsnContainer *hex_to_new_pkt(HexState *state, const HexInsnContainer *new_hic, const HexPkt *pkt, RZ_INOUT HexPkt *new_pkt) { + hex_clear_pkt(new_pkt); HexInsnContainer *hic = hexagon_alloc_instr_container(); - hex_copy_insn_container(hic, new_hic); - rz_list_insert(new_p->bin, 0, hic); - - new_p->last_instr_present |= is_last_instr(hic->parse_bits); - new_p->hw_loop0_addr = p->hw_loop0_addr; - new_p->hw_loop1_addr = p->hw_loop1_addr; - new_p->is_valid = (p->is_valid || p->last_instr_present); - new_p->pkt_addr = hic->addr; - new_p->last_access = rz_time_now(); - hex_set_pkt_info(&state->rz_asm, hic, new_p, 0, false); - if (new_p->last_instr_present) { - make_next_packet_valid(state, new_p); + hex_move_insn_container(hic, new_hic); + rz_list_del_n(new_pkt->bin, 0); + rz_list_insert(new_pkt->bin, 0, hic); + + new_pkt->last_instr_present |= is_last_instr(hic->parse_bits); + new_pkt->hw_loop0_addr = pkt->hw_loop0_addr; + new_pkt->hw_loop1_addr = pkt->hw_loop1_addr; + new_pkt->is_valid = (pkt->is_valid || pkt->last_instr_present); + new_pkt->pkt_addr = hic->addr; + new_pkt->last_access = rz_time_now(); + hex_set_pkt_info(&state->rz_asm, hic, new_pkt, 0, false); + if (new_pkt->last_instr_present) { + make_next_packet_valid(state, new_pkt); } return hic; } @@ -646,24 +755,101 @@ static HexInsnContainer *hex_to_new_pkt(HexState *state, const HexInsnContainer * \return HexInsnContainer* Pointer to the copied instruction container on the heap. 
*/ static HexInsnContainer *hex_add_to_stale_pkt(HexState *state, const HexInsnContainer *new_hic) { - HexPkt *p = hex_get_stale_pkt(state); - hex_clear_pkt(p); + HexPkt *pkt = hex_get_stale_pkt(state); + hex_clear_pkt(pkt); HexInsnContainer *hic = hexagon_alloc_instr_container(); - hex_copy_insn_container(hic, new_hic); - rz_list_insert(p->bin, 0, hic); + hex_move_insn_container(hic, new_hic); + rz_list_insert(pkt->bin, 0, hic); - p->last_instr_present |= is_last_instr(hic->parse_bits); - p->pkt_addr = new_hic->addr; + pkt->last_instr_present |= is_last_instr(hic->parse_bits); + pkt->pkt_addr = new_hic->addr; // p->is_valid = true; // Setting it true also detects a lot of data as valid assembly. - p->last_access = rz_time_now(); - hex_set_pkt_info(&state->rz_asm, hic, p, 0, false); - if (p->last_instr_present) { - make_next_packet_valid(state, p); + pkt->last_access = rz_time_now(); + hex_set_pkt_info(&state->rz_asm, hic, pkt, 0, false); + if (pkt->last_instr_present) { + make_next_packet_valid(state, pkt); } return hic; } +#if RZ_BUILD_DEBUG +static char desc_letter_hic(const HexInsnContainer *hic) { + char desc = ' '; + if (!hic) { + desc = ' '; + } else if (hic->is_duplex) { + desc = hic->bin.sub[0]->identifier != HEX_INS_INVALID_DECODE ? 'v' : 'i'; + desc = hic->pkt_info.last_insn ? 'l' : desc; + } else { + desc = hic->bin.insn->identifier != HEX_INS_INVALID_DECODE ? 'v' : 'i'; + desc = hic->pkt_info.last_insn ? 
'l' : desc; + } + return desc; +} +#endif + +static void print_state_pkt(const HexState *state, st32 index, HexBufferAction action, const HexInsnContainer *new_hic) { +#if RZ_BUILD_DEBUG + ut32 oldest = 7; + ut32 newest = 0; + ut64 min_time = 0xffffffffffffffff; + ut64 max_time = 0; + for (int i = 0; i < HEXAGON_STATE_PKTS; ++i) { + const HexPkt *pkt = &state->pkts[i]; + if (pkt->last_access < min_time) { + min_time = pkt->last_access; + oldest = i; + } + if (pkt->last_access > max_time) { + max_time = pkt->last_access; + newest = i; + } + } + RZ_LOG_DEBUG("╭─────┬──────────────┬─────┬──────────────────┬───────────────╮\n"); + RZ_LOG_DEBUG("│ pkt │ packet │ │ │ [i]n[v]alid │\n"); + RZ_LOG_DEBUG("│ id │ address │ age │ last access │ [l]ast │\n"); + RZ_LOG_DEBUG("├─────┼──────────────┼─────┼──────────────────┼───┬───┬───┬───┤\n"); + RzStrBuf *pkt_line = rz_strbuf_new(""); + for (int i = 0; i < HEXAGON_STATE_PKTS; ++i) { + const HexPkt *pkt = &state->pkts[i]; + const char *time_ind = " "; + if (i == oldest) { + time_ind = "old"; + } else if (i == newest) { + time_ind = "new"; + } + rz_strbuf_appendf(pkt_line, "│ %d │ 0x%010x │ %s │ %016llu │ ", i, pkt->pkt_addr, time_ind, pkt->last_access); + HexInsnContainer *hic = NULL; + for (int j = 0; j < 4; ++j) { + hic = rz_list_get_n(pkt->bin, j); + const char desc = desc_letter_hic(hic); + rz_strbuf_appendf(pkt_line, "%c │ ", desc); + } + if ((index < 0 && i == oldest) || (index == i)) { + const char desc = desc_letter_hic(new_hic); + rz_strbuf_append(pkt_line, " < "); + if (action == HEX_BUF_ADD) { + rz_strbuf_appendf(pkt_line, "%s %c", "ADDED", desc); + } else if (action == HEX_BUF_STALE) { + rz_strbuf_appendf(pkt_line, "added %c %s", desc, "to STALE"); + } else if (action == HEX_BUF_NEW) { + rz_strbuf_appendf(pkt_line, "added %c %s", desc, "to NEW"); + } + } + rz_strbuf_append(pkt_line, "\n"); + RZ_LOG_DEBUG("%s", rz_strbuf_get(pkt_line)); + rz_strbuf_fini(pkt_line); + if (i < HEXAGON_STATE_PKTS - 1) { + 
RZ_LOG_DEBUG("├─────┼──────────────┼─────┼──────────────────┼───┼───┼───┼───┤\n"); + } else { + RZ_LOG_DEBUG("╰─────┴──────────────┴─────┴──────────────────┴───┴───┴───┴───╯\n"); + } + } + rz_strbuf_free(pkt_line); +#endif +} + /** * \brief Copies the given instruction container to a state packet it belongs to. * If the instruction container does not fit to any packet, it will be written to a stale one. @@ -689,17 +875,18 @@ static HexInsnContainer *hex_add_hic_to_state(HexState *state, const HexInsnCont return hex_add_to_stale_pkt(state, new_hic); } - for (ut8 i = 0; i < HEXAGON_STATE_PKTS; ++i, k = 0) { + ut32 i = 0; + for (; i < HEXAGON_STATE_PKTS; ++i, k = 0) { p = &(state->pkts[i]); HexInsnContainer *p_hic = NULL; // Instructions container already in the packet. RzListIter *iter = NULL; rz_list_foreach (p->bin, iter, p_hic) { if (new_hic->addr == (p_hic->addr - 4)) { - // Instruction preceeds one in the packet. + // Instruction precedes one in the packet. if (is_last_instr(new_hic->parse_bits) || is_pkt_full(p)) { - write_to_stale_pkt = true; - break; + // Continue searching. The instruction might belong to another packet. + continue; } else { insert_before_pkt_hi = true; add_to_pkt = true; @@ -720,20 +907,31 @@ static HexInsnContainer *hex_add_hic_to_state(HexState *state, const HexInsnCont break; } } + if (!add_to_pkt && !new_pkt && !write_to_stale_pkt) { + // No packet found this one belongs to. + // Add to a stale one. 
+ write_to_stale_pkt = true; + } // Add the instruction to packet p if (add_to_pkt) { if (insert_before_pkt_hi) { - return hex_add_to_pkt(state, new_hic, p, k); + HexInsnContainer *result_hic = hex_add_to_pkt(state, new_hic, p, k); + print_state_pkt(state, i, HEX_BUF_ADD, result_hic); + return result_hic; } - return hex_add_to_pkt(state, new_hic, p, k + 1); - + HexInsnContainer *result_hic = hex_add_to_pkt(state, new_hic, p, k + 1); + print_state_pkt(state, i, HEX_BUF_ADD, result_hic); + return result_hic; } else if (new_pkt) { ut8 ni = (get_state_pkt_index(state, p) + 1) % HEXAGON_STATE_PKTS; - return hex_to_new_pkt(state, new_hic, p, &state->pkts[ni]); - } else { - return hex_add_to_stale_pkt(state, new_hic); + HexInsnContainer *result_hic = hex_to_new_pkt(state, new_hic, p, &state->pkts[ni]); + print_state_pkt(state, ni, HEX_BUF_NEW, result_hic); + return result_hic; } + HexInsnContainer *result_hic = hex_add_to_stale_pkt(state, new_hic); + print_state_pkt(state, -1, HEX_BUF_STALE, result_hic); + return result_hic; } /** @@ -744,8 +942,11 @@ static HexInsnContainer *hex_add_hic_to_state(HexState *state, const HexInsnCont * \param addr The address of the instruction container. * \param parse_bits The parse bits of the instruction container. 
*/ -static void setup_new_hic(HexInsnContainer *hic, const HexReversedOpcode *rz_reverse, const ut32 addr, const ut8 parse_bits) { +static void setup_new_hic(HexInsnContainer *hic, const HexReversedOpcode *rz_reverse, const ut32 addr, const ut8 parse_bits, ut32 data) { + rz_return_if_fail(hic && rz_reverse); + bool invalid = is_invalid_insn_data(data); hic->identifier = HEX_INS_INVALID_DECODE; + hic->bytes = data; hic->addr = addr; hic->parse_bits = parse_bits; if (rz_reverse->asm_op) { @@ -765,7 +966,11 @@ static void setup_new_hic(HexInsnContainer *hic, const HexReversedOpcode *rz_rev hic->asm_op.size = 4; hic->ana_op.size = 4; - if (parse_bits == 0b00) { + + hic->bin.sub[0] = NULL; + hic->bin.sub[1] = NULL; + if (parse_bits == 0b00 && !invalid) { + hic->is_duplex = true; hic->bin.sub[0] = hexagon_alloc_instr(); hic->bin.sub[1] = hexagon_alloc_instr(); } else { @@ -830,15 +1035,43 @@ RZ_API void hex_extend_op(HexState *state, RZ_INOUT HexOp *op, const bool set_ne } } +static void copy_asm_ana_ops(const HexState *state, RZ_BORROW HexReversedOpcode *rz_reverse, RZ_BORROW HexInsnContainer *hic) { + rz_return_if_fail(state && rz_reverse && hic); + switch (rz_reverse->action) { + default: + memcpy(rz_reverse->asm_op, &hic->asm_op, sizeof(RzAsmOp)); + memcpy(rz_reverse->ana_op, &hic->ana_op, sizeof(RzAnalysisOp)); + rz_strbuf_set(&rz_reverse->asm_op->buf_asm, hic->text); + rz_reverse->asm_op->asm_toks = rz_asm_tokenize_asm_regex(&rz_reverse->asm_op->buf_asm, state->token_patterns); + if (rz_reverse->asm_op->asm_toks) { + rz_reverse->asm_op->asm_toks->op_type = hic->ana_op.type; + } + break; + case HEXAGON_DISAS: + memcpy(rz_reverse->asm_op, &hic->asm_op, sizeof(RzAsmOp)); + rz_strbuf_set(&rz_reverse->asm_op->buf_asm, hic->text); + rz_reverse->asm_op->asm_toks = rz_asm_tokenize_asm_regex(&rz_reverse->asm_op->buf_asm, state->token_patterns); + if (rz_reverse->asm_op->asm_toks) { + rz_reverse->asm_op->asm_toks->op_type = hic->ana_op.type; + } + break; + case 
HEXAGON_ANALYSIS: + memcpy(rz_reverse->ana_op, &hic->ana_op, sizeof(RzAnalysisOp)); + break; + } +} + /** - * \brief Reverses a given opcode and copies the result into one of the rizin structs in rz_reverse. + * \brief Reverses a given opcode and copies the result into one of the rizin structs in rz_reverse + * if \p copy_result is set. * * \param rz_reverse Rizin core structs which store asm and analysis information. * \param buf The buffer which stores the current opcode. * \param addr The address of the current opcode. + * \param copy_result If set, it copies the result. Otherwise it only buffers it in the internal state. */ -RZ_API void hexagon_reverse_opcode(const RzAsm *rz_asm, HexReversedOpcode *rz_reverse, const ut8 *buf, const ut64 addr) { - HexState *state = hexagon_get_state(); +RZ_API void hexagon_reverse_opcode(const RzAsm *rz_asm, HexReversedOpcode *rz_reverse, const ut8 *buf, const ut64 addr, const bool copy_result) { + HexState *state = hexagon_state(false); if (!state) { RZ_LOG_FATAL("HexState was NULL."); } @@ -846,32 +1079,29 @@ RZ_API void hexagon_reverse_opcode(const RzAsm *rz_asm, HexReversedOpcode *rz_re memcpy(&state->rz_asm, rz_asm, sizeof(RzAsm)); } HexInsnContainer *hic = hex_get_hic_at_addr(state, addr); - if (hic) { - // Opcode was already reversed and is still in the state. Copy the result and return. 
- switch (rz_reverse->action) { - default: - memcpy(rz_reverse->asm_op, &(hic->asm_op), sizeof(RzAsmOp)); - memcpy(rz_reverse->ana_op, &(hic->ana_op), sizeof(RzAnalysisOp)); - rz_strbuf_set(&rz_reverse->asm_op->buf_asm, hic->text); - rz_reverse->asm_op->asm_toks = rz_asm_tokenize_asm_regex(&rz_reverse->asm_op->buf_asm, state->token_patterns); - rz_reverse->asm_op->asm_toks->op_type = hic->ana_op.type; - return; - case HEXAGON_DISAS: - memcpy(rz_reverse->asm_op, &(hic->asm_op), sizeof(RzAsmOp)); - rz_strbuf_set(&rz_reverse->asm_op->buf_asm, hic->text); - rz_reverse->asm_op->asm_toks = rz_asm_tokenize_asm_regex(&rz_reverse->asm_op->buf_asm, state->token_patterns); - rz_reverse->asm_op->asm_toks->op_type = hic->ana_op.type; - return; - case HEXAGON_ANALYSIS: - memcpy(rz_reverse->ana_op, &(hic->ana_op), sizeof(RzAnalysisOp)); - return; + if (hic && !is_invalid_insn_data(hic->bytes)) { + // Code was already reversed and is still in the state. Copy the result and return. + // + // We never return buffered instructions of 0x00000000 and 0xffffffff. + // Because Rizin's IO layer is not a transparent view into the binary. + // Sometimes it passes a buffer for address `a` of size `n`, which has only + // `m` bytes of actual binary data set (where `m < n`). + // Although, there are still valid instructions bytes at `a + m` in the + // actual binary. So the IO layer only passes a certain window of `n - m` valid bytes + // and sets the rest to `0x0` or `0xff`. + // So previously we might have disassembled and buffered those invalid bytes + // at `a + m`. Although in the actual binary there are valid + // instructions at this address. 
+ if (copy_result) { + copy_asm_ana_ops(state, rz_reverse, hic); } + return; } ut32 data = rz_read_le32(buf); ut8 parse_bits = (data & HEX_PARSE_BITS_MASK) >> 14; HexInsnContainer hic_new = { 0 }; - setup_new_hic(&hic_new, rz_reverse, addr, parse_bits); + setup_new_hic(&hic_new, rz_reverse, addr, parse_bits, data); // Add to state hic = hex_add_hic_to_state(state, &hic_new); if (!hic) { @@ -879,29 +1109,10 @@ RZ_API void hexagon_reverse_opcode(const RzAsm *rz_asm, HexReversedOpcode *rz_re } HexPkt *p = hex_get_pkt(state, hic->addr); - // Do disasassembly and analysis + // Do disassembly and analysis hexagon_disasm_instruction(state, data, hic, p); - switch (rz_reverse->action) { - default: - memcpy(rz_reverse->asm_op, &hic->asm_op, sizeof(RzAsmOp)); - memcpy(rz_reverse->ana_op, &hic->ana_op, sizeof(RzAnalysisOp)); - rz_strbuf_set(&rz_reverse->asm_op->buf_asm, hic->text); - rz_reverse->asm_op->asm_toks = rz_asm_tokenize_asm_regex(&rz_reverse->asm_op->buf_asm, state->token_patterns); - if (rz_reverse->asm_op->asm_toks) { - rz_reverse->asm_op->asm_toks->op_type = hic->ana_op.type; - } - break; - case HEXAGON_DISAS: - memcpy(rz_reverse->asm_op, &hic->asm_op, sizeof(RzAsmOp)); - rz_strbuf_set(&rz_reverse->asm_op->buf_asm, hic->text); - rz_reverse->asm_op->asm_toks = rz_asm_tokenize_asm_regex(&rz_reverse->asm_op->buf_asm, state->token_patterns); - if (rz_reverse->asm_op->asm_toks) { - rz_reverse->asm_op->asm_toks->op_type = hic->ana_op.type; - } - break; - case HEXAGON_ANALYSIS: - memcpy(rz_reverse->ana_op, &hic->ana_op, sizeof(RzAnalysisOp)); - break; + if (copy_result) { + copy_asm_ana_ops(state, rz_reverse, hic); } -} +} \ No newline at end of file diff --git a/librz/arch/isa/hexagon/hexagon_arch.h b/librz/arch/isa/hexagon/hexagon_arch.h index 84dd70ac28f..e29c4b53d79 100644 --- a/librz/arch/isa/hexagon/hexagon_arch.h +++ b/librz/arch/isa/hexagon/hexagon_arch.h @@ -1,9 +1,9 @@ // SPDX-FileCopyrightText: 2021 Rot127 // SPDX-License-Identifier: LGPL-3.0-only -// LLVM 
commit: 96e220e6886868d6663d966ecc396befffc355e7 -// LLVM commit date: 2022-01-05 11:01:52 +0000 (ISO 8601 format) -// Date of code generation: 2022-07-17 18:44:39-04:00 +// LLVM commit: b6f51787f6c8e77143f0aef6b58ddc7c55741d5c +// LLVM commit date: 2023-11-15 07:10:59 -0800 (ISO 8601 format) +// Date of code generation: 2024-03-16 06:22:39-05:00 //======================================== // The following code is generated. // Do not edit. Repository of code generator: @@ -11,7 +11,7 @@ #ifndef HEXAGON_ARCH_H #define HEXAGON_ARCH_H -#include "hexagon.h" +#include // The packet position indicators added to the instruction text. typedef enum { @@ -67,10 +67,16 @@ RZ_API void hex_insn_free(RZ_NULLABLE HexInsn *i); RZ_API HexInsnContainer *hexagon_alloc_instr_container(); RZ_API void hex_insn_container_free(RZ_NULLABLE HexInsnContainer *c); RZ_API void hex_const_ext_free(RZ_NULLABLE HexConstExt *ce); -RZ_API HexState *hexagon_get_state(); -RZ_API void hexagon_reverse_opcode(const RzAsm *rz_asm, HexReversedOpcode *rz_reverse, const ut8 *buf, const ut64 addr); +RZ_API HexState *hexagon_state(bool reset); +RZ_IPI void hexagon_state_fini(HexState *state); +RZ_API void hexagon_reverse_opcode(const RzAsm *rz_asm, HexReversedOpcode *rz_reverse, const ut8 *buf, const ut64 addr, const bool copy_result); RZ_API ut8 hexagon_get_pkt_index_of_addr(const ut32 addr, const HexPkt *p); RZ_API HexLoopAttr hex_get_loop_flag(const HexPkt *p); +RZ_API const HexOp *hex_isa_to_reg(const HexInsn *hi, const char isa_id, bool new_reg); +RZ_API ut64 hex_isa_to_imm(const HexInsn *hi, const char isa_id); void hex_set_hic_text(RZ_INOUT HexInsnContainer *hic); -RZ_API void hex_copy_insn_container(RZ_OUT HexInsnContainer *dest, const HexInsnContainer *src); +RZ_API void hex_move_insn_container(RZ_OUT HexInsnContainer *dest, const HexInsnContainer *src); +RZ_API HexPkt *hex_get_pkt(RZ_BORROW HexState *state, const ut32 addr); +RZ_API HexInsnContainer *hex_get_hic_at_addr(HexState *state, const ut32 
addr); +RZ_API const HexOp hex_nreg_to_op(const HexInsnPktBundle *bundle, const char isa_id); #endif \ No newline at end of file diff --git a/librz/arch/isa/hexagon/hexagon_disas.c b/librz/arch/isa/hexagon/hexagon_disas.c index d21568a1662..e23bc5e00d2 100644 --- a/librz/arch/isa/hexagon/hexagon_disas.c +++ b/librz/arch/isa/hexagon/hexagon_disas.c @@ -3,7 +3,7 @@ // LLVM commit: b6f51787f6c8e77143f0aef6b58ddc7c55741d5c // LLVM commit date: 2023-11-15 07:10:59 -0800 (ISO 8601 format) -// Date of code generation: 2023-11-15 11:30:41-05:00 +// Date of code generation: 2024-03-16 06:22:39-05:00 //======================================== // The following code is generated. // Do not edit. Repository of code generator: @@ -16,9 +16,9 @@ #include #include #include -#include "hexagon.h" -#include "hexagon_insn.h" -#include "hexagon_arch.h" +#include +#include +#include #if ASAN && !defined(__clang__) #define NO_OPT_IF_ASAN __attribute__((optimize(0))) @@ -64,6 +64,7 @@ typedef struct { typedef struct { ut8 info; // HexOpTemplateType | HexOpTemplateFlag ut8 syntax; // offset into HexInsnTemplate.syntax where to insert this op + char isa_id; HexOpMask masks[HEX_OP_MASKS_MAX]; union { ut8 imm_scale; @@ -100,9 +101,9 @@ static const HexInsnTemplate templates_sub_A[] = { .encoding = { .mask = 0xf0001800, .op = 0x0 }, .id = HEX_INS_SA1_ADDI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x4, 0 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x4, 0 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 4 } }, .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x4, 0 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 
'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x4, 0 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 'x', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 4 } }, .isa_id = 's', .syntax = 8 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -114,9 +115,9 @@ static const HexInsnTemplate templates_sub_A[] = { .encoding = { .mask = 0xf0001f00, .op = 0x1800 }, .id = HEX_INS_SA1_ADDRX, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x4, 0 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x4, 0 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 4 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x4, 0 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x4, 0 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 'x', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 4 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 's', .syntax = 8 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -128,8 +129,8 @@ static const HexInsnTemplate templates_sub_A[] = { .encoding = { .mask = 0xf0001c00, .op = 0xc00 }, .id = HEX_INS_SA1_ADDSP, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x4, 0 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x6, 4 } }, .imm_scale = 2, .syntax = 11 }, + { .info = 
HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x4, 0 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x6, 4 } }, .isa_id = 'u', .imm_scale = 2, .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -141,8 +142,8 @@ static const HexInsnTemplate templates_sub_A[] = { .encoding = { .mask = 0xf0001f00, .op = 0x1200 }, .id = HEX_INS_SA1_AND1, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x4, 0 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 4 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x4, 0 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 4 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 's', .syntax = 7 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -154,7 +155,7 @@ static const HexInsnTemplate templates_sub_A[] = { .encoding = { .mask = 0xf0001ff0, .op = 0x1a70 }, .id = HEX_INS_SA1_CLRF, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x4, 0 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x4, 0 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 'd', .syntax = 9 }, }, .pred = HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -167,7 +168,7 @@ static const HexInsnTemplate templates_sub_A[] = { .encoding = { .mask = 0xf0001ff0, .op = 0x1a50 }, .id = HEX_INS_SA1_CLRFNEW, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x4, 0 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 13 }, + { .info = 
HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x4, 0 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 'd', .syntax = 13 }, }, .pred = HEX_PRED_FALSE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -180,7 +181,7 @@ static const HexInsnTemplate templates_sub_A[] = { .encoding = { .mask = 0xf0001ff0, .op = 0x1a60 }, .id = HEX_INS_SA1_CLRT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x4, 0 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x4, 0 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 'd', .syntax = 8 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -193,7 +194,7 @@ static const HexInsnTemplate templates_sub_A[] = { .encoding = { .mask = 0xf0001ff0, .op = 0x1a40 }, .id = HEX_INS_SA1_CLRTNEW, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x4, 0 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x4, 0 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 'd', .syntax = 12 }, }, .pred = HEX_PRED_TRUE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -206,8 +207,8 @@ static const HexInsnTemplate templates_sub_A[] = { .encoding = { .mask = 0xf0001f0c, .op = 0x1900 }, .id = HEX_INS_SA1_CMPEQI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 4 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x2, 0 } }, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 4 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x2, 0 } }, .isa_id = 'u', .syntax = 13 }, }, .pred = 
HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -219,8 +220,8 @@ static const HexInsnTemplate templates_sub_A[] = { .encoding = { .mask = 0xf0001f98, .op = 0x1c00 }, .id = HEX_INS_SA1_COMBINE0I, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x3, 0 } }, .reg_cls = HEX_REG_CLASS_GENERAL_DOUBLE_LOW8_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x2, 5 } }, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x3, 0 } }, .reg_cls = HEX_REG_CLASS_GENERAL_DOUBLE_LOW8_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x2, 5 } }, .isa_id = 'u', .syntax = 14 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -232,8 +233,8 @@ static const HexInsnTemplate templates_sub_A[] = { .encoding = { .mask = 0xf0001f98, .op = 0x1c08 }, .id = HEX_INS_SA1_COMBINE1I, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x3, 0 } }, .reg_cls = HEX_REG_CLASS_GENERAL_DOUBLE_LOW8_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x2, 5 } }, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x3, 0 } }, .reg_cls = HEX_REG_CLASS_GENERAL_DOUBLE_LOW8_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x2, 5 } }, .isa_id = 'u', .syntax = 14 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -245,8 +246,8 @@ static const HexInsnTemplate templates_sub_A[] = { .encoding = { .mask = 0xf0001f98, .op = 0x1c10 }, .id = HEX_INS_SA1_COMBINE2I, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | 
HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x3, 0 } }, .reg_cls = HEX_REG_CLASS_GENERAL_DOUBLE_LOW8_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x2, 5 } }, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x3, 0 } }, .reg_cls = HEX_REG_CLASS_GENERAL_DOUBLE_LOW8_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x2, 5 } }, .isa_id = 'u', .syntax = 14 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -258,8 +259,8 @@ static const HexInsnTemplate templates_sub_A[] = { .encoding = { .mask = 0xf0001f98, .op = 0x1c18 }, .id = HEX_INS_SA1_COMBINE3I, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x3, 0 } }, .reg_cls = HEX_REG_CLASS_GENERAL_DOUBLE_LOW8_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x2, 5 } }, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x3, 0 } }, .reg_cls = HEX_REG_CLASS_GENERAL_DOUBLE_LOW8_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x2, 5 } }, .isa_id = 'u', .syntax = 14 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -271,8 +272,8 @@ static const HexInsnTemplate templates_sub_A[] = { .encoding = { .mask = 0xf0001f08, .op = 0x1d08 }, .id = HEX_INS_SA1_COMBINERZ, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x3, 0 } }, .reg_cls = HEX_REG_CLASS_GENERAL_DOUBLE_LOW8_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 4 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | 
HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x3, 0 } }, .reg_cls = HEX_REG_CLASS_GENERAL_DOUBLE_LOW8_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 4 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 's', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -284,8 +285,8 @@ static const HexInsnTemplate templates_sub_A[] = { .encoding = { .mask = 0xf0001f08, .op = 0x1d00 }, .id = HEX_INS_SA1_COMBINEZR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x3, 0 } }, .reg_cls = HEX_REG_CLASS_GENERAL_DOUBLE_LOW8_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 4 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x3, 0 } }, .reg_cls = HEX_REG_CLASS_GENERAL_DOUBLE_LOW8_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 4 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 's', .syntax = 14 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -297,8 +298,8 @@ static const HexInsnTemplate templates_sub_A[] = { .encoding = { .mask = 0xf0001f00, .op = 0x1300 }, .id = HEX_INS_SA1_DEC, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x4, 0 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 4 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x4, 0 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 4 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 's', .syntax = 7 }, { .info = HEX_OP_TEMPLATE_TYPE_IMM_CONST, .syntax = 
8 }, }, .pred = HEX_NOPRED, @@ -311,8 +312,8 @@ static const HexInsnTemplate templates_sub_A[] = { .encoding = { .mask = 0xf0001f00, .op = 0x1100 }, .id = HEX_INS_SA1_INC, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x4, 0 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 4 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x4, 0 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 4 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 's', .syntax = 7 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -324,8 +325,8 @@ static const HexInsnTemplate templates_sub_A[] = { .encoding = { .mask = 0xf0001c00, .op = 0x800 }, .id = HEX_INS_SA1_SETI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x4, 0 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x6, 4 } }, .syntax = 3 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x4, 0 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x6, 4 } }, .isa_id = 'u', .syntax = 3 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -337,7 +338,7 @@ static const HexInsnTemplate templates_sub_A[] = { .encoding = { .mask = 0xf0001ff0, .op = 0x1a00 }, .id = HEX_INS_SA1_SETIN1, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x4, 0 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | 
HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x4, 0 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 'd', .syntax = 0 }, { .info = HEX_OP_TEMPLATE_TYPE_IMM_CONST, .syntax = 3 }, }, .pred = HEX_NOPRED, @@ -350,8 +351,8 @@ static const HexInsnTemplate templates_sub_A[] = { .encoding = { .mask = 0xf0001f00, .op = 0x1500 }, .id = HEX_INS_SA1_SXTB, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x4, 0 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 4 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x4, 0 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 4 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 's', .syntax = 8 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -363,8 +364,8 @@ static const HexInsnTemplate templates_sub_A[] = { .encoding = { .mask = 0xf0001f00, .op = 0x1400 }, .id = HEX_INS_SA1_SXTH, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x4, 0 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 4 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x4, 0 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 4 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 's', .syntax = 8 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -376,8 +377,8 @@ static const HexInsnTemplate templates_sub_A[] = { .encoding = { .mask = 0xf0001f00, .op = 0x1000 }, .id = HEX_INS_SA1_TFR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 
0x4, 0 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 4 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 3 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x4, 0 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 4 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 's', .syntax = 3 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -389,8 +390,8 @@ static const HexInsnTemplate templates_sub_A[] = { .encoding = { .mask = 0xf0001f00, .op = 0x1700 }, .id = HEX_INS_SA1_ZXTB, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x4, 0 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 4 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x4, 0 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 4 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 's', .syntax = 7 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -402,8 +403,8 @@ static const HexInsnTemplate templates_sub_A[] = { .encoding = { .mask = 0xf0001f00, .op = 0x1600 }, .id = HEX_INS_SA1_ZXTH, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x4, 0 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 4 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x4, 0 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 4 } }, .reg_cls = 
HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 's', .syntax = 8 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -415,8 +416,8 @@ static const HexInsnTemplate templates_sub_A[] = { .encoding = { .mask = 0xf0001c00, .op = 0x800 }, .id = HEX_INS_UNDOCUMENTED_SA2_TFRSI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x4, 0 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x6, 4 } }, .syntax = 3 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x4, 0 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x6, 4 } }, .isa_id = 'u', .syntax = 3 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -432,9 +433,9 @@ static const HexInsnTemplate templates_sub_L1[] = { .encoding = { .mask = 0xf0001000, .op = 0x0 }, .id = HEX_INS_SL1_LOADRI_IO, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x4, 0 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 4 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 8 } }, .imm_scale = 2, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x4, 0 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 4 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 8 } }, .isa_id = 'u', .imm_scale = 2, .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -446,9 +447,9 @@ static const HexInsnTemplate 
templates_sub_L1[] = { .encoding = { .mask = 0xf0001000, .op = 0x1000 }, .id = HEX_INS_SL1_LOADRUB_IO, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x4, 0 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 4 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 8 } }, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x4, 0 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 4 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 's', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 8 } }, .isa_id = 'u', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -522,9 +523,9 @@ static const HexInsnTemplate templates_sub_L2[] = { .encoding = { .mask = 0xf0001800, .op = 0x1000 }, .id = HEX_INS_SL2_LOADRB_IO, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x4, 0 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 4 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 } }, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x4, 0 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 4 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 } }, .isa_id = 'u', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = 
RZ_TYPE_COND_AL, @@ -536,8 +537,8 @@ static const HexInsnTemplate templates_sub_L2[] = { .encoding = { .mask = 0xf0001f00, .op = 0x1e00 }, .id = HEX_INS_SL2_LOADRD_SP, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x3, 0 } }, .reg_cls = HEX_REG_CLASS_GENERAL_DOUBLE_LOW8_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x5, 3 } }, .imm_scale = 3, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x3, 0 } }, .reg_cls = HEX_REG_CLASS_GENERAL_DOUBLE_LOW8_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x5, 3 } }, .isa_id = 'u', .imm_scale = 3, .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -549,9 +550,9 @@ static const HexInsnTemplate templates_sub_L2[] = { .encoding = { .mask = 0xf0001800, .op = 0x0 }, .id = HEX_INS_SL2_LOADRH_IO, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x4, 0 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 4 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 } }, .imm_scale = 1, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x4, 0 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 4 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 } }, .isa_id = 'u', .imm_scale = 1, .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -563,8 +564,8 @@ static const 
HexInsnTemplate templates_sub_L2[] = { .encoding = { .mask = 0xf0001e00, .op = 0x1c00 }, .id = HEX_INS_SL2_LOADRI_SP, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x4, 0 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x5, 4 } }, .imm_scale = 2, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x4, 0 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x5, 4 } }, .isa_id = 'u', .imm_scale = 2, .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -576,9 +577,9 @@ static const HexInsnTemplate templates_sub_L2[] = { .encoding = { .mask = 0xf0001800, .op = 0x800 }, .id = HEX_INS_SL2_LOADRUH_IO, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x4, 0 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 4 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 } }, .imm_scale = 1, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x4, 0 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 4 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 's', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 } }, .isa_id = 'u', .imm_scale = 1, .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -643,9 +644,9 @@ static const HexInsnTemplate templates_sub_S1[] = { .encoding = { .mask = 0xf0001000, .op = 0x1000 }, .id = HEX_INS_SS1_STOREB_IO, .ops = { 
- { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 4 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 8 } }, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 0 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 4 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 's', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 8 } }, .isa_id = 'u', .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 0 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 't', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -657,9 +658,9 @@ static const HexInsnTemplate templates_sub_S1[] = { .encoding = { .mask = 0xf0001000, .op = 0x0 }, .id = HEX_INS_SS1_STOREW_IO, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 4 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 8 } }, .imm_scale = 2, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 0 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 4 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 's', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 8 } }, .isa_id = 'u', .imm_scale = 2, .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 0 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 't', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -675,7 +676,7 @@ static const HexInsnTemplate templates_sub_S2[] = { .encoding = { .mask = 0xf0001e0f, .op = 0x1c00 }, .id = HEX_INS_SS2_ALLOCFRAME, .ops = { - { .info = 
HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x5, 4 } }, .imm_scale = 3, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x5, 4 } }, .isa_id = 'u', .imm_scale = 3, .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -687,8 +688,8 @@ static const HexInsnTemplate templates_sub_S2[] = { .encoding = { .mask = 0xf0001f00, .op = 0x1200 }, .id = HEX_INS_SS2_STOREBI0, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 4 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 0 } }, .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 4 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 's', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 0 } }, .isa_id = 'u', .syntax = 6 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -700,8 +701,8 @@ static const HexInsnTemplate templates_sub_S2[] = { .encoding = { .mask = 0xf0001f00, .op = 0x1300 }, .id = HEX_INS_SS2_STOREBI1, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 4 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 0 } }, .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 4 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 's', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 0 } }, .isa_id = 'u', .syntax = 6 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -713,8 +714,8 @@ static const HexInsnTemplate templates_sub_S2[] = { .encoding = { .mask = 0xf0001e00, .op = 0xa00 }, .id = HEX_INS_SS2_STORED_SP, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | 
HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x6, 3 } }, .imm_scale = 3, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x3, 0 } }, .reg_cls = HEX_REG_CLASS_GENERAL_DOUBLE_LOW8_REGS, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x6, 3 } }, .isa_id = 's', .imm_scale = 3, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x3, 0 } }, .reg_cls = HEX_REG_CLASS_GENERAL_DOUBLE_LOW8_REGS, .isa_id = 't', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -726,9 +727,9 @@ static const HexInsnTemplate templates_sub_S2[] = { .encoding = { .mask = 0xf0001800, .op = 0x0 }, .id = HEX_INS_SS2_STOREH_IO, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 4 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 } }, .imm_scale = 1, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 0 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 4 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 's', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 } }, .isa_id = 'u', .imm_scale = 1, .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 0 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 't', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -740,8 +741,8 @@ static const HexInsnTemplate templates_sub_S2[] = { .encoding = { .mask = 0xf0001e00, .op = 0x800 }, .id = HEX_INS_SS2_STOREW_SP, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x5, 4 } }, .imm_scale = 2, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = 
{ { 0x4, 0 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x5, 4 } }, .isa_id = 'u', .imm_scale = 2, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 0 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 't', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -753,8 +754,8 @@ static const HexInsnTemplate templates_sub_S2[] = { .encoding = { .mask = 0xf0001f00, .op = 0x1000 }, .id = HEX_INS_SS2_STOREWI0, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 4 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 0 } }, .imm_scale = 2, .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 4 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 's', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 0 } }, .isa_id = 'u', .imm_scale = 2, .syntax = 6 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -766,8 +767,8 @@ static const HexInsnTemplate templates_sub_S2[] = { .encoding = { .mask = 0xf0001f00, .op = 0x1100 }, .id = HEX_INS_SS2_STOREWI1, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 4 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 0 } }, .imm_scale = 2, .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 4 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 's', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 0 } }, .isa_id = 'u', .imm_scale = 2, .syntax = 6 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -783,7 +784,7 @@ static const HexInsnTemplate templates_normal_0x0[] = { .encoding = { .mask = 
0xf0000000, .op = 0x0 }, .id = HEX_INS_A4_EXT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0xe, 0 }, { 0xc, 16 } }, .imm_scale = 6, .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0xe, 0 }, { 0xc, 16 } }, .isa_id = 'u', .imm_scale = 6, .syntax = 7 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -808,9 +809,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffc03001, .op = 0x14400000 }, .id = HEX_INS_J4_CMPEQ_FP0_JUMP_NT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 8 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .imm_scale = 2, .syntax = 37 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 8 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 't', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 37 }, }, .pred = HEX_PRED_FALSE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -823,9 +824,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffc03001, .op = 0x14402000 }, .id = HEX_INS_J4_CMPEQ_FP0_JUMP_T, .ops = { - { .info = 
HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 8 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .imm_scale = 2, .syntax = 36 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 8 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 't', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 36 }, }, .pred = HEX_PRED_FALSE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -838,9 +839,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffc03001, .op = 0x14401000 }, .id = HEX_INS_J4_CMPEQ_FP1_JUMP_NT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 8 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .imm_scale = 2, .syntax = 37 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 8 } }, 
.reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 't', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 37 }, }, .pred = HEX_PRED_FALSE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -853,9 +854,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffc03001, .op = 0x14403000 }, .id = HEX_INS_J4_CMPEQ_FP1_JUMP_T, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 8 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .imm_scale = 2, .syntax = 36 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 8 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 't', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 36 }, }, .pred = HEX_PRED_FALSE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -868,9 +869,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffc03001, .op = 0x14000000 }, .id = HEX_INS_J4_CMPEQ_TP0_JUMP_NT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = 
HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 8 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .imm_scale = 2, .syntax = 36 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 8 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 't', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 36 }, }, .pred = HEX_PRED_TRUE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -883,9 +884,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffc03001, .op = 0x14002000 }, .id = HEX_INS_J4_CMPEQ_TP0_JUMP_T, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 8 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .imm_scale = 2, .syntax = 35 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 8 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 't', .syntax = 
13 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 35 }, }, .pred = HEX_PRED_TRUE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -898,9 +899,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffc03001, .op = 0x14001000 }, .id = HEX_INS_J4_CMPEQ_TP1_JUMP_NT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 8 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .imm_scale = 2, .syntax = 36 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 8 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 't', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 36 }, }, .pred = HEX_PRED_TRUE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -913,9 +914,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffc03001, .op = 0x14003000 }, .id = HEX_INS_J4_CMPEQ_TP1_JUMP_T, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks 
= { { 0x4, 8 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .imm_scale = 2, .syntax = 35 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 8 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 't', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 35 }, }, .pred = HEX_PRED_TRUE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -928,9 +929,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffc02001, .op = 0x10400000 }, .id = HEX_INS_J4_CMPEQI_FP0_JUMP_NT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x5, 8 } }, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .imm_scale = 2, .syntax = 37 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x5, 8 } }, .isa_id = 'U', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | 
HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 37 }, }, .pred = HEX_PRED_FALSE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -943,9 +944,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffc02001, .op = 0x10402000 }, .id = HEX_INS_J4_CMPEQI_FP0_JUMP_T, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x5, 8 } }, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .imm_scale = 2, .syntax = 36 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x5, 8 } }, .isa_id = 'U', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 36 }, }, .pred = HEX_PRED_FALSE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -958,9 +959,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffc02001, .op = 0x12400000 }, .id = HEX_INS_J4_CMPEQI_FP1_JUMP_NT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x5, 8 } }, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 
0x7, 1 }, { 0x2, 20 } }, .imm_scale = 2, .syntax = 37 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x5, 8 } }, .isa_id = 'U', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 37 }, }, .pred = HEX_PRED_FALSE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -973,9 +974,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffc02001, .op = 0x12402000 }, .id = HEX_INS_J4_CMPEQI_FP1_JUMP_T, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x5, 8 } }, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .imm_scale = 2, .syntax = 36 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x5, 8 } }, .isa_id = 'U', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 36 }, }, .pred = HEX_PRED_FALSE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -988,9 +989,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffc02001, .op = 0x10000000 }, .id = 
HEX_INS_J4_CMPEQI_TP0_JUMP_NT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x5, 8 } }, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .imm_scale = 2, .syntax = 36 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x5, 8 } }, .isa_id = 'U', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 36 }, }, .pred = HEX_PRED_TRUE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -1003,9 +1004,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffc02001, .op = 0x10002000 }, .id = HEX_INS_J4_CMPEQI_TP0_JUMP_T, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x5, 8 } }, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .imm_scale = 2, .syntax = 35 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x5, 8 } }, .isa_id = 'U', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | 
HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 35 }, }, .pred = HEX_PRED_TRUE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -1018,9 +1019,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffc02001, .op = 0x12000000 }, .id = HEX_INS_J4_CMPEQI_TP1_JUMP_NT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x5, 8 } }, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .imm_scale = 2, .syntax = 36 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x5, 8 } }, .isa_id = 'U', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 36 }, }, .pred = HEX_PRED_TRUE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -1033,9 +1034,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffc02001, .op = 0x12002000 }, .id = HEX_INS_J4_CMPEQI_TP1_JUMP_T, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x5, 8 } }, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | 
HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .imm_scale = 2, .syntax = 35 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x5, 8 } }, .isa_id = 'U', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 35 }, }, .pred = HEX_PRED_TRUE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -1048,9 +1049,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffc03f01, .op = 0x11c00000 }, .id = HEX_INS_J4_CMPEQN1_FP0_JUMP_NT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 's', .syntax = 12 }, { .info = HEX_OP_TEMPLATE_TYPE_IMM_CONST, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .imm_scale = 2, .syntax = 37 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 37 }, }, .pred = HEX_PRED_FALSE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -1063,9 +1064,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffc03f01, .op = 
0x11c02000 }, .id = HEX_INS_J4_CMPEQN1_FP0_JUMP_T, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 's', .syntax = 12 }, { .info = HEX_OP_TEMPLATE_TYPE_IMM_CONST, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .imm_scale = 2, .syntax = 36 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 36 }, }, .pred = HEX_PRED_FALSE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -1078,9 +1079,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffc03f01, .op = 0x13c00000 }, .id = HEX_INS_J4_CMPEQN1_FP1_JUMP_NT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 's', .syntax = 12 }, { .info = HEX_OP_TEMPLATE_TYPE_IMM_CONST, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .imm_scale = 2, .syntax = 37 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .isa_id = 'r', .imm_scale = 
2, .syntax = 37 }, }, .pred = HEX_PRED_FALSE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -1093,9 +1094,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffc03f01, .op = 0x13c02000 }, .id = HEX_INS_J4_CMPEQN1_FP1_JUMP_T, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 's', .syntax = 12 }, { .info = HEX_OP_TEMPLATE_TYPE_IMM_CONST, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .imm_scale = 2, .syntax = 36 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 36 }, }, .pred = HEX_PRED_FALSE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -1108,9 +1109,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffc03f01, .op = 0x11800000 }, .id = HEX_INS_J4_CMPEQN1_TP0_JUMP_NT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 's', .syntax = 12 }, { .info = HEX_OP_TEMPLATE_TYPE_IMM_CONST, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .imm_scale = 2, .syntax = 36 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | 
HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 36 }, }, .pred = HEX_PRED_TRUE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -1123,9 +1124,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffc03f01, .op = 0x11802000 }, .id = HEX_INS_J4_CMPEQN1_TP0_JUMP_T, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 's', .syntax = 12 }, { .info = HEX_OP_TEMPLATE_TYPE_IMM_CONST, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .imm_scale = 2, .syntax = 35 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 35 }, }, .pred = HEX_PRED_TRUE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -1138,9 +1139,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffc03f01, .op = 0x13800000 }, .id = HEX_INS_J4_CMPEQN1_TP1_JUMP_NT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 's', .syntax = 12 }, { .info = HEX_OP_TEMPLATE_TYPE_IMM_CONST, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | 
HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .imm_scale = 2, .syntax = 36 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 36 }, }, .pred = HEX_PRED_TRUE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -1153,9 +1154,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffc03f01, .op = 0x13802000 }, .id = HEX_INS_J4_CMPEQN1_TP1_JUMP_T, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 's', .syntax = 12 }, { .info = HEX_OP_TEMPLATE_TYPE_IMM_CONST, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .imm_scale = 2, .syntax = 35 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 35 }, }, .pred = HEX_PRED_TRUE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -1168,9 +1169,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffc03001, .op = 0x14c00000 }, .id = HEX_INS_J4_CMPGT_FP0_JUMP_NT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 8 } 
}, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .imm_scale = 2, .syntax = 37 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 8 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 't', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 37 }, }, .pred = HEX_PRED_FALSE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -1183,9 +1184,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffc03001, .op = 0x14c02000 }, .id = HEX_INS_J4_CMPGT_FP0_JUMP_T, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 8 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .imm_scale = 2, .syntax = 36 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 8 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 't', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | 
HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 36 }, }, .pred = HEX_PRED_FALSE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -1198,9 +1199,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffc03001, .op = 0x14c01000 }, .id = HEX_INS_J4_CMPGT_FP1_JUMP_NT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 8 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .imm_scale = 2, .syntax = 37 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 8 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 't', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 37 }, }, .pred = HEX_PRED_FALSE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -1213,9 +1214,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffc03001, .op = 0x14c03000 }, .id = HEX_INS_J4_CMPGT_FP1_JUMP_T, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 8 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 
13 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .imm_scale = 2, .syntax = 36 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 8 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 't', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 36 }, }, .pred = HEX_PRED_FALSE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -1228,9 +1229,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffc03001, .op = 0x14800000 }, .id = HEX_INS_J4_CMPGT_TP0_JUMP_NT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 8 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .imm_scale = 2, .syntax = 36 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 8 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 't', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | 
HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 36 }, }, .pred = HEX_PRED_TRUE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -1243,9 +1244,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffc03001, .op = 0x14802000 }, .id = HEX_INS_J4_CMPGT_TP0_JUMP_T, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 8 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .imm_scale = 2, .syntax = 35 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 8 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 't', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 35 }, }, .pred = HEX_PRED_TRUE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -1258,9 +1259,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffc03001, .op = 0x14801000 }, .id = HEX_INS_J4_CMPGT_TP1_JUMP_NT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 8 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | 
HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .imm_scale = 2, .syntax = 36 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 8 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 't', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 36 }, }, .pred = HEX_PRED_TRUE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -1273,9 +1274,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffc03001, .op = 0x14803000 }, .id = HEX_INS_J4_CMPGT_TP1_JUMP_T, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 8 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .imm_scale = 2, .syntax = 35 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 8 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 't', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .isa_id = 
'r', .imm_scale = 2, .syntax = 35 }, }, .pred = HEX_PRED_TRUE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -1288,9 +1289,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffc02001, .op = 0x10c00000 }, .id = HEX_INS_J4_CMPGTI_FP0_JUMP_NT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x5, 8 } }, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .imm_scale = 2, .syntax = 37 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x5, 8 } }, .isa_id = 'U', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 37 }, }, .pred = HEX_PRED_FALSE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -1303,9 +1304,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffc02001, .op = 0x10c02000 }, .id = HEX_INS_J4_CMPGTI_FP0_JUMP_T, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x5, 8 } }, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .imm_scale = 2, .syntax = 36 }, + { .info = 
HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x5, 8 } }, .isa_id = 'U', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 36 }, }, .pred = HEX_PRED_FALSE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -1318,9 +1319,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffc02001, .op = 0x12c00000 }, .id = HEX_INS_J4_CMPGTI_FP1_JUMP_NT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x5, 8 } }, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .imm_scale = 2, .syntax = 37 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x5, 8 } }, .isa_id = 'U', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 37 }, }, .pred = HEX_PRED_FALSE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -1333,9 +1334,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffc02001, .op = 0x12c02000 }, .id = HEX_INS_J4_CMPGTI_FP1_JUMP_T, .ops = { - { .info = 
HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x5, 8 } }, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .imm_scale = 2, .syntax = 36 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x5, 8 } }, .isa_id = 'U', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 36 }, }, .pred = HEX_PRED_FALSE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -1348,9 +1349,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffc02001, .op = 0x10800000 }, .id = HEX_INS_J4_CMPGTI_TP0_JUMP_NT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x5, 8 } }, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .imm_scale = 2, .syntax = 36 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x5, 8 } }, .isa_id = 'U', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | 
HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 36 }, }, .pred = HEX_PRED_TRUE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -1363,9 +1364,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffc02001, .op = 0x10802000 }, .id = HEX_INS_J4_CMPGTI_TP0_JUMP_T, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x5, 8 } }, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .imm_scale = 2, .syntax = 35 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x5, 8 } }, .isa_id = 'U', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 35 }, }, .pred = HEX_PRED_TRUE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -1378,9 +1379,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffc02001, .op = 0x12800000 }, .id = HEX_INS_J4_CMPGTI_TP1_JUMP_NT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x5, 8 } }, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | 
HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .imm_scale = 2, .syntax = 36 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x5, 8 } }, .isa_id = 'U', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 36 }, }, .pred = HEX_PRED_TRUE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -1393,9 +1394,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffc02001, .op = 0x12802000 }, .id = HEX_INS_J4_CMPGTI_TP1_JUMP_T, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x5, 8 } }, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .imm_scale = 2, .syntax = 35 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x5, 8 } }, .isa_id = 'U', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 35 }, }, .pred = HEX_PRED_TRUE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -1408,9 +1409,9 @@ static const HexInsnTemplate 
templates_normal_0x1[] = { .encoding = { .mask = 0xffc03f01, .op = 0x11c00100 }, .id = HEX_INS_J4_CMPGTN1_FP0_JUMP_NT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 's', .syntax = 12 }, { .info = HEX_OP_TEMPLATE_TYPE_IMM_CONST, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .imm_scale = 2, .syntax = 37 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 37 }, }, .pred = HEX_PRED_FALSE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -1423,9 +1424,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffc03f01, .op = 0x11c02100 }, .id = HEX_INS_J4_CMPGTN1_FP0_JUMP_T, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 's', .syntax = 12 }, { .info = HEX_OP_TEMPLATE_TYPE_IMM_CONST, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .imm_scale = 2, .syntax = 36 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, 
.masks = { { 0x7, 1 }, { 0x2, 20 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 36 }, }, .pred = HEX_PRED_FALSE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -1438,9 +1439,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffc03f01, .op = 0x13c00100 }, .id = HEX_INS_J4_CMPGTN1_FP1_JUMP_NT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 's', .syntax = 12 }, { .info = HEX_OP_TEMPLATE_TYPE_IMM_CONST, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .imm_scale = 2, .syntax = 37 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 37 }, }, .pred = HEX_PRED_FALSE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -1453,9 +1454,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffc03f01, .op = 0x13c02100 }, .id = HEX_INS_J4_CMPGTN1_FP1_JUMP_T, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 's', .syntax = 12 }, { .info = HEX_OP_TEMPLATE_TYPE_IMM_CONST, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, 
.imm_scale = 2, .syntax = 36 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 36 }, }, .pred = HEX_PRED_FALSE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -1468,9 +1469,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffc03f01, .op = 0x11800100 }, .id = HEX_INS_J4_CMPGTN1_TP0_JUMP_NT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 's', .syntax = 12 }, { .info = HEX_OP_TEMPLATE_TYPE_IMM_CONST, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .imm_scale = 2, .syntax = 36 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 36 }, }, .pred = HEX_PRED_TRUE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -1483,9 +1484,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffc03f01, .op = 0x11802100 }, .id = HEX_INS_J4_CMPGTN1_TP0_JUMP_T, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 's', .syntax = 12 }, { .info = HEX_OP_TEMPLATE_TYPE_IMM_CONST, .syntax = 13 }, - { .info = 
HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .imm_scale = 2, .syntax = 35 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 35 }, }, .pred = HEX_PRED_TRUE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -1498,9 +1499,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffc03f01, .op = 0x13800100 }, .id = HEX_INS_J4_CMPGTN1_TP1_JUMP_NT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 's', .syntax = 12 }, { .info = HEX_OP_TEMPLATE_TYPE_IMM_CONST, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .imm_scale = 2, .syntax = 36 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 36 }, }, .pred = HEX_PRED_TRUE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -1513,9 +1514,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffc03f01, .op = 0x13802100 }, .id = HEX_INS_J4_CMPGTN1_TP1_JUMP_T, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 12 
}, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 's', .syntax = 12 }, { .info = HEX_OP_TEMPLATE_TYPE_IMM_CONST, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .imm_scale = 2, .syntax = 35 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 35 }, }, .pred = HEX_PRED_TRUE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -1528,9 +1529,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffc03001, .op = 0x15400000 }, .id = HEX_INS_J4_CMPGTU_FP0_JUMP_NT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 8 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 14 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .imm_scale = 2, .syntax = 38 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 's', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 8 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 't', .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .isa_id = 
'r', .imm_scale = 2, .syntax = 38 }, }, .pred = HEX_PRED_FALSE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -1543,9 +1544,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffc03001, .op = 0x15402000 }, .id = HEX_INS_J4_CMPGTU_FP0_JUMP_T, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 8 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 14 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .imm_scale = 2, .syntax = 37 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 's', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 8 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 't', .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 37 }, }, .pred = HEX_PRED_FALSE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -1558,9 +1559,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffc03001, .op = 0x15401000 }, .id = HEX_INS_J4_CMPGTU_FP1_JUMP_NT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 8 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 14 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | 
HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .imm_scale = 2, .syntax = 38 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 's', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 8 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 't', .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 38 }, }, .pred = HEX_PRED_FALSE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -1573,9 +1574,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffc03001, .op = 0x15403000 }, .id = HEX_INS_J4_CMPGTU_FP1_JUMP_T, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 8 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 14 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .imm_scale = 2, .syntax = 37 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 's', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 8 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 't', .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 37 }, }, .pred = HEX_PRED_FALSE | 
HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -1588,9 +1589,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffc03001, .op = 0x15000000 }, .id = HEX_INS_J4_CMPGTU_TP0_JUMP_NT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 8 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 14 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .imm_scale = 2, .syntax = 37 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 's', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 8 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 't', .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 37 }, }, .pred = HEX_PRED_TRUE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -1603,9 +1604,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffc03001, .op = 0x15002000 }, .id = HEX_INS_J4_CMPGTU_TP0_JUMP_T, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 8 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 14 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } 
}, .imm_scale = 2, .syntax = 36 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 's', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 8 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 't', .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 36 }, }, .pred = HEX_PRED_TRUE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -1618,9 +1619,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffc03001, .op = 0x15001000 }, .id = HEX_INS_J4_CMPGTU_TP1_JUMP_NT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 8 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 14 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .imm_scale = 2, .syntax = 37 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 's', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 8 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 't', .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 37 }, }, .pred = HEX_PRED_TRUE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -1633,9 +1634,9 @@ static const 
HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffc03001, .op = 0x15003000 }, .id = HEX_INS_J4_CMPGTU_TP1_JUMP_T, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 8 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 14 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .imm_scale = 2, .syntax = 36 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 's', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 8 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 't', .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 36 }, }, .pred = HEX_PRED_TRUE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -1648,9 +1649,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffc02001, .op = 0x11400000 }, .id = HEX_INS_J4_CMPGTUI_FP0_JUMP_NT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x5, 8 } }, .syntax = 14 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .imm_scale = 2, .syntax = 38 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = 
HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 's', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x5, 8 } }, .isa_id = 'U', .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 38 }, }, .pred = HEX_PRED_FALSE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -1663,9 +1664,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffc02001, .op = 0x11402000 }, .id = HEX_INS_J4_CMPGTUI_FP0_JUMP_T, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x5, 8 } }, .syntax = 14 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .imm_scale = 2, .syntax = 37 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 's', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x5, 8 } }, .isa_id = 'U', .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 37 }, }, .pred = HEX_PRED_FALSE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -1678,9 +1679,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffc02001, .op = 0x13400000 }, .id = HEX_INS_J4_CMPGTUI_FP1_JUMP_NT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = 
HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x5, 8 } }, .syntax = 14 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .imm_scale = 2, .syntax = 38 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 's', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x5, 8 } }, .isa_id = 'U', .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 38 }, }, .pred = HEX_PRED_FALSE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -1693,9 +1694,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffc02001, .op = 0x13402000 }, .id = HEX_INS_J4_CMPGTUI_FP1_JUMP_T, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x5, 8 } }, .syntax = 14 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .imm_scale = 2, .syntax = 37 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 's', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x5, 8 } }, .isa_id = 'U', .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | 
HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 37 }, }, .pred = HEX_PRED_FALSE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -1708,9 +1709,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffc02001, .op = 0x11000000 }, .id = HEX_INS_J4_CMPGTUI_TP0_JUMP_NT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x5, 8 } }, .syntax = 14 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .imm_scale = 2, .syntax = 37 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 's', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x5, 8 } }, .isa_id = 'U', .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 37 }, }, .pred = HEX_PRED_TRUE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -1723,9 +1724,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffc02001, .op = 0x11002000 }, .id = HEX_INS_J4_CMPGTUI_TP0_JUMP_T, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x5, 8 } }, .syntax = 14 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { 
{ 0x7, 1 }, { 0x2, 20 } }, .imm_scale = 2, .syntax = 36 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 's', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x5, 8 } }, .isa_id = 'U', .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 36 }, }, .pred = HEX_PRED_TRUE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -1738,9 +1739,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffc02001, .op = 0x13000000 }, .id = HEX_INS_J4_CMPGTUI_TP1_JUMP_NT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x5, 8 } }, .syntax = 14 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .imm_scale = 2, .syntax = 37 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 's', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x5, 8 } }, .isa_id = 'U', .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 37 }, }, .pred = HEX_PRED_TRUE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -1753,9 +1754,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffc02001, .op = 0x13002000 }, .id = 
HEX_INS_J4_CMPGTUI_TP1_JUMP_T, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x5, 8 } }, .syntax = 14 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .imm_scale = 2, .syntax = 36 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 's', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x5, 8 } }, .isa_id = 'U', .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 36 }, }, .pred = HEX_PRED_TRUE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -1768,9 +1769,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffc00001, .op = 0x16000000 }, .id = HEX_INS_J4_JUMPSETI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x6, 8 } }, .syntax = 3 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .imm_scale = 2, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x6, 8 } }, .isa_id = 'U', .syntax = 3 }, 
+ { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -1783,9 +1784,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffc03001, .op = 0x17000000 }, .id = HEX_INS_J4_JUMPSETR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x4, 8 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 3 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .imm_scale = 2, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x4, 8 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 's', .syntax = 3 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -1798,8 +1799,8 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffc03f01, .op = 0x11c00300 }, .id = HEX_INS_J4_TSTBIT0_FP0_JUMP_NT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | 
HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .imm_scale = 2, .syntax = 39 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 39 }, }, .pred = HEX_PRED_FALSE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -1812,8 +1813,8 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffc03f01, .op = 0x11c02300 }, .id = HEX_INS_J4_TSTBIT0_FP0_JUMP_T, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .imm_scale = 2, .syntax = 38 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 38 }, }, .pred = HEX_PRED_FALSE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -1826,8 +1827,8 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffc03f01, .op = 0x13c00300 }, .id = HEX_INS_J4_TSTBIT0_FP1_JUMP_NT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, 
.reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .imm_scale = 2, .syntax = 39 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 39 }, }, .pred = HEX_PRED_FALSE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -1840,8 +1841,8 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffc03f01, .op = 0x13c02300 }, .id = HEX_INS_J4_TSTBIT0_FP1_JUMP_T, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .imm_scale = 2, .syntax = 38 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 38 }, }, .pred = HEX_PRED_FALSE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -1854,8 +1855,8 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffc03f01, .op = 0x11800300 }, .id = 
HEX_INS_J4_TSTBIT0_TP0_JUMP_NT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .imm_scale = 2, .syntax = 38 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 38 }, }, .pred = HEX_PRED_TRUE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -1868,8 +1869,8 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffc03f01, .op = 0x11802300 }, .id = HEX_INS_J4_TSTBIT0_TP0_JUMP_T, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .imm_scale = 2, .syntax = 37 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 37 }, }, .pred = HEX_PRED_TRUE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -1882,8 +1883,8 @@ static const 
HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffc03f01, .op = 0x13800300 }, .id = HEX_INS_J4_TSTBIT0_TP1_JUMP_NT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .imm_scale = 2, .syntax = 38 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 38 }, }, .pred = HEX_PRED_TRUE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -1896,8 +1897,8 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffc03f01, .op = 0x13802300 }, .id = HEX_INS_J4_TSTBIT0_TP1_JUMP_T, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .imm_scale = 2, .syntax = 37 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x4, 16 } }, .reg_cls = HEX_REG_CLASS_GENERAL_SUB_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 37 }, }, .pred = 
HEX_PRED_TRUE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -1910,8 +1911,8 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe03fe0, .op = 0x19c00040 }, .id = HEX_INS_V6_LVSPLATB, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -1923,8 +1924,8 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe03fe0, .op = 0x19c00020 }, .id = HEX_INS_V6_LVSPLATH, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -1936,8 +1937,8 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe03fe0, .op = 0x19a00020 }, .id = HEX_INS_V6_LVSPLATW, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, 
.syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -1949,9 +1950,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xff3f3cfc, .op = 0x1e030000 }, .id = HEX_INS_V6_PRED_AND, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 22 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .isa_id = 's', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 22 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .isa_id = 't', .syntax = 8 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -1963,9 +1964,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xff3f3cfc, .op = 0x1e030014 }, .id = HEX_INS_V6_PRED_AND_N, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 22 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = 
HEX_REG_CLASS_HVX_QR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .isa_id = 's', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 22 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .isa_id = 't', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -1977,8 +1978,8 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffff3cfc, .op = 0x1e030008 }, .id = HEX_INS_V6_PRED_NOT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .isa_id = 's', .syntax = 7 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -1990,9 +1991,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xff3f3cfc, .op = 0x1e030004 }, .id = HEX_INS_V6_PRED_OR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 22 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .isa_id = 's', .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 22 } }, .reg_cls = 
HEX_REG_CLASS_HVX_QR, .isa_id = 't', .syntax = 7 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -2004,9 +2005,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xff3f3cfc, .op = 0x1e030010 }, .id = HEX_INS_V6_PRED_OR_N, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 22 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .isa_id = 's', .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 22 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .isa_id = 't', .syntax = 8 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -2018,8 +2019,8 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe03ffc, .op = 0x19a00044 }, .id = HEX_INS_V6_PRED_SCALAR2, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -2031,8 +2032,8 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe03ffc, .op = 0x19a0004c }, .id = 
HEX_INS_V6_PRED_SCALAR2V2, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -2044,9 +2045,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xff3f3cfc, .op = 0x1e03000c }, .id = HEX_INS_V6_PRED_XOR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 22 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .isa_id = 's', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 22 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .isa_id = 't', .syntax = 8 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -2058,9 +2059,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xff3f3cfc, .op = 0x1e030018 }, .id = HEX_INS_V6_SHUFFEQH, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, 
.reg_cls = HEX_REG_CLASS_HVX_QR, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 22 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .isa_id = 's', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 22 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .isa_id = 't', .syntax = 16 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -2072,9 +2073,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xff3f3cfc, .op = 0x1e03001c }, .id = HEX_INS_V6_SHUFFEQW, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 22 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .isa_id = 's', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 22 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .isa_id = 't', .syntax = 16 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -2086,10 +2087,10 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe02080, .op = 0x1f402080 }, .id = HEX_INS_V6_V6MPYHUBS10, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 0 }, - { .info = 
HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 15 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x2, 5 } }, .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'v', .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x2, 5 } }, .isa_id = 'u', .syntax = 18 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -2101,10 +2102,10 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe02080, .op = 0x1f202080 }, .id = HEX_INS_V6_V6MPYHUBS10_VXX, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 16 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x2, 5 } }, .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, 
.isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'u', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'v', .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x2, 5 } }, .isa_id = 'u', .syntax = 19 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -2116,10 +2117,10 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe02080, .op = 0x1f402000 }, .id = HEX_INS_V6_V6MPYVUBS10, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 15 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x2, 5 } }, .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'v', .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x2, 5 } }, .isa_id = 'u', .syntax = 18 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -2131,10 +2132,10 @@ 
static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe02080, .op = 0x1f202000 }, .id = HEX_INS_V6_V6MPYVUBS10_VXX, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 16 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x2, 5 } }, .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'u', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'v', .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x2, 5 } }, .isa_id = 'u', .syntax = 19 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -2146,8 +2147,8 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffff20e0, .op = 0x1e062080 }, .id = HEX_INS_V6_VABS_HF, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, 
+ { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -2159,8 +2160,8 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffff20e0, .op = 0x1e0620a0 }, .id = HEX_INS_V6_VABS_SF, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -2172,8 +2173,8 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffff20e0, .op = 0x1e010080 }, .id = HEX_INS_V6_VABSB, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -2185,8 +2186,8 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffff20e0, .op = 0x1e0100a0 }, .id = HEX_INS_V6_VABSB_SAT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info 
= HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -2198,9 +2199,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1cc00020 }, .id = HEX_INS_V6_VABSDIFFH, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 15 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 18 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -2212,9 +2213,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1cc00000 }, .id = HEX_INS_V6_VABSDIFFUB, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 15 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 19 }, + { .info = 
HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 19 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -2226,9 +2227,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1cc00040 }, .id = HEX_INS_V6_VABSDIFFUH, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 15 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 19 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -2240,9 +2241,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1cc00060 }, .id = HEX_INS_V6_VABSDIFFW, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 15 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 18 }, + { .info = 
HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 18 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -2254,8 +2255,8 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffff20e0, .op = 0x1e000000 }, .id = HEX_INS_V6_VABSH, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -2267,8 +2268,8 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffff20e0, .op = 0x1e000020 }, .id = HEX_INS_V6_VABSH_SAT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -2280,8 +2281,8 @@ static const 
HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffff20e0, .op = 0x1e000040 }, .id = HEX_INS_V6_VABSW, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -2293,8 +2294,8 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffff20e0, .op = 0x1e000060 }, .id = HEX_INS_V6_VABSW_SAT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -2306,9 +2307,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1f602060 }, .id = HEX_INS_V6_VADD_HF, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 
17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 17 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -2320,9 +2321,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1fa020e0 }, .id = HEX_INS_V6_VADD_HF_HF, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -2334,9 +2335,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1f602040 }, .id = HEX_INS_V6_VADD_QF16, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 19 }, 
+ { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 19 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -2348,9 +2349,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1f602080 }, .id = HEX_INS_V6_VADD_QF16_MIX, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 19 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -2362,9 +2363,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1fa02000 }, .id = HEX_INS_V6_VADD_QF32, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 19 }, + { 
.info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 19 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -2376,9 +2377,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1fa02040 }, .id = HEX_INS_V6_VADD_QF32_MIX, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 19 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -2390,9 +2391,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1fa02020 }, .id = HEX_INS_V6_VADD_SF, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 17 }, + { .info 
= HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 17 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -2404,9 +2405,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1d4020c0 }, .id = HEX_INS_V6_VADD_SF_BF, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -2418,9 +2419,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1f802080 }, .id = HEX_INS_V6_VADD_SF_HF, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, - { .info = 
HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -2432,9 +2433,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1f8020c0 }, .id = HEX_INS_V6_VADD_SF_SF, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -2446,9 +2447,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1fa000c0 }, .id = HEX_INS_V6_VADDB, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, 
.syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -2460,9 +2461,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1c600080 }, .id = HEX_INS_V6_VADDB_DV, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'u', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'v', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -2474,9 +2475,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xff3f20e0, .op = 0x1e012060 }, .id = HEX_INS_V6_VADDBNQ, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 
22 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 22 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .isa_id = 'v', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'x', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -2488,9 +2489,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xff3f20e0, .op = 0x1e012000 }, .id = HEX_INS_V6_VADDBQ, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 22 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 22 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .isa_id = 'v', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'x', .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -2502,9 +2503,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1f000000 }, .id = HEX_INS_V6_VADDBSAT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, 
.masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -2516,9 +2517,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1ea00000 }, .id = HEX_INS_V6_VADDBSAT_DV, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'u', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'v', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -2530,10 +2531,10 @@ static const 
HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe02080, .op = 0x1ca02000 }, .id = HEX_INS_V6_VADDCARRY, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .isa_id = 'x', .syntax = 16 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -2545,10 +2546,10 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe02080, .op = 0x1da02000 }, .id = HEX_INS_V6_VADDCARRYO, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .syntax = 3 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | 
HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .isa_id = 'e', .syntax = 3 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 14 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -2560,10 +2561,10 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe02080, .op = 0x1d802000 }, .id = HEX_INS_V6_VADDCARRYSAT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .isa_id = 's', .syntax = 16 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -2575,9 +2576,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1f002000 }, .id = HEX_INS_V6_VADDCLBH, .ops = { - { .info = 
HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 15 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 19 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -2589,9 +2590,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1f002020 }, .id = HEX_INS_V6_VADDCLBW, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 15 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 19 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -2603,9 +2604,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1fa000e0 }, .id = HEX_INS_V6_VADDH, .ops = { - { .info = 
HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -2617,9 +2618,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1c6000a0 }, .id = HEX_INS_V6_VADDH_DV, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'u', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'v', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = 
RZ_TYPE_COND_AL, @@ -2631,9 +2632,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xff3f20e0, .op = 0x1e012080 }, .id = HEX_INS_V6_VADDHNQ, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 22 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 22 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .isa_id = 'v', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'x', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -2645,9 +2646,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xff3f20e0, .op = 0x1e012020 }, .id = HEX_INS_V6_VADDHQ, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 22 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 22 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .isa_id = 'v', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'x', .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, 
@@ -2659,9 +2660,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1c400060 }, .id = HEX_INS_V6_VADDHSAT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -2673,9 +2674,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1c800020 }, .id = HEX_INS_V6_VADDHSAT_DV, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'u', .syntax = 10 }, + { .info = 
HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'v', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -2687,9 +2688,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1ca00080 }, .id = HEX_INS_V6_VADDHW, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -2701,9 +2702,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1c202040 }, .id = HEX_INS_V6_VADDHW_ACC, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'x', 
.syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 14 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -2715,9 +2716,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1ca00040 }, .id = HEX_INS_V6_VADDUBH, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 14 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -2729,9 +2730,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1c4020a0 }, .id = HEX_INS_V6_VADDUBH_ACC, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | 
HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -2743,9 +2744,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1c400020 }, .id = HEX_INS_V6_VADDUBSAT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -2757,9 +2758,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1c6000e0 }, .id = HEX_INS_V6_VADDUBSAT_DV, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | 
HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'v', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -2771,9 +2772,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1ea00080 }, .id = HEX_INS_V6_VADDUBUBB_SAT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -2785,9 +2786,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1c400040 }, .id = HEX_INS_V6_VADDUHSAT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = 
HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -2799,9 +2800,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1c800000 }, .id = HEX_INS_V6_VADDUHSAT_DV, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'v', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -2813,9 +2814,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1ca00060 }, .id 
= HEX_INS_V6_VADDUHW, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 14 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -2827,9 +2828,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1c402080 }, .id = HEX_INS_V6_VADDUHW_ACC, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -2841,9 
+2842,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1f600020 }, .id = HEX_INS_V6_VADDUWSAT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -2855,9 +2856,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1ea00040 }, .id = HEX_INS_V6_VADDUWSAT_DV, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'u', .syntax = 11 }, + { .info = 
HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'v', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -2869,9 +2870,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1c400000 }, .id = HEX_INS_V6_VADDW, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -2883,9 +2884,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1c6000c0 }, .id = HEX_INS_V6_VADDW_DV, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'd', 
.syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'u', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'v', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -2897,9 +2898,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xff3f20e0, .op = 0x1e0120a0 }, .id = HEX_INS_V6_VADDWNQ, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 22 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 22 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .isa_id = 'v', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'x', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -2911,9 +2912,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xff3f20e0, .op = 0x1e012040 }, .id = HEX_INS_V6_VADDWQ, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 22 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 22 } }, .reg_cls = 
HEX_REG_CLASS_HVX_QR, .isa_id = 'v', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'x', .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -2925,9 +2926,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1c400080 }, .id = HEX_INS_V6_VADDWSAT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -2939,9 +2940,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1c800040 }, .id = HEX_INS_V6_VADDWSAT_DV, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, 
.syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'u', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'v', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -2953,10 +2954,10 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xff0020e0, .op = 0x1b000000 }, .id = HEX_INS_V6_VALIGNB, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 19 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS_LOW8, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 19 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS_LOW8, .isa_id = 't', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -2968,10 +2969,10 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe02000, .op = 0x1e202000 }, .id = HEX_INS_V6_VALIGNBI, .ops = { - { 
.info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 5 } }, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 5 } }, .isa_id = 'u', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -2983,9 +2984,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1c2000a0 }, .id = HEX_INS_V6_VAND, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', 
.syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -2997,9 +2998,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe03ce0, .op = 0x19a004a0 }, .id = HEX_INS_V6_VANDNQRT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .isa_id = 'u', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -3011,9 +3012,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe03ce0, .op = 0x19602460 }, .id = HEX_INS_V6_VANDNQRT_ACC, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .isa_id = 'u', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', 
.syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -3025,9 +3026,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe03ce0, .op = 0x19a000a0 }, .id = HEX_INS_V6_VANDQRT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .isa_id = 'u', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -3039,9 +3040,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe03ce0, .op = 0x19602060 }, .id = HEX_INS_V6_VANDQRT_ACC, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .isa_id = 'u', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', 
.syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -3053,9 +3054,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xff3f20e0, .op = 0x1e032020 }, .id = HEX_INS_V6_VANDVNQV, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 22 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 22 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .isa_id = 'v', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -3067,9 +3068,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xff3f20e0, .op = 0x1e032000 }, .id = HEX_INS_V6_VANDVQV, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 22 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 22 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .isa_id = 'v', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 9 }, 
}, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -3081,9 +3082,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020fc, .op = 0x19a00048 }, .id = HEX_INS_V6_VANDVRT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -3095,9 +3096,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020fc, .op = 0x19602080 }, .id = HEX_INS_V6_VANDVRT_ACC, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 10 }, }, 
.pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -3109,9 +3110,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x19800000 }, .id = HEX_INS_V6_VASLH, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -3123,9 +3124,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x19a020a0 }, .id = HEX_INS_V6_VASLH_ACC, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 14 }, }, 
.pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -3137,9 +3138,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1fa000a0 }, .id = HEX_INS_V6_VASLHV, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -3151,9 +3152,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x196000e0 }, .id = HEX_INS_V6_VASLW, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 13 }, }, .pred = 
HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -3165,9 +3166,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x19602040 }, .id = HEX_INS_V6_VASLW_ACC, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 14 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -3179,9 +3180,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1fa00080 }, .id = HEX_INS_V6_VASLWV, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 13 }, }, .pred = 
HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -3193,9 +3194,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1aa020e0 }, .id = HEX_INS_V6_VASR_INTO, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 14 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 17 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -3207,9 +3208,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x196000c0 }, .id = HEX_INS_V6_VASRH, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = 
HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -3221,9 +3222,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x198020e0 }, .id = HEX_INS_V6_VASRH_ACC, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 14 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -3235,10 +3236,10 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xff0020e0, .op = 0x1b002000 }, .id = HEX_INS_V6_VASRHBRNDSAT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 19 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS_LOW8, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = 
HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 19 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS_LOW8, .isa_id = 't', .syntax = 16 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -3250,10 +3251,10 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xff0020e0, .op = 0x18000000 }, .id = HEX_INS_V6_VASRHBSAT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 19 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS_LOW8, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 19 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS_LOW8, .isa_id = 't', .syntax = 16 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -3265,10 +3266,10 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xff0020e0, .op = 0x1b0000e0 }, .id = HEX_INS_V6_VASRHUBRNDSAT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, 
.reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 19 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 14 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS_LOW8, .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 19 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS_LOW8, .isa_id = 't', .syntax = 17 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -3280,10 +3281,10 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xff0020e0, .op = 0x1b0000c0 }, .id = HEX_INS_V6_VASRHUBSAT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 19 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 14 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS_LOW8, .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 19 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 
0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS_LOW8, .isa_id = 't', .syntax = 17 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -3295,9 +3296,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1fa00060 }, .id = HEX_INS_V6_VASRHV, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -3309,10 +3310,10 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xff0020e0, .op = 0x180000e0 }, .id = HEX_INS_V6_VASRUHUBRNDSAT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 19 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 15 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS_LOW8, .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 
0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 19 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS_LOW8, .isa_id = 't', .syntax = 19 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -3324,10 +3325,10 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xff0020e0, .op = 0x180020a0 }, .id = HEX_INS_V6_VASRUHUBSAT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 19 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 15 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS_LOW8, .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 19 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS_LOW8, .isa_id = 't', .syntax = 19 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -3339,10 +3340,10 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xff0020e0, .op = 0x18000020 }, .id = HEX_INS_V6_VASRUWUHRNDSAT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, 
.masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 19 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 15 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS_LOW8, .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 19 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS_LOW8, .isa_id = 't', .syntax = 19 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -3354,10 +3355,10 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xff0020e0, .op = 0x18002080 }, .id = HEX_INS_V6_VASRUWUHSAT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 19 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 15 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS_LOW8, .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 19 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 15 }, + { .info = 
HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS_LOW8, .isa_id = 't', .syntax = 19 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -3369,9 +3370,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1d000060 }, .id = HEX_INS_V6_VASRVUHUBRNDSAT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -3383,9 +3384,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1d000040 }, .id = HEX_INS_V6_VASRVUHUBSAT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = 
HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -3397,9 +3398,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1d000020 }, .id = HEX_INS_V6_VASRVWUHRNDSAT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 14 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -3411,9 +3412,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1d000000 }, .id = HEX_INS_V6_VASRVWUHSAT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | 
HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 14 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -3425,9 +3426,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x196000a0 }, .id = HEX_INS_V6_VASRW, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -3439,9 +3440,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x196020a0 }, .id = HEX_INS_V6_VASRW_ACC, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 14 }, + { .info = 
HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 14 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -3453,10 +3454,10 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xff0020e0, .op = 0x1b000040 }, .id = HEX_INS_V6_VASRWH, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 19 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS_LOW8, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 19 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS_LOW8, .isa_id = 't', .syntax = 16 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -3468,10 +3469,10 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xff0020e0, .op = 0x1b000080 }, .id = HEX_INS_V6_VASRWHRNDSAT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = 
HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 19 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS_LOW8, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 19 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS_LOW8, .isa_id = 't', .syntax = 16 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -3483,10 +3484,10 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xff0020e0, .op = 0x1b000060 }, .id = HEX_INS_V6_VASRWHSAT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 19 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS_LOW8, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 19 } }, .reg_cls = 
HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS_LOW8, .isa_id = 't', .syntax = 16 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -3498,10 +3499,10 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xff0020e0, .op = 0x18000040 }, .id = HEX_INS_V6_VASRWUHRNDSAT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 19 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 14 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS_LOW8, .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 19 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS_LOW8, .isa_id = 't', .syntax = 17 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -3513,10 +3514,10 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xff0020e0, .op = 0x1b0000a0 }, .id = HEX_INS_V6_VASRWUHSAT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 19 } }, .reg_cls = 
HEX_REG_CLASS_HVX_VR, .syntax = 14 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS_LOW8, .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 19 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS_LOW8, .isa_id = 't', .syntax = 17 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -3528,9 +3529,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1fa00000 }, .id = HEX_INS_V6_VASRWV, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -3542,8 +3543,8 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffff20e0, .op = 0x1e0320e0 }, .id = HEX_INS_V6_VASSIGN, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 
0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 3 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 3 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -3555,8 +3556,8 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffff20e0, .op = 0x1e062020 }, .id = HEX_INS_V6_VASSIGN_FP, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -3568,8 +3569,8 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffff20e0, .op = 0x1e0100c0 }, .id = HEX_INS_V6_VASSIGN_TMP, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 7 }, }, .pred = HEX_NOPRED, .cond = 
RZ_TYPE_COND_AL, @@ -3581,9 +3582,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1f002080 }, .id = HEX_INS_V6_VAVGB, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -3595,9 +3596,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1f0020a0 }, .id = HEX_INS_V6_VAVGBRND, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = 
RZ_TYPE_COND_AL, @@ -3609,9 +3610,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1cc000c0 }, .id = HEX_INS_V6_VAVGH, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -3623,9 +3624,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1ce000a0 }, .id = HEX_INS_V6_VAVGHRND, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = 
RZ_TYPE_COND_AL, @@ -3637,9 +3638,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1cc00080 }, .id = HEX_INS_V6_VAVGUB, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -3651,9 +3652,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1ce00060 }, .id = HEX_INS_V6_VAVGUBRND, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = 
RZ_TYPE_COND_AL, @@ -3665,9 +3666,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1cc000a0 }, .id = HEX_INS_V6_VAVGUH, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -3679,9 +3680,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1ce00080 }, .id = HEX_INS_V6_VAVGUHRND, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = 
RZ_TYPE_COND_AL, @@ -3693,9 +3694,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1f002040 }, .id = HEX_INS_V6_VAVGUW, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -3707,9 +3708,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1f002060 }, .id = HEX_INS_V6_VAVGUWRND, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = 
RZ_TYPE_COND_AL, @@ -3721,9 +3722,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1cc000e0 }, .id = HEX_INS_V6_VAVGW, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -3735,9 +3736,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1ce000c0 }, .id = HEX_INS_V6_VAVGWRND, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = 
RZ_TYPE_COND_AL, @@ -3749,10 +3750,10 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe02080, .op = 0x1a600000 }, .id = HEX_INS_V6_VCCOMBINE, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 18 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 's', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'd', .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 19 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_VEC_TRUE, @@ -3765,8 +3766,8 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffff20e0, .op = 0x1e0200e0 }, .id = HEX_INS_V6_VCL0H, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = 
HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -3778,8 +3779,8 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffff20e0, .op = 0x1e0200a0 }, .id = HEX_INS_V6_VCL0W, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -3791,9 +3792,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffff2080, .op = 0x1a000000 }, .id = HEX_INS_V6_VCMOV, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 's', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 9 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_VEC_TRUE, @@ -3806,9 +3807,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1f4000e0 }, .id = 
HEX_INS_V6_VCOMBINE, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -3820,9 +3821,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1ea000e0 }, .id = HEX_INS_V6_VCOMBINE_TMP, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 16 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 17 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -3834,8 
+3835,8 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffff20e0, .op = 0x1e052040 }, .id = HEX_INS_V6_VCONV_H_HF, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 5 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -3847,8 +3848,8 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffff20e0, .op = 0x1e052080 }, .id = HEX_INS_V6_VCONV_HF_H, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 6 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -3860,8 +3861,8 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffff20e0, .op = 0x1e042060 }, .id = HEX_INS_V6_VCONV_HF_QF16, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks 
= { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 6 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -3873,8 +3874,8 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffff20e0, .op = 0x1e0420c0 }, .id = HEX_INS_V6_VCONV_HF_QF32, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'u', .syntax = 6 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -3886,8 +3887,8 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffff20e0, .op = 0x1e042000 }, .id = HEX_INS_V6_VCONV_SF_QF32, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 6 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -3899,8 +3900,8 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffff20e0, .op = 0x1e052060 }, .id = HEX_INS_V6_VCONV_SF_W, .ops 
= { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 6 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -3912,8 +3913,8 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffff20e0, .op = 0x1e052020 }, .id = HEX_INS_V6_VCONV_W_SF, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 5 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -3925,9 +3926,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1fc020c0 }, .id = HEX_INS_V6_VCVT_B_HF, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = 
HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 14 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -3939,9 +3940,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1d402060 }, .id = HEX_INS_V6_VCVT_BF_SF, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -3953,8 +3954,8 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffff20e0, .op = 0x1e062000 }, .id = HEX_INS_V6_VCVT_H_HF, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, 
.reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -3966,8 +3967,8 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffff20e0, .op = 0x1e042040 }, .id = HEX_INS_V6_VCVT_HF_B, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -3979,8 +3980,8 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffff20e0, .op = 0x1e0420e0 }, .id = HEX_INS_V6_VCVT_HF_H, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -3992,9 +3993,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1f602020 }, .id = HEX_INS_V6_VCVT_HF_SF, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - 
{ .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -4006,8 +4007,8 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffff20e0, .op = 0x1e042020 }, .id = HEX_INS_V6_VCVT_HF_UB, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -4019,8 +4020,8 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffff20e0, .op = 0x1e0420a0 }, .id = HEX_INS_V6_VCVT_HF_UH, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, 
.masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -4032,8 +4033,8 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffff20e0, .op = 0x1e042080 }, .id = HEX_INS_V6_VCVT_SF_HF, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -4045,9 +4046,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1fc020a0 }, .id = HEX_INS_V6_VCVT_UB_HF, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = 
HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -4059,8 +4060,8 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffff20e0, .op = 0x1e052000 }, .id = HEX_INS_V6_VCVT_UH_HF, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -4072,9 +4073,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x19e02040 }, .id = HEX_INS_V6_VDEAL, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'y', .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'x', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 8 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -4086,8 +4087,8 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding 
= { .mask = 0xffff20e0, .op = 0x1e0000e0 }, .id = HEX_INS_V6_VDEALB, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -4099,9 +4100,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1f2000e0 }, .id = HEX_INS_V6_VDEALB4W, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -4113,8 +4114,8 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffff20e0, .op = 0x1e0000c0 }, .id = HEX_INS_V6_VDEALH, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = 
HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -4126,10 +4127,10 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xff0020e0, .op = 0x1b002080 }, .id = HEX_INS_V6_VDEALVDD, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 19 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS_LOW8, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 19 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS_LOW8, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -4141,9 +4142,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1f200020 }, .id = HEX_INS_V6_VDELTA, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 
0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -4155,9 +4156,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1fa020c0 }, .id = HEX_INS_V6_VDMPY_SF_HF, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 16 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -4169,9 +4170,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1c402060 }, .id = HEX_INS_V6_VDMPY_SF_HF_ACC, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 
0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 17 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -4183,9 +4184,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x190000c0 }, .id = HEX_INS_V6_VDMPYBUS, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -4197,9 +4198,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x190020c0 }, .id = HEX_INS_V6_VDMPYBUS_ACC, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 
} }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 16 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -4211,9 +4212,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x190000e0 }, .id = HEX_INS_V6_VDMPYBUS_DV, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -4225,9 +4226,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 
0x190020e0 }, .id = HEX_INS_V6_VDMPYBUS_DV_ACC, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'u', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 16 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -4239,9 +4240,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x19000040 }, .id = HEX_INS_V6_VDMPYHB, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 14 }, }, .pred = HEX_NOPRED, .cond = 
RZ_TYPE_COND_AL, @@ -4253,9 +4254,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x19002060 }, .id = HEX_INS_V6_VDMPYHB_ACC, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -4267,9 +4268,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x19200080 }, .id = HEX_INS_V6_VDMPYHB_DV, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'u', .syntax = 11 }, + { .info = 
HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 14 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -4281,9 +4282,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x19202080 }, .id = HEX_INS_V6_VDMPYHB_DV_ACC, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'u', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -4295,9 +4296,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x19200060 }, .id = HEX_INS_V6_VDMPYHISAT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, 
.isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 14 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -4309,9 +4310,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x19202040 }, .id = HEX_INS_V6_VDMPYHISAT_ACC, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'u', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -4323,9 +4324,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x19200040 }, .id = HEX_INS_V6_VDMPYHSAT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 14 }, + { .info = 
HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 14 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -4337,9 +4338,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x19202060 }, .id = HEX_INS_V6_VDMPYHSAT_ACC, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -4351,9 +4352,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x19200020 }, .id = HEX_INS_V6_VDMPYHSUISAT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = 
HEX_REG_CLASS_INT_REGS, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 14 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -4365,9 +4366,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x19202020 }, .id = HEX_INS_V6_VDMPYHSUISAT_ACC, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'u', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -4379,9 +4380,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x19200000 }, .id = HEX_INS_V6_VDMPYHSUSAT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = 
HEX_REG_CLASS_HVX_VR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 14 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -4393,9 +4394,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x19202000 }, .id = HEX_INS_V6_VDMPYHSUSAT_ACC, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -4407,9 +4408,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1c000060 }, .id = HEX_INS_V6_VDMPYHVSAT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, 
.reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 14 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -4421,9 +4422,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1c002060 }, .id = HEX_INS_V6_VDMPYHVSAT_ACC, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -4435,9 +4436,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x190000a0 }, .id = HEX_INS_V6_VDSADUH, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 0 }, - { .info = 
HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'u', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 16 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -4449,9 +4450,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x19602000 }, .id = HEX_INS_V6_VDSADUH_ACC, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'u', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 17 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -4463,9 +4464,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { 
.mask = 0xffe020fc, .op = 0x1f800000 }, .id = HEX_INS_V6_VEQB, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 14 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -4477,9 +4478,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020fc, .op = 0x1c802000 }, .id = HEX_INS_V6_VEQB_AND, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -4491,9 +4492,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 
0xffe020fc, .op = 0x1c802040 }, .id = HEX_INS_V6_VEQB_OR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -4505,9 +4506,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020fc, .op = 0x1c802080 }, .id = HEX_INS_V6_VEQB_XOR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -4519,9 +4520,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 
0xffe020fc, .op = 0x1f800004 }, .id = HEX_INS_V6_VEQH, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 14 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -4533,9 +4534,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020fc, .op = 0x1c802004 }, .id = HEX_INS_V6_VEQH_AND, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -4547,9 +4548,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020fc, 
.op = 0x1c802044 }, .id = HEX_INS_V6_VEQH_OR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -4561,9 +4562,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020fc, .op = 0x1c802084 }, .id = HEX_INS_V6_VEQH_XOR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -4575,9 +4576,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020fc, .op = 
0x1f800008 }, .id = HEX_INS_V6_VEQW, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 14 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -4589,9 +4590,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020fc, .op = 0x1c802008 }, .id = HEX_INS_V6_VEQW_AND, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -4603,9 +4604,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020fc, .op = 0x1c802048 }, 
.id = HEX_INS_V6_VEQW_OR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -4617,9 +4618,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020fc, .op = 0x1c802088 }, .id = HEX_INS_V6_VEQW_XOR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -4631,9 +4632,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1c602040 }, .id = 
HEX_INS_V6_VFMAX_HF, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 16 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -4645,9 +4646,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1c602060 }, .id = HEX_INS_V6_VFMAX_SF, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 16 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -4659,9 +4660,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1c602000 }, .id = 
HEX_INS_V6_VFMIN_HF, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 16 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -4673,9 +4674,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1c602020 }, .id = HEX_INS_V6_VFMIN_SF, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 16 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -4687,8 +4688,8 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffff20e0, .op = 0x1e062040 }, .id = 
HEX_INS_V6_VFNEG_HF, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -4700,8 +4701,8 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffff20e0, .op = 0x1e062060 }, .id = HEX_INS_V6_VFNEG_SF, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -4713,9 +4714,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020fc, .op = 0x1f800010 }, .id = HEX_INS_V6_VGTB, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, 
.reg_cls = HEX_REG_CLASS_HVX_QR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 14 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -4727,9 +4728,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020fc, .op = 0x1c802010 }, .id = HEX_INS_V6_VGTB_AND, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -4741,9 +4742,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020fc, .op = 0x1c802050 }, .id = HEX_INS_V6_VGTB_OR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = 
HEX_REG_CLASS_HVX_QR, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -4755,9 +4756,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020fc, .op = 0x1c802090 }, .id = HEX_INS_V6_VGTB_XOR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -4769,9 +4770,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020fc, .op = 0x1c802078 }, .id = HEX_INS_V6_VGTBF, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = 
HEX_REG_CLASS_HVX_QR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -4783,9 +4784,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020fc, .op = 0x1c8020d0 }, .id = HEX_INS_V6_VGTBF_AND, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 16 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -4797,9 +4798,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020fc, .op = 0x1c802038 }, .id = HEX_INS_V6_VGTBF_OR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = 
HEX_REG_CLASS_HVX_QR, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 16 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -4811,9 +4812,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020fc, .op = 0x1c8020f0 }, .id = HEX_INS_V6_VGTBF_XOR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 16 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -4825,9 +4826,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020fc, .op = 0x1f800014 }, .id = HEX_INS_V6_VGTH, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = 
HEX_REG_CLASS_HVX_QR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 14 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -4839,9 +4840,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020fc, .op = 0x1c802014 }, .id = HEX_INS_V6_VGTH_AND, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -4853,9 +4854,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020fc, .op = 0x1c802054 }, .id = HEX_INS_V6_VGTH_OR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = 
HEX_REG_CLASS_HVX_QR, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -4867,9 +4868,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020fc, .op = 0x1c802094 }, .id = HEX_INS_V6_VGTH_XOR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -4881,9 +4882,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020fc, .op = 0x1c802074 }, .id = HEX_INS_V6_VGTHF, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = 
HEX_REG_CLASS_HVX_QR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -4895,9 +4896,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020fc, .op = 0x1c8020cc }, .id = HEX_INS_V6_VGTHF_AND, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 16 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -4909,9 +4910,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020fc, .op = 0x1c802034 }, .id = HEX_INS_V6_VGTHF_OR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = 
HEX_REG_CLASS_HVX_QR, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 16 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -4923,9 +4924,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020fc, .op = 0x1c8020ec }, .id = HEX_INS_V6_VGTHF_XOR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 16 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -4937,9 +4938,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020fc, .op = 0x1c802070 }, .id = HEX_INS_V6_VGTSF, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = 
HEX_REG_CLASS_HVX_QR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -4951,9 +4952,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020fc, .op = 0x1c8020c8 }, .id = HEX_INS_V6_VGTSF_AND, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 16 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -4965,9 +4966,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020fc, .op = 0x1c802030 }, .id = HEX_INS_V6_VGTSF_OR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = 
HEX_REG_CLASS_HVX_QR, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 16 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -4979,9 +4980,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020fc, .op = 0x1c8020e8 }, .id = HEX_INS_V6_VGTSF_XOR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 16 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -4993,9 +4994,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020fc, .op = 0x1f800020 }, .id = HEX_INS_V6_VGTUB, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = 
HEX_REG_CLASS_HVX_QR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -5007,9 +5008,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020fc, .op = 0x1c802020 }, .id = HEX_INS_V6_VGTUB_AND, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 16 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -5021,9 +5022,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020fc, .op = 0x1c802060 }, .id = HEX_INS_V6_VGTUB_OR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = 
HEX_REG_CLASS_HVX_QR, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 16 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -5035,9 +5036,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020fc, .op = 0x1c8020a0 }, .id = HEX_INS_V6_VGTUB_XOR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 16 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -5049,9 +5050,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020fc, .op = 0x1f800024 }, .id = HEX_INS_V6_VGTUH, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = 
HEX_REG_CLASS_HVX_QR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -5063,9 +5064,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020fc, .op = 0x1c802024 }, .id = HEX_INS_V6_VGTUH_AND, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 16 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -5077,9 +5078,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020fc, .op = 0x1c802064 }, .id = HEX_INS_V6_VGTUH_OR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = 
HEX_REG_CLASS_HVX_QR, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 16 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -5091,9 +5092,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020fc, .op = 0x1c8020a4 }, .id = HEX_INS_V6_VGTUH_XOR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 16 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -5105,9 +5106,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020fc, .op = 0x1f800028 }, .id = HEX_INS_V6_VGTUW, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = 
HEX_REG_CLASS_HVX_QR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -5119,9 +5120,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020fc, .op = 0x1c802028 }, .id = HEX_INS_V6_VGTUW_AND, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 16 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -5133,9 +5134,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020fc, .op = 0x1c802068 }, .id = HEX_INS_V6_VGTUW_OR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = 
HEX_REG_CLASS_HVX_QR, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 16 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -5147,9 +5148,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020fc, .op = 0x1c8020a8 }, .id = HEX_INS_V6_VGTUW_XOR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 16 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -5161,9 +5162,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020fc, .op = 0x1f800018 }, .id = HEX_INS_V6_VGTW, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = 
HEX_REG_CLASS_HVX_QR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 14 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -5175,9 +5176,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020fc, .op = 0x1c802018 }, .id = HEX_INS_V6_VGTW_AND, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -5189,9 +5190,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020fc, .op = 0x1c802058 }, .id = HEX_INS_V6_VGTW_OR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = 
HEX_REG_CLASS_HVX_QR, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -5203,9 +5204,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020fc, .op = 0x1c802098 }, .id = HEX_INS_V6_VGTW_XOR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -5226,7 +5227,7 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xff3f3fff, .op = 0x1e022080 }, .id = HEX_INS_V6_VHISTQ, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 22 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 22 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .isa_id = 'v', .syntax = 6 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -5238,8 +5239,8 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe03fe0, .op = 0x19a02020 }, .id = HEX_INS_V6_VINSERTWR, .ops = { - { .info = 
HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -5251,10 +5252,10 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xff0020e0, .op = 0x1b000020 }, .id = HEX_INS_V6_VLALIGNB, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 19 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS_LOW8, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 19 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS_LOW8, .isa_id = 't', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -5266,10 +5267,10 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe02000, .op = 0x1e602000 }, .id = HEX_INS_V6_VLALIGNBI, .ops 
= { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 5 } }, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 5 } }, .isa_id = 'u', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -5281,9 +5282,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x19800060 }, .id = HEX_INS_V6_VLSRB, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, 
.isa_id = 't', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -5295,9 +5296,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x19800040 }, .id = HEX_INS_V6_VLSRH, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -5309,9 +5310,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1fa00040 }, .id = HEX_INS_V6_VLSRHV, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 
'v', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -5323,9 +5324,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x19800020 }, .id = HEX_INS_V6_VLSRW, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -5337,9 +5338,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1fa00020 }, .id = HEX_INS_V6_VLSRWV, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', 
.syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -5351,9 +5352,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x19600080 }, .id = HEX_INS_V6_VLUT4, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -5365,10 +5366,10 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xff0020e0, .op = 0x1b002020 }, .id = HEX_INS_V6_VLUTVVB, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 19 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 15 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS_LOW8, .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 
8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 19 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS_LOW8, .isa_id = 't', .syntax = 18 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -5380,10 +5381,10 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xff0020e0, .op = 0x18000060 }, .id = HEX_INS_V6_VLUTVVB_NM, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 19 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 15 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS_LOW8, .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 19 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS_LOW8, .isa_id = 't', .syntax = 18 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -5395,10 +5396,10 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xff0020e0, .op = 0x1b0020a0 }, .id = HEX_INS_V6_VLUTVVB_ORACC, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = 
{ { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 19 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 16 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS_LOW8, .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 19 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS_LOW8, .isa_id = 't', .syntax = 19 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -5410,10 +5411,10 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe02000, .op = 0x1cc02000 }, .id = HEX_INS_V6_VLUTVVB_ORACCI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 16 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 5 } }, .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 16 }, + { .info = 
HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 5 } }, .isa_id = 'u', .syntax = 19 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -5425,10 +5426,10 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe02000, .op = 0x1e200000 }, .id = HEX_INS_V6_VLUTVVBI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 15 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 5 } }, .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 5 } }, .isa_id = 'u', .syntax = 18 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -5440,10 +5441,10 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xff0020e0, .op = 0x1b0020c0 }, .id = HEX_INS_V6_VLUTVWH, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 19 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 15 }, - { .info = 
HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS_LOW8, .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 19 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS_LOW8, .isa_id = 't', .syntax = 18 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -5455,10 +5456,10 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xff0020e0, .op = 0x18000080 }, .id = HEX_INS_V6_VLUTVWH_NM, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 19 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 15 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS_LOW8, .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 19 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS_LOW8, .isa_id = 't', .syntax = 
18 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -5470,10 +5471,10 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xff0020e0, .op = 0x1b0020e0 }, .id = HEX_INS_V6_VLUTVWH_ORACC, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 19 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 16 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS_LOW8, .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 19 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS_LOW8, .isa_id = 't', .syntax = 19 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -5485,10 +5486,10 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe02000, .op = 0x1ce02000 }, .id = HEX_INS_V6_VLUTVWH_ORACCI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 16 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | 
HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 5 } }, .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 5 } }, .isa_id = 'u', .syntax = 19 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -5500,10 +5501,10 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe02000, .op = 0x1e600000 }, .id = HEX_INS_V6_VLUTVWHI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 15 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 5 } }, .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 5 } }, .isa_id = 'u', .syntax = 18 }, }, .pred = HEX_NOPRED, .cond = 
RZ_TYPE_COND_AL, @@ -5515,9 +5516,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1d4020e0 }, .id = HEX_INS_V6_VMAX_BF, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -5529,9 +5530,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1fc02060 }, .id = HEX_INS_V6_VMAX_HF, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = 
RZ_TYPE_COND_AL, @@ -5543,9 +5544,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1fc02020 }, .id = HEX_INS_V6_VMAX_SF, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -5557,9 +5558,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1f2000a0 }, .id = HEX_INS_V6_VMAXB, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = 
RZ_TYPE_COND_AL, @@ -5571,9 +5572,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1f0000e0 }, .id = HEX_INS_V6_VMAXH, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -5585,9 +5586,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1f0000a0 }, .id = HEX_INS_V6_VMAXUB, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = 
RZ_TYPE_COND_AL, @@ -5599,9 +5600,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1f0000c0 }, .id = HEX_INS_V6_VMAXUH, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -5613,9 +5614,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1f200000 }, .id = HEX_INS_V6_VMAXW, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = 
RZ_TYPE_COND_AL, @@ -5627,9 +5628,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1d402000 }, .id = HEX_INS_V6_VMIN_BF, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -5641,9 +5642,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1fc02080 }, .id = HEX_INS_V6_VMIN_HF, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = 
RZ_TYPE_COND_AL, @@ -5655,9 +5656,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1fc02040 }, .id = HEX_INS_V6_VMIN_SF, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -5669,9 +5670,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1f200080 }, .id = HEX_INS_V6_VMINB, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = 
RZ_TYPE_COND_AL, @@ -5683,9 +5684,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1f000060 }, .id = HEX_INS_V6_VMINH, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -5697,9 +5698,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1f000020 }, .id = HEX_INS_V6_VMINUB, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = 
RZ_TYPE_COND_AL, @@ -5711,9 +5712,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1f000040 }, .id = HEX_INS_V6_VMINUH, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -5725,9 +5726,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1f000080 }, .id = HEX_INS_V6_VMINW, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = 
RZ_TYPE_COND_AL, @@ -5739,9 +5740,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x192000c0 }, .id = HEX_INS_V6_VMPABUS, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'u', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 14 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -5753,9 +5754,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x192020c0 }, .id = HEX_INS_V6_VMPABUS_ACC, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | 
HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -5767,9 +5768,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1c200060 }, .id = HEX_INS_V6_VMPABUSV, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'u', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'v', .syntax = 14 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -5781,9 +5782,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x19600060 }, .id = HEX_INS_V6_VMPABUU, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 10 }, - { .info = 
HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'u', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 14 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -5795,9 +5796,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x19a02080 }, .id = HEX_INS_V6_VMPABUU_ACC, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -5809,9 +5810,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1ce000e0 }, .id = HEX_INS_V6_VMPABUUV, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | 
HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'u', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'v', .syntax = 14 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -5823,9 +5824,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x192000e0 }, .id = HEX_INS_V6_VMPAHB, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'u', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, 
.isa_id = 't', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -5837,9 +5838,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x192020e0 }, .id = HEX_INS_V6_VMPAHB_ACC, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 14 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -5851,10 +5852,10 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x19802080 }, .id = HEX_INS_V6_VMPAHHSAT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, 
.masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'x', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 16 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -5866,9 +5867,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x198000a0 }, .id = HEX_INS_V6_VMPAUHB, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'u', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 14 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -5880,9 +5881,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x19802040 }, .id = HEX_INS_V6_VMPAUHB_ACC, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 
0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -5894,10 +5895,10 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x198020a0 }, .id = HEX_INS_V6_VMPAUHUHSAT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'x', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax 
= 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 17 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -5909,10 +5910,10 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x198020c0 }, .id = HEX_INS_V6_VMPSUHUHSAT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'x', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 17 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -5924,9 +5925,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1f802060 }, .id = HEX_INS_V6_VMPY_HF_HF, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = 
HEX_REG_CLASS_HVX_VR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -5938,9 +5939,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1c402040 }, .id = HEX_INS_V6_VMPY_HF_HF_ACC, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 16 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -5952,9 +5953,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1fe02060 }, .id = HEX_INS_V6_VMPY_QF16, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = 
HEX_REG_CLASS_HVX_VR, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 19 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -5966,9 +5967,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1fe02080 }, .id = HEX_INS_V6_VMPY_QF16_HF, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 17 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -5980,9 +5981,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1fe020a0 }, .id = HEX_INS_V6_VMPY_QF16_MIX_HF, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls 
= HEX_REG_CLASS_HVX_VR, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 19 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -5994,9 +5995,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1fe02000 }, .id = HEX_INS_V6_VMPY_QF32, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 19 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -6008,9 +6009,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1fe020e0 }, .id = HEX_INS_V6_VMPY_QF32_HF, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks 
= { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 17 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -6022,9 +6023,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1f802000 }, .id = HEX_INS_V6_VMPY_QF32_MIX_HF, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 19 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -6036,9 +6037,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1fe020c0 }, .id = HEX_INS_V6_VMPY_QF32_QF16, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | 
HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 19 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -6050,9 +6051,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1fe02020 }, .id = HEX_INS_V6_VMPY_QF32_SF, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 17 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -6064,9 +6065,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1d402080 }, .id = HEX_INS_V6_VMPY_SF_BF, .ops = { - { .info 
= HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -6078,9 +6079,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1d002000 }, .id = HEX_INS_V6_VMPY_SF_BF_ACC, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 16 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -6092,9 +6093,9 @@ static const HexInsnTemplate 
templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1f802040 }, .id = HEX_INS_V6_VMPY_SF_HF, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -6106,9 +6107,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1c402020 }, .id = HEX_INS_V6_VMPY_SF_HF_ACC, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, 
.isa_id = 'v', .syntax = 16 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -6120,9 +6121,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1f802020 }, .id = HEX_INS_V6_VMPY_SF_SF, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -6134,9 +6135,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x192000a0 }, .id = HEX_INS_V6_VMPYBUS, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, 
.masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 14 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -6148,9 +6149,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x192020a0 }, .id = HEX_INS_V6_VMPYBUS_ACC, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -6162,9 +6163,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1c0000c0 }, .id = HEX_INS_V6_VMPYBUSV, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, 
.masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 14 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -6176,9 +6177,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1c0020c0 }, .id = HEX_INS_V6_VMPYBUSV_ACC, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -6190,9 +6191,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1c000080 }, .id = HEX_INS_V6_VMPYBV, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, 
.masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -6204,9 +6205,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1c002080 }, .id = HEX_INS_V6_VMPYBV_ACC, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 14 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -6218,9 +6219,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1fe000a0 }, .id = HEX_INS_V6_VMPYEWUH, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 14 }, + { .info = 
HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 14 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -6232,9 +6233,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1ea000c0 }, .id = HEX_INS_V6_VMPYEWUH_64, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -6246,9 +6247,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x19400000 }, .id = HEX_INS_V6_VMPYH, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, 
.masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -6260,9 +6261,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x19a020c0 }, .id = HEX_INS_V6_VMPYH_ACC, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 14 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -6274,9 +6275,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x19402000 }, .id = HEX_INS_V6_VMPYHSAT_ACC, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 0 }, - { .info = 
HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 14 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -6288,9 +6289,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x19400040 }, .id = HEX_INS_V6_VMPYHSRS, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -6302,9 +6303,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x19400020 }, .id = HEX_INS_V6_VMPYHSS, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = 
HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -6316,9 +6317,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1c200040 }, .id = HEX_INS_V6_VMPYHUS, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -6330,9 +6331,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1c202020 }, .id = HEX_INS_V6_VMPYHUS_ACC, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | 
HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 14 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -6344,9 +6345,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1c0000e0 }, .id = HEX_INS_V6_VMPYHV, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -6358,9 +6359,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = 
{ .mask = 0xffe020e0, .op = 0x1c0020e0 }, .id = HEX_INS_V6_VMPYHV_ACC, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 14 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -6372,9 +6373,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1c200020 }, .id = HEX_INS_V6_VMPYHVSRS, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -6386,9 +6387,9 @@ static 
const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1f600000 }, .id = HEX_INS_V6_VMPYIEOH, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 16 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -6400,9 +6401,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1c402000 }, .id = HEX_INS_V6_VMPYIEWH_ACC, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 16 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -6414,9 +6415,9 @@ static const 
HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1fc00000 }, .id = HEX_INS_V6_VMPYIEWUH, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -6428,9 +6429,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1c2020a0 }, .id = HEX_INS_V6_VMPYIEWUH_ACC, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 16 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -6442,9 +6443,9 @@ static const 
HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1c200080 }, .id = HEX_INS_V6_VMPYIH, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 14 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -6456,9 +6457,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1c202080 }, .id = HEX_INS_V6_VMPYIH_ACC, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -6470,9 +6471,9 @@ static const 
HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x19600000 }, .id = HEX_INS_V6_VMPYIHB, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 14 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -6484,9 +6485,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x19602020 }, .id = HEX_INS_V6_VMPYIHB_ACC, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -6498,9 +6499,9 @@ static const 
HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1fc00020 }, .id = HEX_INS_V6_VMPYIOWH, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -6512,9 +6513,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x19a00000 }, .id = HEX_INS_V6_VMPYIWB, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 14 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -6526,9 +6527,9 @@ static const 
HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x19402040 }, .id = HEX_INS_V6_VMPYIWB_ACC, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -6540,9 +6541,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x198000e0 }, .id = HEX_INS_V6_VMPYIWH, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 14 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -6554,9 +6555,9 @@ static const 
HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x19402060 }, .id = HEX_INS_V6_VMPYIWH_ACC, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -6568,9 +6569,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x198000c0 }, .id = HEX_INS_V6_VMPYIWUB, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 14 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -6582,9 +6583,9 @@ static 
const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x19802020 }, .id = HEX_INS_V6_VMPYIWUB_ACC, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -6596,9 +6597,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1fe000e0 }, .id = HEX_INS_V6_VMPYOWH, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 14 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -6610,9 +6611,9 @@ static 
const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1c202060 }, .id = HEX_INS_V6_VMPYOWH_64_ACC, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -6624,9 +6625,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1f400000 }, .id = HEX_INS_V6_VMPYOWH_RND, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 14 }, }, .pred = 
HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -6638,9 +6639,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1c2020e0 }, .id = HEX_INS_V6_VMPYOWH_RND_SACC, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -6652,9 +6653,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1c2020c0 }, .id = HEX_INS_V6_VMPYOWH_SACC, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 15 }, }, .pred 
= HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -6666,9 +6667,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x19c00000 }, .id = HEX_INS_V6_VMPYUB, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -6680,9 +6681,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x19802000 }, .id = HEX_INS_V6_VMPYUB_ACC, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 12 }, + { .info = 
HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 16 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -6694,9 +6695,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1c0000a0 }, .id = HEX_INS_V6_VMPYUBV, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -6708,9 +6709,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1c0020a0 }, .id = HEX_INS_V6_VMPYUBV_ACC, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'x', .syntax = 0 }, + { .info = 
HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 16 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -6722,9 +6723,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x19400060 }, .id = HEX_INS_V6_VMPYUH, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -6736,9 +6737,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x19402020 }, .id = HEX_INS_V6_VMPYUH_ACC, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | 
HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 16 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -6750,9 +6751,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x19600040 }, .id = HEX_INS_V6_VMPYUHE, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 16 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -6764,9 +6765,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x19802060 }, .id = HEX_INS_V6_VMPYUHE_ACC, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG 
| HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 17 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -6778,9 +6779,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1c200000 }, .id = HEX_INS_V6_VMPYUHV, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -6792,9 +6793,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1c202000 }, .id = HEX_INS_V6_VMPYUHV_ACC, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 
} }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 16 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -6806,9 +6807,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1fc020e0 }, .id = HEX_INS_V6_VMPYUHVS, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -6820,10 +6821,10 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe02080, .op = 0x1ee02000 }, .id = HEX_INS_V6_VMUX, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, 
.masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .isa_id = 't', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -6835,9 +6836,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1f0020c0 }, .id = HEX_INS_V6_VNAVGB, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 14 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -6849,9 +6850,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1ce00020 }, .id = HEX_INS_V6_VNAVGH, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, 
.masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 14 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -6863,9 +6864,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1ce00000 }, .id = HEX_INS_V6_VNAVGUB, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -6877,9 +6878,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1ce00040 }, .id = HEX_INS_V6_VNAVGW, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 
0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 14 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -6891,10 +6892,10 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe02080, .op = 0x1a400000 }, .id = HEX_INS_V6_VNCCOMBINE, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 19 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 20 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 's', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'd', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 20 
}, }, .pred = HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_VEC_FALSE, @@ -6907,9 +6908,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffff2080, .op = 0x1a200000 }, .id = HEX_INS_V6_VNCMOV, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 's', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 10 }, }, .pred = HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_VEC_FALSE, @@ -6922,8 +6923,8 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffff20e0, .op = 0x1e0300a0 }, .id = HEX_INS_V6_VNORMAMTH, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 14 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -6935,8 +6936,8 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffff20e0, .op = 0x1e030080 }, .id = HEX_INS_V6_VNORMAMTW, .ops = { 
- { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 14 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -6948,8 +6949,8 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffff20e0, .op = 0x1e000080 }, .id = HEX_INS_V6_VNOT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 8 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -6961,9 +6962,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1c2000c0 }, .id = HEX_INS_V6_VOR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 
'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 8 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -6975,9 +6976,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1fc00040 }, .id = HEX_INS_V6_VPACKEB, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -6989,9 +6990,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1fc00060 }, .id = HEX_INS_V6_VPACKEH, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax 
= 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -7003,9 +7004,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1fc000c0 }, .id = HEX_INS_V6_VPACKHB_SAT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 14 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -7017,9 +7018,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1fc000a0 }, .id = HEX_INS_V6_VPACKHUB_SAT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 
0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -7031,9 +7032,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1fe00020 }, .id = HEX_INS_V6_VPACKOB, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -7045,9 +7046,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1fe00040 }, .id = HEX_INS_V6_VPACKOH, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { 
.info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -7059,9 +7060,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1fe00000 }, .id = HEX_INS_V6_VPACKWH_SAT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 14 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -7073,9 +7074,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1fc000e0 }, .id = HEX_INS_V6_VPACKWUH_SAT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { 
.info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -7087,8 +7088,8 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffff20e0, .op = 0x1e0200c0 }, .id = HEX_INS_V6_VPOPCOUNTH, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -7100,8 +7101,8 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xff3f3fe0, .op = 0x1e032040 }, .id = HEX_INS_V6_VPREFIXQB, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 22 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 22 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .isa_id = 'v', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -7113,8 +7114,8 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xff3f3fe0, .op = 0x1e032140 }, .id = HEX_INS_V6_VPREFIXQH, .ops = { - { .info = 
HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 22 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 22 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .isa_id = 'v', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -7126,8 +7127,8 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xff3f3fe0, .op = 0x1e032240 }, .id = HEX_INS_V6_VPREFIXQW, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 22 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 22 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .isa_id = 'v', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -7139,9 +7140,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1f200060 }, .id = HEX_INS_V6_VRDELTA, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, 
.isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -7153,9 +7154,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x19c000a0 }, .id = HEX_INS_V6_VRMPYBUB_RTT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 14 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -7167,9 +7168,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x19a02000 }, .id = HEX_INS_V6_VRMPYBUB_RTT_ACC, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | 
HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -7181,9 +7182,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x19000080 }, .id = HEX_INS_V6_VRMPYBUS, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -7195,9 +7196,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x190020a0 }, .id = HEX_INS_V6_VRMPYBUS_ACC, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = 
{ { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 16 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -7209,10 +7210,10 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020c0, .op = 0x19400080 }, .id = HEX_INS_V6_VRMPYBUSI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 15 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 5 } }, .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 5 } }, .isa_id = 'u', .syntax = 18 }, }, .pred = HEX_NOPRED, 
.cond = RZ_TYPE_COND_AL, @@ -7224,10 +7225,10 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020c0, .op = 0x19402080 }, .id = HEX_INS_V6_VRMPYBUSI_ACC, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 16 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 5 } }, .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'u', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 5 } }, .isa_id = 'u', .syntax = 19 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -7239,9 +7240,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1c000040 }, .id = HEX_INS_V6_VRMPYBUSV, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | 
HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -7253,9 +7254,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1c002040 }, .id = HEX_INS_V6_VRMPYBUSV_ACC, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 16 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -7267,9 +7268,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1c000020 }, .id = HEX_INS_V6_VRMPYBV, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | 
HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 14 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -7281,9 +7282,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1c002020 }, .id = HEX_INS_V6_VRMPYBV_ACC, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -7295,9 +7296,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x19000060 }, .id = HEX_INS_V6_VRMPYUB, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | 
HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 16 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -7309,9 +7310,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x19002080 }, .id = HEX_INS_V6_VRMPYUB_ACC, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 17 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -7323,9 +7324,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x19c00080 }, .id = HEX_INS_V6_VRMPYUB_RTT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = 
HEX_REG_CLASS_DOUBLE_REGS, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 16 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -7337,9 +7338,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x19a020e0 }, .id = HEX_INS_V6_VRMPYUB_RTT_ACC, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 17 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -7351,10 +7352,10 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020c0, .op = 0x19a000c0 }, .id = HEX_INS_V6_VRMPYUBI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 
0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 16 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 5 } }, .syntax = 20 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'u', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 5 } }, .isa_id = 'u', .syntax = 20 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -7366,10 +7367,10 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020c0, .op = 0x196020c0 }, .id = HEX_INS_V6_VRMPYUBI_ACC, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 17 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 5 } }, .syntax = 21 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'x', .syntax 
= 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'u', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 5 } }, .isa_id = 'u', .syntax = 21 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -7381,9 +7382,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1c000000 }, .id = HEX_INS_V6_VRMPYUBV, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 16 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -7395,9 +7396,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1c002000 }, .id = HEX_INS_V6_VRMPYUBV_ACC, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, 
.syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 17 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -7409,9 +7410,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xfff820e0, .op = 0x19e80000 }, .id = HEX_INS_V6_VRMPYZBB_RT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_QUADRUPLE, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VQR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS_LOW8, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_QUADRUPLE, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VQR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS_LOW8, .isa_id = 't', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -7423,9 +7424,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xfff820e0, .op = 0x19c02040 }, .id = HEX_INS_V6_VRMPYZBB_RT_ACC, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_QUADRUPLE, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VQR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = 
HEX_REG_CLASS_HVX_VR, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS_LOW8, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_QUADRUPLE, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VQR, .isa_id = 'y', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS_LOW8, .isa_id = 't', .syntax = 16 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -7437,9 +7438,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xfff820e0, .op = 0x19e00000 }, .id = HEX_INS_V6_VRMPYZBB_RX, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_QUADRUPLE, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VQR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS_LOW8, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_QUADRUPLE, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VQR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS_LOW8, .isa_id = 'x', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -7451,9 +7452,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xfff820e0, .op = 0x19c82040 }, .id = HEX_INS_V6_VRMPYZBB_RX_ACC, .ops = { - { .info = 
HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_QUADRUPLE, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VQR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS_LOW8, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_QUADRUPLE, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VQR, .isa_id = 'y', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS_LOW8, .isa_id = 'x', .syntax = 16 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -7465,9 +7466,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xfff820e0, .op = 0x19f80040 }, .id = HEX_INS_V6_VRMPYZBUB_RT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_QUADRUPLE, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VQR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS_LOW8, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_QUADRUPLE, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VQR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS_LOW8, .isa_id = 't', .syntax 
= 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -7479,9 +7480,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xfff820e0, .op = 0x19d02020 }, .id = HEX_INS_V6_VRMPYZBUB_RT_ACC, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_QUADRUPLE, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VQR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS_LOW8, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_QUADRUPLE, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VQR, .isa_id = 'y', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS_LOW8, .isa_id = 't', .syntax = 16 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -7493,9 +7494,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xfff820e0, .op = 0x19f00040 }, .id = HEX_INS_V6_VRMPYZBUB_RX, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_QUADRUPLE, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VQR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS_LOW8, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_QUADRUPLE, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VQR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, 
.masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS_LOW8, .isa_id = 'x', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -7507,9 +7508,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xfff820e0, .op = 0x19d82020 }, .id = HEX_INS_V6_VRMPYZBUB_RX_ACC, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_QUADRUPLE, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VQR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS_LOW8, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_QUADRUPLE, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VQR, .isa_id = 'y', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS_LOW8, .isa_id = 'x', .syntax = 16 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -7521,9 +7522,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xfff820e0, .op = 0x19e80020 }, .id = HEX_INS_V6_VRMPYZCB_RT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_QUADRUPLE, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VQR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 14 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x3, 16 } }, .reg_cls = 
HEX_REG_CLASS_INT_REGS_LOW8, .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_QUADRUPLE, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VQR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS_LOW8, .isa_id = 't', .syntax = 17 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -7535,9 +7536,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xfff820e0, .op = 0x19c02060 }, .id = HEX_INS_V6_VRMPYZCB_RT_ACC, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_QUADRUPLE, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VQR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 15 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS_LOW8, .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_QUADRUPLE, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VQR, .isa_id = 'y', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS_LOW8, .isa_id = 't', .syntax = 18 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -7549,9 +7550,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xfff820e0, .op = 0x19e00020 }, .id = HEX_INS_V6_VRMPYZCB_RX, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_QUADRUPLE, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VQR, .syntax = 0 }, - { 
.info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 14 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS_LOW8, .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_QUADRUPLE, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VQR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS_LOW8, .isa_id = 'x', .syntax = 17 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -7563,9 +7564,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xfff820e0, .op = 0x19c82060 }, .id = HEX_INS_V6_VRMPYZCB_RX_ACC, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_QUADRUPLE, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VQR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 15 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS_LOW8, .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_QUADRUPLE, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VQR, .isa_id = 'y', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS_LOW8, .isa_id = 'x', .syntax = 18 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -7577,9 +7578,9 @@ static const HexInsnTemplate 
templates_normal_0x1[] = { .encoding = { .mask = 0xfff820e0, .op = 0x19e80040 }, .id = HEX_INS_V6_VRMPYZCBS_RT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_QUADRUPLE, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VQR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 15 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS_LOW8, .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_QUADRUPLE, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VQR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS_LOW8, .isa_id = 't', .syntax = 18 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -7591,9 +7592,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xfff820e0, .op = 0x19c02020 }, .id = HEX_INS_V6_VRMPYZCBS_RT_ACC, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_QUADRUPLE, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VQR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 16 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS_LOW8, .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_QUADRUPLE, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VQR, .isa_id = 'y', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 
0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS_LOW8, .isa_id = 't', .syntax = 19 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -7605,9 +7606,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xfff820e0, .op = 0x19e00040 }, .id = HEX_INS_V6_VRMPYZCBS_RX, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_QUADRUPLE, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VQR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 15 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS_LOW8, .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_QUADRUPLE, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VQR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS_LOW8, .isa_id = 'x', .syntax = 18 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -7619,9 +7620,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xfff820e0, .op = 0x19c82020 }, .id = HEX_INS_V6_VRMPYZCBS_RX_ACC, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_QUADRUPLE, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VQR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 16 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS_LOW8, .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | 
HEX_OP_TEMPLATE_FLAG_REG_QUADRUPLE, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VQR, .isa_id = 'y', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS_LOW8, .isa_id = 'x', .syntax = 19 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -7633,9 +7634,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xfff820e0, .op = 0x19f80000 }, .id = HEX_INS_V6_VRMPYZNB_RT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_QUADRUPLE, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VQR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS_LOW8, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_QUADRUPLE, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VQR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS_LOW8, .isa_id = 't', .syntax = 16 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -7647,9 +7648,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xfff820e0, .op = 0x19d02040 }, .id = HEX_INS_V6_VRMPYZNB_RT_ACC, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_QUADRUPLE, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VQR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = 
HEX_REG_CLASS_HVX_VR, .syntax = 14 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS_LOW8, .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_QUADRUPLE, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VQR, .isa_id = 'y', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS_LOW8, .isa_id = 't', .syntax = 17 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -7661,9 +7662,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xfff820e0, .op = 0x19f00000 }, .id = HEX_INS_V6_VRMPYZNB_RX, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_QUADRUPLE, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VQR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS_LOW8, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_QUADRUPLE, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VQR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS_LOW8, .isa_id = 'x', .syntax = 16 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -7675,9 +7676,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xfff820e0, .op = 0x19d82040 }, .id = HEX_INS_V6_VRMPYZNB_RX_ACC, .ops = { - { .info = 
HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_QUADRUPLE, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VQR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 14 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS_LOW8, .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_QUADRUPLE, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VQR, .isa_id = 'y', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS_LOW8, .isa_id = 'x', .syntax = 17 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -7689,9 +7690,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x19600020 }, .id = HEX_INS_V6_VROR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -7703,9 +7704,9 @@ static const 
HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1a8020e0 }, .id = HEX_INS_V6_VROTR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 16 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -7717,9 +7718,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1f6000c0 }, .id = HEX_INS_V6_VROUNDHB, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -7731,9 +7732,9 @@ static const 
HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1f6000e0 }, .id = HEX_INS_V6_VROUNDHUB, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 16 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -7745,9 +7746,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1fe00060 }, .id = HEX_INS_V6_VROUNDUHUB, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 17 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -7759,9 +7760,9 @@ static const 
HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1fe00080 }, .id = HEX_INS_V6_VROUNDUWUH, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 17 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -7773,9 +7774,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1f600080 }, .id = HEX_INS_V6_VROUNDWH, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -7787,9 +7788,9 @@ static const 
HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1f6000a0 }, .id = HEX_INS_V6_VROUNDWUH, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 16 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -7801,10 +7802,10 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020c0, .op = 0x194000c0 }, .id = HEX_INS_V6_VRSADUBI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 16 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 5 } }, .syntax = 20 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, 
.isa_id = 'u', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 5 } }, .isa_id = 'u', .syntax = 20 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -7816,10 +7817,10 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020c0, .op = 0x194020c0 }, .id = HEX_INS_V6_VRSADUBI_ACC, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 17 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 5 } }, .syntax = 21 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'u', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 5 } }, .isa_id = 'u', .syntax = 21 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -7831,9 +7832,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1d8020e0 }, .id = HEX_INS_V6_VSATDW, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = 
HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -7845,9 +7846,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1f600040 }, .id = HEX_INS_V6_VSATHUB, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 14 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -7859,9 +7860,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1f2000c0 }, .id = HEX_INS_V6_VSATUWUH, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = 
HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -7873,9 +7874,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1f600060 }, .id = HEX_INS_V6_VSATWH, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -7887,8 +7888,8 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffff20e0, .op = 0x1e020060 }, .id = HEX_INS_V6_VSB, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 
} }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -7900,8 +7901,8 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffff20e0, .op = 0x1e020080 }, .id = HEX_INS_V6_VSH, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -7913,9 +7914,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1f400060 }, .id = HEX_INS_V6_VSHUFEH, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = 
HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 16 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -7927,9 +7928,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x19e02020 }, .id = HEX_INS_V6_VSHUFF, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'y', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'x', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -7941,8 +7942,8 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffff20e0, .op = 0x1e020000 }, .id = HEX_INS_V6_VSHUFFB, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = 
HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -7954,9 +7955,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1f400020 }, .id = HEX_INS_V6_VSHUFFEB, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 16 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -7968,8 +7969,8 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffff20e0, .op = 0x1e0100e0 }, .id = HEX_INS_V6_VSHUFFH, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -7981,9 +7982,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding 
= { .mask = 0xffe020e0, .op = 0x1f400040 }, .id = HEX_INS_V6_VSHUFFOB, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 16 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -7995,10 +7996,10 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xff0020e0, .op = 0x1b002060 }, .id = HEX_INS_V6_VSHUFFVDD, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 19 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS_LOW8, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 19 } }, .reg_cls = 
HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS_LOW8, .isa_id = 't', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -8010,9 +8011,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1f4000c0 }, .id = HEX_INS_V6_VSHUFOEB, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 14 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 17 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -8024,9 +8025,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1f4000a0 }, .id = HEX_INS_V6_VSHUFOEH, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 14 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = 
HEX_REG_CLASS_HVX_WR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 17 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -8038,9 +8039,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1f400080 }, .id = HEX_INS_V6_VSHUFOH, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 16 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -8052,9 +8053,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1f6020c0 }, .id = HEX_INS_V6_VSUB_HF, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = 
HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 17 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -8066,9 +8067,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1f602000 }, .id = HEX_INS_V6_VSUB_HF_HF, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -8080,9 +8081,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1f6020a0 }, .id = HEX_INS_V6_VSUB_QF16, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = 
HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 19 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -8094,9 +8095,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1f6020e0 }, .id = HEX_INS_V6_VSUB_QF16_MIX, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 19 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -8108,9 +8109,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1fa02060 }, .id = HEX_INS_V6_VSUB_QF32, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = 
HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 19 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -8122,9 +8123,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1fa020a0 }, .id = HEX_INS_V6_VSUB_QF32_MIX, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 19 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -8136,9 +8137,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1fa02080 }, .id = HEX_INS_V6_VSUB_SF, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = 
HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 17 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -8150,9 +8151,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1d4020a0 }, .id = HEX_INS_V6_VSUB_SF_BF, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -8164,9 +8165,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1f8020a0 }, .id = HEX_INS_V6_VSUB_SF_HF, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 15 }, + { .info = 
HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -8178,9 +8179,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1f8020e0 }, .id = HEX_INS_V6_VSUB_SF_SF, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -8192,9 +8193,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1c4000a0 }, .id = HEX_INS_V6_VSUBB, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, 
.syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -8206,9 +8207,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1c800060 }, .id = HEX_INS_V6_VSUBB_DV, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'u', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'v', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -8220,9 +8221,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xff3f20e0, .op = 0x1e022020 }, .id = HEX_INS_V6_VSUBBNQ, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 22 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | 
HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 22 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .isa_id = 'v', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'x', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -8234,9 +8235,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xff3f20e0, .op = 0x1e0120c0 }, .id = HEX_INS_V6_VSUBBQ, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 22 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 22 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .isa_id = 'v', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'x', .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -8248,9 +8249,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1f200040 }, .id = HEX_INS_V6_VSUBBSAT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = 
HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -8262,9 +8263,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1ea00020 }, .id = HEX_INS_V6_VSUBBSAT_DV, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'u', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'v', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -8276,10 +8277,10 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe02080, .op = 0x1ca02080 }, 
.id = HEX_INS_V6_VSUBCARRY, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .isa_id = 'x', .syntax = 16 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -8291,10 +8292,10 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe02080, .op = 0x1da02080 }, .id = HEX_INS_V6_VSUBCARRYO, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .syntax = 3 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 
}, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .isa_id = 'e', .syntax = 3 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 14 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -8306,9 +8307,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1c4000c0 }, .id = HEX_INS_V6_VSUBH, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -8320,9 +8321,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1c800080 }, .id = HEX_INS_V6_VSUBH_DV, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | 
HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'u', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'v', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -8334,9 +8335,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xff3f20e0, .op = 0x1e022040 }, .id = HEX_INS_V6_VSUBHNQ, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 22 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 22 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .isa_id = 'v', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'x', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -8348,9 +8349,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xff3f20e0, .op = 0x1e0120e0 }, .id = HEX_INS_V6_VSUBHQ, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 22 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | 
HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 22 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .isa_id = 'v', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'x', .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -8362,9 +8363,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1c600040 }, .id = HEX_INS_V6_VSUBHSAT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -8376,9 +8377,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1ca00000 }, .id = HEX_INS_V6_VSUBHSAT_DV, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, 
.syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'u', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'v', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -8390,9 +8391,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1ca000e0 }, .id = HEX_INS_V6_VSUBHW, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -8404,9 +8405,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { 
.encoding = { .mask = 0xffe020e0, .op = 0x1ca000a0 }, .id = HEX_INS_V6_VSUBUBH, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 14 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -8418,9 +8419,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1c600000 }, .id = HEX_INS_V6_VSUBUBSAT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -8432,9 +8433,9 @@ 
static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1c8000c0 }, .id = HEX_INS_V6_VSUBUBSAT_DV, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'v', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -8446,9 +8447,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1ea000a0 }, .id = HEX_INS_V6_VSUBUBUBB_SAT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 11 }, + { 
.info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -8460,9 +8461,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1c600020 }, .id = HEX_INS_V6_VSUBUHSAT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -8474,9 +8475,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1c8000e0 }, .id = HEX_INS_V6_VSUBUHSAT_DV, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'd', .syntax = 0 }, + { 
.info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'v', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -8488,9 +8489,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1ca000c0 }, .id = HEX_INS_V6_VSUBUHW, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 14 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -8502,9 +8503,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1fc00080 }, .id = HEX_INS_V6_VSUBUWSAT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | 
HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -8516,9 +8517,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1ea00060 }, .id = HEX_INS_V6_VSUBUWSAT_DV, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'v', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -8530,9 +8531,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1c4000e0 }, .id = HEX_INS_V6_VSUBW, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = 
HEX_REG_CLASS_HVX_VR, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -8544,9 +8545,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1c8000a0 }, .id = HEX_INS_V6_VSUBW_DV, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'u', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'v', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -8558,9 +8559,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xff3f20e0, .op = 0x1e022060 }, .id = HEX_INS_V6_VSUBWNQ, .ops = { - { .info = 
HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 22 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 22 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .isa_id = 'v', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'x', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -8572,9 +8573,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xff3f20e0, .op = 0x1e022000 }, .id = HEX_INS_V6_VSUBWQ, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 22 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 22 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .isa_id = 'v', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'x', .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -8586,9 +8587,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1c600060 }, .id = HEX_INS_V6_VSUBWSAT, .ops = { - { .info = 
HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -8600,9 +8601,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1ca00020 }, .id = HEX_INS_V6_VSUBWSAT_DV, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'u', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'v', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = 
RZ_TYPE_COND_AL, @@ -8614,10 +8615,10 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe02080, .op = 0x1ea02000 }, .id = HEX_INS_V6_VSWAP, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .isa_id = 't', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -8629,9 +8630,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x19000000 }, .id = HEX_INS_V6_VTMPYB, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | 
HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 14 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -8643,9 +8644,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x19002000 }, .id = HEX_INS_V6_VTMPYB_ACC, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'u', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -8657,9 +8658,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x19000020 }, .id = HEX_INS_V6_VTMPYBUS, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, 
.masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -8671,9 +8672,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x19002020 }, .id = HEX_INS_V6_VTMPYBUS_ACC, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'u', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 16 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -8685,9 +8686,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x19a00080 }, .id = 
HEX_INS_V6_VTMPYHB, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 14 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -8699,9 +8700,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x19002040 }, .id = HEX_INS_V6_VTMPYHB_ACC, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'u', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 
} }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -8713,8 +8714,8 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffff20e0, .op = 0x1e010040 }, .id = HEX_INS_V6_VUNPACKB, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -8726,8 +8727,8 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffff20e0, .op = 0x1e010060 }, .id = HEX_INS_V6_VUNPACKH, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -8739,8 +8740,8 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffff20e0, .op = 0x1e002000 }, .id = HEX_INS_V6_VUNPACKOB, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | 
HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -8752,8 +8753,8 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffff20e0, .op = 0x1e002020 }, .id = HEX_INS_V6_VUNPACKOH, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -8765,8 +8766,8 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffff20e0, .op = 0x1e010000 }, .id = HEX_INS_V6_VUNPACKUB, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = 
HEX_REG_CLASS_HVX_WR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 14 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -8778,8 +8779,8 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffff20e0, .op = 0x1e010020 }, .id = HEX_INS_V6_VUNPACKUH, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 14 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -8800,7 +8801,7 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffff3eff, .op = 0x1e002680 }, .id = HEX_INS_V6_VWHIST128M, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 8 } }, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 8 } }, .isa_id = 'u', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -8812,7 +8813,7 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xff3f3fff, .op = 0x1e022480 }, .id = HEX_INS_V6_VWHIST128Q, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 22 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 22 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .isa_id = 'v', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -8824,8 +8825,8 @@ 
static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xff3f3eff, .op = 0x1e022680 }, .id = HEX_INS_V6_VWHIST128QM, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 22 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 8 } }, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 22 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .isa_id = 'v', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 8 } }, .isa_id = 'u', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -8855,7 +8856,7 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xff3f3fff, .op = 0x1e022280 }, .id = HEX_INS_V6_VWHIST256Q, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 22 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 22 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .isa_id = 'v', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -8867,7 +8868,7 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xff3f3fff, .op = 0x1e022380 }, .id = HEX_INS_V6_VWHIST256Q_SAT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 22 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 22 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .isa_id = 'v', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -8879,9 +8880,9 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe020e0, .op = 0x1c2000e0 }, .id = HEX_INS_V6_VXOR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = 
HEX_REG_CLASS_HVX_VR, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -8893,8 +8894,8 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffff20e0, .op = 0x1e020020 }, .id = HEX_INS_V6_VZB, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -8906,8 +8907,8 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffff20e0, .op = 0x1e020040 }, .id = HEX_INS_V6_VZH, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, 
.reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -8919,8 +8920,8 @@ static const HexInsnTemplate templates_normal_0x1[] = { .encoding = { .mask = 0xffe03fe0, .op = 0x19a00120 }, .id = HEX_INS_V6_ZEXTRACT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -8936,9 +8937,9 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffc82001, .op = 0x20400000 }, .id = HEX_INS_J4_CMPEQ_F_JUMPNV_NT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 17 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .imm_scale = 2, .syntax = 28 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 17 }, + { .info = 
HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 28 }, }, .pred = HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -8951,9 +8952,9 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffc82001, .op = 0x20402000 }, .id = HEX_INS_J4_CMPEQ_F_JUMPNV_T, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 17 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .imm_scale = 2, .syntax = 27 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 27 }, }, .pred = HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -8966,9 +8967,9 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffc82001, .op = 0x20000000 }, .id = HEX_INS_J4_CMPEQ_T_JUMPNV_NT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, - { .info = 
HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 16 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .imm_scale = 2, .syntax = 27 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 27 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -8981,9 +8982,9 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffc82001, .op = 0x20002000 }, .id = HEX_INS_J4_CMPEQ_T_JUMPNV_T, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 16 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .imm_scale = 2, .syntax = 26 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 16 }, + { .info = 
HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 26 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -8996,9 +8997,9 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffc82001, .op = 0x24400000 }, .id = HEX_INS_J4_CMPEQI_F_JUMPNV_NT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x5, 8 } }, .syntax = 17 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .imm_scale = 2, .syntax = 28 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x5, 8 } }, .isa_id = 'U', .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 28 }, }, .pred = HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -9011,9 +9012,9 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffc82001, .op = 0x24402000 }, .id = HEX_INS_J4_CMPEQI_F_JUMPNV_T, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x5, 8 } }, .syntax = 17 }, - { .info = 
HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .imm_scale = 2, .syntax = 27 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x5, 8 } }, .isa_id = 'U', .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 27 }, }, .pred = HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -9026,9 +9027,9 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffc82001, .op = 0x24000000 }, .id = HEX_INS_J4_CMPEQI_T_JUMPNV_NT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x5, 8 } }, .syntax = 16 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .imm_scale = 2, .syntax = 27 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x5, 8 } }, .isa_id = 'U', .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .isa_id = 'r', 
.imm_scale = 2, .syntax = 27 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -9041,9 +9042,9 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffc82001, .op = 0x24002000 }, .id = HEX_INS_J4_CMPEQI_T_JUMPNV_T, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x5, 8 } }, .syntax = 16 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .imm_scale = 2, .syntax = 26 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x5, 8 } }, .isa_id = 'U', .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 26 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -9056,9 +9057,9 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffc83f01, .op = 0x26400000 }, .id = HEX_INS_J4_CMPEQN1_F_JUMPNV_NT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 12 }, { .info = HEX_OP_TEMPLATE_TYPE_IMM_CONST, .syntax = 17 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | 
HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .imm_scale = 2, .syntax = 28 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 28 }, }, .pred = HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -9071,9 +9072,9 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffc83f01, .op = 0x26402000 }, .id = HEX_INS_J4_CMPEQN1_F_JUMPNV_T, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 12 }, { .info = HEX_OP_TEMPLATE_TYPE_IMM_CONST, .syntax = 17 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .imm_scale = 2, .syntax = 27 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 27 }, }, .pred = HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -9086,9 +9087,9 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffc83f01, .op = 0x26000000 }, .id = HEX_INS_J4_CMPEQN1_T_JUMPNV_NT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { .info 
= HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 11 }, { .info = HEX_OP_TEMPLATE_TYPE_IMM_CONST, .syntax = 16 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .imm_scale = 2, .syntax = 27 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 27 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -9101,9 +9102,9 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffc83f01, .op = 0x26002000 }, .id = HEX_INS_J4_CMPEQN1_T_JUMPNV_T, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 11 }, { .info = HEX_OP_TEMPLATE_TYPE_IMM_CONST, .syntax = 16 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .imm_scale = 2, .syntax = 26 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 26 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -9116,9 +9117,9 @@ static const HexInsnTemplate 
templates_normal_0x2[] = { .encoding = { .mask = 0xffc82001, .op = 0x20c00000 }, .id = HEX_INS_J4_CMPGT_F_JUMPNV_NT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 17 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .imm_scale = 2, .syntax = 28 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 28 }, }, .pred = HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -9131,9 +9132,9 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffc82001, .op = 0x20c02000 }, .id = HEX_INS_J4_CMPGT_F_JUMPNV_T, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 17 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .imm_scale = 2, .syntax = 27 }, + { .info = 
HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 27 }, }, .pred = HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -9146,9 +9147,9 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffc82001, .op = 0x20800000 }, .id = HEX_INS_J4_CMPGT_T_JUMPNV_NT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 16 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .imm_scale = 2, .syntax = 27 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 27 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -9161,9 +9162,9 @@ static const HexInsnTemplate 
templates_normal_0x2[] = { .encoding = { .mask = 0xffc82001, .op = 0x20802000 }, .id = HEX_INS_J4_CMPGT_T_JUMPNV_T, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 16 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .imm_scale = 2, .syntax = 26 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 26 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -9176,9 +9177,9 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffc82001, .op = 0x24c00000 }, .id = HEX_INS_J4_CMPGTI_F_JUMPNV_NT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x5, 8 } }, .syntax = 17 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .imm_scale = 2, .syntax = 28 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | 
HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x5, 8 } }, .isa_id = 'U', .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 28 }, }, .pred = HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -9191,9 +9192,9 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffc82001, .op = 0x24c02000 }, .id = HEX_INS_J4_CMPGTI_F_JUMPNV_T, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x5, 8 } }, .syntax = 17 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .imm_scale = 2, .syntax = 27 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x5, 8 } }, .isa_id = 'U', .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 27 }, }, .pred = HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -9206,9 +9207,9 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffc82001, .op = 0x24800000 }, .id = HEX_INS_J4_CMPGTI_T_JUMPNV_NT, .ops = { - { .info = 
HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x5, 8 } }, .syntax = 16 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .imm_scale = 2, .syntax = 27 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x5, 8 } }, .isa_id = 'U', .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 27 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -9221,9 +9222,9 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffc82001, .op = 0x24802000 }, .id = HEX_INS_J4_CMPGTI_T_JUMPNV_T, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x5, 8 } }, .syntax = 16 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .imm_scale = 2, .syntax = 26 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x5, 8 } }, .isa_id = 'U', .syntax = 16 }, + { .info = 
HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 26 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -9236,9 +9237,9 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffc83f01, .op = 0x26c00000 }, .id = HEX_INS_J4_CMPGTN1_F_JUMPNV_NT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 12 }, { .info = HEX_OP_TEMPLATE_TYPE_IMM_CONST, .syntax = 17 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .imm_scale = 2, .syntax = 28 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 28 }, }, .pred = HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -9251,9 +9252,9 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffc83f01, .op = 0x26c02000 }, .id = HEX_INS_J4_CMPGTN1_F_JUMPNV_T, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 12 }, { .info = HEX_OP_TEMPLATE_TYPE_IMM_CONST, 
.syntax = 17 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .imm_scale = 2, .syntax = 27 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 27 }, }, .pred = HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -9266,9 +9267,9 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffc83f01, .op = 0x26800000 }, .id = HEX_INS_J4_CMPGTN1_T_JUMPNV_NT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 11 }, { .info = HEX_OP_TEMPLATE_TYPE_IMM_CONST, .syntax = 16 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .imm_scale = 2, .syntax = 27 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 27 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -9281,9 +9282,9 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffc83f01, .op = 0x26802000 }, .id = HEX_INS_J4_CMPGTN1_T_JUMPNV_T, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, 
.masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 11 }, { .info = HEX_OP_TEMPLATE_TYPE_IMM_CONST, .syntax = 16 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .imm_scale = 2, .syntax = 26 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 26 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -9296,9 +9297,9 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffc82001, .op = 0x21400000 }, .id = HEX_INS_J4_CMPGTU_F_JUMPNV_NT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 18 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .imm_scale = 2, .syntax = 29 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | 
HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 29 }, }, .pred = HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -9311,9 +9312,9 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffc82001, .op = 0x21402000 }, .id = HEX_INS_J4_CMPGTU_F_JUMPNV_T, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 18 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .imm_scale = 2, .syntax = 28 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 28 }, }, .pred = HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -9326,9 +9327,9 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffc82001, .op = 0x21000000 }, .id = HEX_INS_J4_CMPGTU_T_JUMPNV_NT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 17 }, - { .info 
= HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .imm_scale = 2, .syntax = 28 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 28 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -9341,9 +9342,9 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffc82001, .op = 0x21002000 }, .id = HEX_INS_J4_CMPGTU_T_JUMPNV_T, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 17 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .imm_scale = 2, .syntax = 27 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | 
HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 27 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -9356,9 +9357,9 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffc82001, .op = 0x25400000 }, .id = HEX_INS_J4_CMPGTUI_F_JUMPNV_NT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x5, 8 } }, .syntax = 18 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .imm_scale = 2, .syntax = 29 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x5, 8 } }, .isa_id = 'U', .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 29 }, }, .pred = HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -9371,9 +9372,9 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffc82001, .op = 0x25402000 }, .id = HEX_INS_J4_CMPGTUI_F_JUMPNV_T, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x5, 8 } }, .syntax = 18 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | 
HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .imm_scale = 2, .syntax = 28 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x5, 8 } }, .isa_id = 'U', .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 28 }, }, .pred = HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -9386,9 +9387,9 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffc82001, .op = 0x25000000 }, .id = HEX_INS_J4_CMPGTUI_T_JUMPNV_NT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x5, 8 } }, .syntax = 17 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .imm_scale = 2, .syntax = 28 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x5, 8 } }, .isa_id = 'U', .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 28 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ 
-9401,9 +9402,9 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffc82001, .op = 0x25002000 }, .id = HEX_INS_J4_CMPGTUI_T_JUMPNV_T, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x5, 8 } }, .syntax = 17 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .imm_scale = 2, .syntax = 27 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x5, 8 } }, .isa_id = 'U', .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 27 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -9416,9 +9417,9 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffc82001, .op = 0x21c00000 }, .id = HEX_INS_J4_CMPLT_F_JUMPNV_NT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .imm_scale = 2, .syntax = 28 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 
0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 28 }, }, .pred = HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -9431,9 +9432,9 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffc82001, .op = 0x21c02000 }, .id = HEX_INS_J4_CMPLT_F_JUMPNV_T, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .imm_scale = 2, .syntax = 27 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 27 }, }, .pred = HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -9446,9 +9447,9 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask 
= 0xffc82001, .op = 0x21800000 }, .id = HEX_INS_J4_CMPLT_T_JUMPNV_NT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .imm_scale = 2, .syntax = 27 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 27 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -9461,9 +9462,9 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffc82001, .op = 0x21802000 }, .id = HEX_INS_J4_CMPLT_T_JUMPNV_T, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .imm_scale = 2, .syntax = 26 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = 
HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 26 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -9476,9 +9477,9 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffc82001, .op = 0x22400000 }, .id = HEX_INS_J4_CMPLTU_F_JUMPNV_NT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 14 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .imm_scale = 2, .syntax = 29 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 29 }, }, .pred = HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -9491,9 +9492,9 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffc82001, .op = 
0x22402000 }, .id = HEX_INS_J4_CMPLTU_F_JUMPNV_T, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 14 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .imm_scale = 2, .syntax = 28 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 28 }, }, .pred = HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -9506,9 +9507,9 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffc82001, .op = 0x22000000 }, .id = HEX_INS_J4_CMPLTU_T_JUMPNV_NT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .imm_scale = 2, .syntax = 28 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = 
HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 28 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -9521,9 +9522,9 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffc82001, .op = 0x22002000 }, .id = HEX_INS_J4_CMPLTU_T_JUMPNV_T, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .imm_scale = 2, .syntax = 27 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 27 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -9536,8 +9537,8 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffc83f01, .op = 
0x25c00000 }, .id = HEX_INS_J4_TSTBIT0_F_JUMPNV_NT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .imm_scale = 2, .syntax = 30 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 30 }, }, .pred = HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -9550,8 +9551,8 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffc83f01, .op = 0x25c02000 }, .id = HEX_INS_J4_TSTBIT0_F_JUMPNV_T, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .imm_scale = 2, .syntax = 29 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 29 }, }, .pred = 
HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -9564,8 +9565,8 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffc83f01, .op = 0x25800000 }, .id = HEX_INS_J4_TSTBIT0_T_JUMPNV_NT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .imm_scale = 2, .syntax = 29 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 29 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -9578,8 +9579,8 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffc83f01, .op = 0x25802000 }, .id = HEX_INS_J4_TSTBIT0_T_JUMPNV_T, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .imm_scale = 2, .syntax = 28 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | 
HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x2, 20 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 28 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -9592,9 +9593,9 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffe018e0, .op = 0x280000e0 }, .id = HEX_INS_V6_VL32UB_AI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 }, { 0x1, 13 } }, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 }, { 0x1, 13 } }, .isa_id = 's', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -9606,9 +9607,9 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffe038e0, .op = 0x290000e0 }, .id = HEX_INS_V6_VL32UB_PI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 } }, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | 
HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 } }, .isa_id = 's', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -9620,9 +9621,9 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffe01fe0, .op = 0x2b0000e0 }, .id = HEX_INS_V6_VL32UB_PPU, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .isa_id = 'u', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -9634,9 +9635,9 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffe018e0, .op = 0x28000000 }, .id = HEX_INS_V6_VL32B_AI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = 
HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 }, { 0x1, 13 } }, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 }, { 0x1, 13 } }, .isa_id = 's', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -9648,9 +9649,9 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffe018e0, .op = 0x28000020 }, .id = HEX_INS_V6_VL32B_CUR_AI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 }, { 0x1, 13 } }, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 }, { 0x1, 13 } }, .isa_id = 's', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -9662,10 +9663,10 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffe000e0, .op = 0x288000a0 }, .id = HEX_INS_V6_VL32B_CUR_NPRED_AI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { 
{ 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 19 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 }, { 0x1, 13 } }, .syntax = 20 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 }, { 0x1, 13 } }, .isa_id = 's', .syntax = 20 }, }, .pred = HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_VEC_FALSE, @@ -9678,10 +9679,10 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffe020e0, .op = 0x298000a0 }, .id = HEX_INS_V6_VL32B_CUR_NPRED_PI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 19 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 } }, .syntax = 21 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', 
.syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 } }, .isa_id = 's', .syntax = 21 }, }, .pred = HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_VEC_FALSE, @@ -9694,10 +9695,10 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffe007e0, .op = 0x2b8000a0 }, .id = HEX_INS_V6_VL32B_CUR_NPRED_PPU, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 19 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 21 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .isa_id = 'u', .syntax = 21 }, }, .pred = HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_VEC_FALSE, @@ -9710,9 +9711,9 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask 
= 0xffe038e0, .op = 0x29000020 }, .id = HEX_INS_V6_VL32B_CUR_PI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 } }, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 } }, .isa_id = 's', .syntax = 14 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -9724,9 +9725,9 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffe01fe0, .op = 0x2b000020 }, .id = HEX_INS_V6_VL32B_CUR_PPU, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } 
}, .reg_cls = HEX_REG_CLASS_MOD_REGS, .isa_id = 'u', .syntax = 14 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -9738,10 +9739,10 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffe000e0, .op = 0x28800080 }, .id = HEX_INS_V6_VL32B_CUR_PRED_AI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 18 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 }, { 0x1, 13 } }, .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 }, { 0x1, 13 } }, .isa_id = 's', .syntax = 19 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_VEC_TRUE, @@ -9754,10 +9755,10 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffe020e0, .op = 0x29800080 }, .id = HEX_INS_V6_VL32B_CUR_PRED_PI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, 
.masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 18 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 } }, .syntax = 20 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 } }, .isa_id = 's', .syntax = 20 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_VEC_TRUE, @@ -9770,10 +9771,10 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffe007e0, .op = 0x2b800080 }, .id = HEX_INS_V6_VL32B_CUR_PRED_PPU, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 18 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 20 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = 
HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .isa_id = 'u', .syntax = 20 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_VEC_TRUE, @@ -9786,10 +9787,10 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffe000e0, .op = 0x28800060 }, .id = HEX_INS_V6_VL32B_NPRED_AI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 15 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 }, { 0x1, 13 } }, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 }, { 0x1, 13 } }, .isa_id = 's', .syntax = 16 }, }, .pred = HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_VEC_FALSE, @@ -9802,10 +9803,10 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffe020e0, .op = 0x29800060 }, .id = HEX_INS_V6_VL32B_NPRED_PI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } 
}, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 15 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 } }, .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 } }, .isa_id = 's', .syntax = 17 }, }, .pred = HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_VEC_FALSE, @@ -9818,10 +9819,10 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffe007e0, .op = 0x2b800060 }, .id = HEX_INS_V6_VL32B_NPRED_PPU, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 15 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax 
= 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .isa_id = 'u', .syntax = 17 }, }, .pred = HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_VEC_FALSE, @@ -9834,9 +9835,9 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffe018e0, .op = 0x28400000 }, .id = HEX_INS_V6_VL32B_NT_AI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 }, { 0x1, 13 } }, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 }, { 0x1, 13 } }, .isa_id = 's', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -9848,9 +9849,9 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffe018e0, .op = 0x28400020 }, .id = HEX_INS_V6_VL32B_NT_CUR_AI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | 
HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 }, { 0x1, 13 } }, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 }, { 0x1, 13 } }, .isa_id = 's', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -9862,10 +9863,10 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffe000e0, .op = 0x28c000a0 }, .id = HEX_INS_V6_VL32B_NT_CUR_NPRED_AI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 19 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 }, { 0x1, 13 } }, .syntax = 20 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 }, { 0x1, 13 } }, .isa_id = 's', .syntax = 20 }, }, .pred = HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_VEC_FALSE, @@ -9878,10 
+9879,10 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffe020e0, .op = 0x29c000a0 }, .id = HEX_INS_V6_VL32B_NT_CUR_NPRED_PI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 19 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 } }, .syntax = 21 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 } }, .isa_id = 's', .syntax = 21 }, }, .pred = HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_VEC_FALSE, @@ -9894,10 +9895,10 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffe007e0, .op = 0x2bc000a0 }, .id = HEX_INS_V6_VL32B_NT_CUR_NPRED_PPU, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 19 }, - { .info 
= HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 21 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .isa_id = 'u', .syntax = 21 }, }, .pred = HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_VEC_FALSE, @@ -9910,9 +9911,9 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffe038e0, .op = 0x29400020 }, .id = HEX_INS_V6_VL32B_NT_CUR_PI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 } }, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 } }, .isa_id = 's', .syntax = 14 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -9924,9 +9925,9 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 
0xffe01fe0, .op = 0x2b400020 }, .id = HEX_INS_V6_VL32B_NT_CUR_PPU, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .isa_id = 'u', .syntax = 14 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -9938,10 +9939,10 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffe000e0, .op = 0x28c00080 }, .id = HEX_INS_V6_VL32B_NT_CUR_PRED_AI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 18 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 }, { 0x1, 13 } }, .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 6 }, + { .info = 
HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 }, { 0x1, 13 } }, .isa_id = 's', .syntax = 19 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_VEC_TRUE, @@ -9954,10 +9955,10 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffe020e0, .op = 0x29c00080 }, .id = HEX_INS_V6_VL32B_NT_CUR_PRED_PI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 18 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 } }, .syntax = 20 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 } }, .isa_id = 's', .syntax = 20 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_VEC_TRUE, @@ -9970,10 +9971,10 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffe007e0, .op = 0x2bc00080 }, .id = HEX_INS_V6_VL32B_NT_CUR_PRED_PPU, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = 
{ { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 18 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 20 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .isa_id = 'u', .syntax = 20 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_VEC_TRUE, @@ -9986,10 +9987,10 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffe000e0, .op = 0x28c00060 }, .id = HEX_INS_V6_VL32B_NT_NPRED_AI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 15 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 }, { 0x1, 13 } }, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | 
HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 }, { 0x1, 13 } }, .isa_id = 's', .syntax = 16 }, }, .pred = HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_VEC_FALSE, @@ -10002,10 +10003,10 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffe020e0, .op = 0x29c00060 }, .id = HEX_INS_V6_VL32B_NT_NPRED_PI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 15 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 } }, .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 } }, .isa_id = 's', .syntax = 17 }, }, .pred = HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_VEC_FALSE, @@ -10018,10 +10019,10 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { 
.mask = 0xffe007e0, .op = 0x2bc00060 }, .id = HEX_INS_V6_VL32B_NT_NPRED_PPU, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 15 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .isa_id = 'u', .syntax = 17 }, }, .pred = HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_VEC_FALSE, @@ -10034,9 +10035,9 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffe038e0, .op = 0x29400000 }, .id = HEX_INS_V6_VL32B_NT_PI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 } }, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', 
.syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 } }, .isa_id = 's', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -10048,9 +10049,9 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffe01fe0, .op = 0x2b400000 }, .id = HEX_INS_V6_VL32B_NT_PPU, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .isa_id = 'u', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -10062,10 +10063,10 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffe000e0, .op = 0x28c00040 }, .id = HEX_INS_V6_VL32B_NT_PRED_AI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, 
.syntax = 14 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 }, { 0x1, 13 } }, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 }, { 0x1, 13 } }, .isa_id = 's', .syntax = 15 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_VEC_TRUE, @@ -10078,10 +10079,10 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffe020e0, .op = 0x29c00040 }, .id = HEX_INS_V6_VL32B_NT_PRED_PI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 14 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 } }, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 
'x', .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 } }, .isa_id = 's', .syntax = 16 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_VEC_TRUE, @@ -10094,10 +10095,10 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffe007e0, .op = 0x2bc00040 }, .id = HEX_INS_V6_VL32B_NT_PRED_PPU, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 14 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .isa_id = 'u', .syntax = 16 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_VEC_TRUE, @@ -10110,9 +10111,9 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffe018e0, .op = 0x28400040 }, .id = HEX_INS_V6_VL32B_NT_TMP_AI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = 
HEX_REG_CLASS_INT_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 }, { 0x1, 13 } }, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 }, { 0x1, 13 } }, .isa_id = 's', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -10124,10 +10125,10 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffe000e0, .op = 0x28c000e0 }, .id = HEX_INS_V6_VL32B_NT_TMP_NPRED_AI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 19 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 }, { 0x1, 13 } }, .syntax = 20 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 }, { 0x1, 13 
} }, .isa_id = 's', .syntax = 20 }, }, .pred = HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_VEC_FALSE, @@ -10140,10 +10141,10 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffe020e0, .op = 0x29c000e0 }, .id = HEX_INS_V6_VL32B_NT_TMP_NPRED_PI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 19 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 } }, .syntax = 21 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 } }, .isa_id = 's', .syntax = 21 }, }, .pred = HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_VEC_FALSE, @@ -10156,10 +10157,10 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffe007e0, .op = 0x2bc000e0 }, .id = HEX_INS_V6_VL32B_NT_TMP_NPRED_PPU, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | 
HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 19 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 21 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .isa_id = 'u', .syntax = 21 }, }, .pred = HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_VEC_FALSE, @@ -10172,9 +10173,9 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffe038e0, .op = 0x29400040 }, .id = HEX_INS_V6_VL32B_NT_TMP_PI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 } }, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 } }, .isa_id = 's', .syntax = 14 }, }, .pred = HEX_NOPRED, .cond = 
RZ_TYPE_COND_AL, @@ -10186,9 +10187,9 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffe01fe0, .op = 0x2b400040 }, .id = HEX_INS_V6_VL32B_NT_TMP_PPU, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .isa_id = 'u', .syntax = 14 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -10200,10 +10201,10 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffe000e0, .op = 0x28c000c0 }, .id = HEX_INS_V6_VL32B_NT_TMP_PRED_AI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 18 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 }, { 0x1, 13 } }, .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | 
HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 }, { 0x1, 13 } }, .isa_id = 's', .syntax = 19 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_VEC_TRUE, @@ -10216,10 +10217,10 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffe020e0, .op = 0x29c000c0 }, .id = HEX_INS_V6_VL32B_NT_TMP_PRED_PI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 18 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 } }, .syntax = 20 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 } }, .isa_id = 's', .syntax = 20 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_VEC_TRUE, @@ -10232,10 +10233,10 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { 
.mask = 0xffe007e0, .op = 0x2bc000c0 }, .id = HEX_INS_V6_VL32B_NT_TMP_PRED_PPU, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 18 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 20 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .isa_id = 'u', .syntax = 20 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_VEC_TRUE, @@ -10248,9 +10249,9 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffe038e0, .op = 0x29000000 }, .id = HEX_INS_V6_VL32B_PI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 } }, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', 
.syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 } }, .isa_id = 's', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -10262,9 +10263,9 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffe01fe0, .op = 0x2b000000 }, .id = HEX_INS_V6_VL32B_PPU, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .isa_id = 'u', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -10276,10 +10277,10 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffe000e0, .op = 0x28800040 }, .id = HEX_INS_V6_VL32B_PRED_AI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 
14 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 }, { 0x1, 13 } }, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 }, { 0x1, 13 } }, .isa_id = 's', .syntax = 15 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_VEC_TRUE, @@ -10292,10 +10293,10 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffe020e0, .op = 0x29800040 }, .id = HEX_INS_V6_VL32B_PRED_PI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 14 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 } }, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 
14 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 } }, .isa_id = 's', .syntax = 16 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_VEC_TRUE, @@ -10308,10 +10309,10 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffe007e0, .op = 0x2b800040 }, .id = HEX_INS_V6_VL32B_PRED_PPU, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 14 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .isa_id = 'u', .syntax = 16 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_VEC_TRUE, @@ -10324,9 +10325,9 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffe018e0, .op = 0x28000040 }, .id = HEX_INS_V6_VL32B_TMP_AI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, 
- { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 }, { 0x1, 13 } }, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 }, { 0x1, 13 } }, .isa_id = 's', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -10338,10 +10339,10 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffe000e0, .op = 0x288000e0 }, .id = HEX_INS_V6_VL32B_TMP_NPRED_AI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 19 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 }, { 0x1, 13 } }, .syntax = 20 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 }, { 0x1, 13 } }, .isa_id = 's', .syntax = 20 }, }, 
.pred = HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_VEC_FALSE, @@ -10354,10 +10355,10 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffe020e0, .op = 0x298000e0 }, .id = HEX_INS_V6_VL32B_TMP_NPRED_PI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 19 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 } }, .syntax = 21 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 } }, .isa_id = 's', .syntax = 21 }, }, .pred = HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_VEC_FALSE, @@ -10370,10 +10371,10 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffe007e0, .op = 0x2b8000e0 }, .id = HEX_INS_V6_VL32B_TMP_NPRED_PPU, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 
16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 19 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 21 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .isa_id = 'u', .syntax = 21 }, }, .pred = HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_VEC_FALSE, @@ -10386,9 +10387,9 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffe038e0, .op = 0x29000040 }, .id = HEX_INS_V6_VL32B_TMP_PI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 } }, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 } }, .isa_id = 's', .syntax = 14 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -10400,9 +10401,9 @@ static const 
HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffe01fe0, .op = 0x2b000040 }, .id = HEX_INS_V6_VL32B_TMP_PPU, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .isa_id = 'u', .syntax = 14 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -10414,10 +10415,10 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffe000e0, .op = 0x288000c0 }, .id = HEX_INS_V6_VL32B_TMP_PRED_AI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 18 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 }, { 0x1, 13 } }, .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = 
HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 }, { 0x1, 13 } }, .isa_id = 's', .syntax = 19 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_VEC_TRUE, @@ -10430,10 +10431,10 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffe020e0, .op = 0x298000c0 }, .id = HEX_INS_V6_VL32B_TMP_PRED_PI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 18 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 } }, .syntax = 20 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 } }, .isa_id = 's', .syntax = 20 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_VEC_TRUE, @@ -10446,10 +10447,10 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffe007e0, .op = 0x2b8000c0 }, .id = 
HEX_INS_V6_VL32B_TMP_PRED_PPU, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 18 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 20 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'd', .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .isa_id = 'u', .syntax = 20 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_VEC_TRUE, @@ -10462,9 +10463,9 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffe018e0, .op = 0x282000e0 }, .id = HEX_INS_V6_VS32UB_AI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 }, { 0x1, 13 } }, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, 
.masks = { { 0x3, 8 }, { 0x1, 13 } }, .isa_id = 's', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 's', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -10476,10 +10477,10 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffe000e0, .op = 0x28a000e0 }, .id = HEX_INS_V6_VS32UB_NPRED_AI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 }, { 0x1, 13 } }, .syntax = 14 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 }, { 0x1, 13 } }, .isa_id = 's', .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 's', .syntax = 18 }, }, .pred = HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_VEC_FALSE, @@ -10492,10 +10493,10 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffe020e0, .op = 0x29a000e0 }, .id = HEX_INS_V6_VS32UB_NPRED_PI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 13 }, - 
{ .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 } }, .syntax = 15 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 } }, .isa_id = 's', .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 's', .syntax = 19 }, }, .pred = HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_VEC_FALSE, @@ -10508,10 +10509,10 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffe007e0, .op = 0x2ba000e0 }, .id = HEX_INS_V6_VS32UB_NPRED_PPU, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 15 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .isa_id = 'u', .syntax = 15 }, + 
{ .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 's', .syntax = 19 }, }, .pred = HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_VEC_FALSE, @@ -10524,9 +10525,9 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffe038e0, .op = 0x292000e0 }, .id = HEX_INS_V6_VS32UB_PI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 } }, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 } }, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 's', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -10538,9 +10539,9 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffe01fe0, .op = 0x2b2000e0 }, .id = HEX_INS_V6_VS32UB_PPU, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 6 }, + { .info = 
HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .isa_id = 'u', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 's', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -10552,10 +10553,10 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffe000e0, .op = 0x28a000c0 }, .id = HEX_INS_V6_VS32UB_PRED_AI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 }, { 0x1, 13 } }, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 }, { 0x1, 13 } }, .isa_id = 's', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 's', .syntax = 17 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_VEC_TRUE, @@ -10568,10 +10569,10 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffe020e0, .op = 0x29a000c0 }, .id = HEX_INS_V6_VS32UB_PRED_PI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls 
= HEX_REG_CLASS_INT_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 } }, .syntax = 14 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 } }, .isa_id = 's', .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 's', .syntax = 18 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_VEC_TRUE, @@ -10584,10 +10585,10 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffe007e0, .op = 0x2ba000c0 }, .id = HEX_INS_V6_VS32UB_PRED_PPU, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 14 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = 
HEX_REG_CLASS_MOD_REGS, .isa_id = 'u', .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 's', .syntax = 18 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_VEC_TRUE, @@ -10600,9 +10601,9 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffe018e0, .op = 0x28200000 }, .id = HEX_INS_V6_VS32B_AI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 }, { 0x1, 13 } }, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 }, { 0x1, 13 } }, .isa_id = 's', .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 's', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -10614,9 +10615,9 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffe018f8, .op = 0x28200020 }, .id = HEX_INS_V6_VS32B_NEW_AI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 }, { 0x1, 13 } }, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x3, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 5 }, + { .info = 
HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 }, { 0x1, 13 } }, .isa_id = 's', .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x3, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 's', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -10628,10 +10629,10 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffe000f8, .op = 0x28a00068 }, .id = HEX_INS_V6_VS32B_NEW_NPRED_AI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 }, { 0x1, 13 } }, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x3, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 }, { 0x1, 13 } }, .isa_id = 's', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x3, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 's', .syntax = 17 }, }, .pred = HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_VEC_FALSE, @@ -10644,10 +10645,10 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffe020f8, .op = 0x29a00068 }, .id = HEX_INS_V6_VS32B_NEW_NPRED_PI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | 
HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 } }, .syntax = 14 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x3, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 } }, .isa_id = 's', .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x3, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 's', .syntax = 18 }, }, .pred = HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_VEC_FALSE, @@ -10660,10 +10661,10 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffe007f8, .op = 0x2ba00068 }, .id = HEX_INS_V6_VS32B_NEW_NPRED_PPU, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 14 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x3, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 12 }, + { .info = 
HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .isa_id = 'u', .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x3, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 's', .syntax = 18 }, }, .pred = HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_VEC_FALSE, @@ -10676,9 +10677,9 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffe038f8, .op = 0x29200020 }, .id = HEX_INS_V6_VS32B_NEW_PI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 } }, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x3, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 } }, .isa_id = 's', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x3, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 's', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -10690,9 +10691,9 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffe01ff8, .op = 0x2b200020 }, .id = HEX_INS_V6_VS32B_NEW_PPU, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x3, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | 
HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .isa_id = 'u', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x3, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 's', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -10704,10 +10705,10 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffe000f8, .op = 0x28a00040 }, .id = HEX_INS_V6_VS32B_NEW_PRED_AI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 }, { 0x1, 13 } }, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x3, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 }, { 0x1, 13 } }, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x3, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 's', .syntax = 16 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_VEC_TRUE, @@ -10720,10 +10721,10 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffe020f8, .op = 0x29a00040 }, .id = HEX_INS_V6_VS32B_NEW_PRED_PI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = 
HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 } }, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x3, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 } }, .isa_id = 's', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x3, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 's', .syntax = 17 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_VEC_TRUE, @@ -10736,10 +10737,10 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffe007f8, .op = 0x2ba00040 }, .id = HEX_INS_V6_VS32B_NEW_PRED_PPU, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x3, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = 
HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .isa_id = 'u', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x3, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 's', .syntax = 17 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_VEC_TRUE, @@ -10752,10 +10753,10 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffe000e0, .op = 0x28a00020 }, .id = HEX_INS_V6_VS32B_NPRED_AI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 }, { 0x1, 13 } }, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 }, { 0x1, 13 } }, .isa_id = 's', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 's', .syntax = 17 }, }, .pred = HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_VEC_FALSE, @@ -10768,10 +10769,10 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffe020e0, .op = 0x29a00020 }, .id = HEX_INS_V6_VS32B_NPRED_PI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = 
HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 } }, .syntax = 14 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 } }, .isa_id = 's', .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 's', .syntax = 18 }, }, .pred = HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_VEC_FALSE, @@ -10784,10 +10785,10 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffe007e0, .op = 0x2ba00020 }, .id = HEX_INS_V6_VS32B_NPRED_PPU, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 14 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 12 
}, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .isa_id = 'u', .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 's', .syntax = 18 }, }, .pred = HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_VEC_FALSE, @@ -10800,10 +10801,10 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffe000e0, .op = 0x28800020 }, .id = HEX_INS_V6_VS32B_NQPRED_AI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 }, { 0x1, 13 } }, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .isa_id = 'v', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 }, { 0x1, 13 } }, .isa_id = 's', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 's', .syntax = 17 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -10815,10 +10816,10 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffe020e0, .op = 0x29800020 }, .id = HEX_INS_V6_VS32B_NQPRED_PI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } 
}, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 } }, .syntax = 14 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .isa_id = 'v', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 } }, .isa_id = 's', .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 's', .syntax = 18 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -10830,10 +10831,10 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffe007e0, .op = 0x2b800020 }, .id = HEX_INS_V6_VS32B_NQPRED_PPU, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 14 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .isa_id = 'v', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, 
.isa_id = 'u', .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 's', .syntax = 18 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -10845,9 +10846,9 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffe018e0, .op = 0x28600000 }, .id = HEX_INS_V6_VS32B_NT_AI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 }, { 0x1, 13 } }, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 }, { 0x1, 13 } }, .isa_id = 's', .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 's', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -10859,9 +10860,9 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffe018f8, .op = 0x28600020 }, .id = HEX_INS_V6_VS32B_NT_NEW_AI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 }, { 0x1, 13 } }, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x3, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | 
HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 }, { 0x1, 13 } }, .isa_id = 's', .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x3, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 's', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -10873,10 +10874,10 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffe000f8, .op = 0x28e00078 }, .id = HEX_INS_V6_VS32B_NT_NEW_NPRED_AI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 }, { 0x1, 13 } }, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x3, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 20 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 }, { 0x1, 13 } }, .isa_id = 's', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x3, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 's', .syntax = 20 }, }, .pred = HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_VEC_FALSE, @@ -10889,10 +10890,10 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffe020f8, .op = 0x29e00078 }, .id = HEX_INS_V6_VS32B_NT_NEW_NPRED_PI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | 
HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 } }, .syntax = 14 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x3, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 21 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 } }, .isa_id = 's', .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x3, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 's', .syntax = 21 }, }, .pred = HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_VEC_FALSE, @@ -10905,10 +10906,10 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffe007f8, .op = 0x2be00078 }, .id = HEX_INS_V6_VS32B_NT_NEW_NPRED_PPU, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 14 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x3, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 21 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 12 }, + { .info = 
HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .isa_id = 'u', .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x3, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 's', .syntax = 21 }, }, .pred = HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_VEC_FALSE, @@ -10921,9 +10922,9 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffe038f8, .op = 0x29600020 }, .id = HEX_INS_V6_VS32B_NT_NEW_PI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 } }, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x3, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 } }, .isa_id = 's', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x3, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 's', .syntax = 14 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -10935,9 +10936,9 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffe01ff8, .op = 0x2b600020 }, .id = HEX_INS_V6_VS32B_NT_NEW_PPU, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x3, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | 
HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .isa_id = 'u', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x3, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 's', .syntax = 14 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -10949,10 +10950,10 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffe000f8, .op = 0x28e00050 }, .id = HEX_INS_V6_VS32B_NT_NEW_PRED_AI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 }, { 0x1, 13 } }, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x3, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 }, { 0x1, 13 } }, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x3, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 's', .syntax = 19 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_VEC_TRUE, @@ -10965,10 +10966,10 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffe020f8, .op = 0x29e00050 }, .id = HEX_INS_V6_VS32B_NT_NEW_PRED_PI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls 
= HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 } }, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x3, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 20 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 } }, .isa_id = 's', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x3, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 's', .syntax = 20 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_VEC_TRUE, @@ -10981,10 +10982,10 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffe007f8, .op = 0x2be00050 }, .id = HEX_INS_V6_VS32B_NT_NEW_PRED_PPU, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x3, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 20 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, 
.reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .isa_id = 'u', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x3, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 's', .syntax = 20 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_VEC_TRUE, @@ -10997,10 +10998,10 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffe000e0, .op = 0x28e00020 }, .id = HEX_INS_V6_VS32B_NT_NPRED_AI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 }, { 0x1, 13 } }, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 20 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 }, { 0x1, 13 } }, .isa_id = 's', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 's', .syntax = 20 }, }, .pred = HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_VEC_FALSE, @@ -11013,10 +11014,10 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffe020e0, .op = 0x29e00020 }, .id = HEX_INS_V6_VS32B_NT_NPRED_PI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { 
.info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 } }, .syntax = 14 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 21 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 } }, .isa_id = 's', .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 's', .syntax = 21 }, }, .pred = HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_VEC_FALSE, @@ -11029,10 +11030,10 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffe007e0, .op = 0x2be00020 }, .id = HEX_INS_V6_VS32B_NT_NPRED_PPU, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 14 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 21 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', 
.syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .isa_id = 'u', .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 's', .syntax = 21 }, }, .pred = HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_VEC_FALSE, @@ -11045,10 +11046,10 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffe000e0, .op = 0x28c00020 }, .id = HEX_INS_V6_VS32B_NT_NQPRED_AI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 }, { 0x1, 13 } }, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 20 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .isa_id = 'v', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 }, { 0x1, 13 } }, .isa_id = 's', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 's', .syntax = 20 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -11060,10 +11061,10 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffe020e0, .op = 0x29c00020 }, .id = HEX_INS_V6_VS32B_NT_NQPRED_PI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, 
.masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 } }, .syntax = 14 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 21 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .isa_id = 'v', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 } }, .isa_id = 's', .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 's', .syntax = 21 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -11075,10 +11076,10 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffe007e0, .op = 0x2bc00020 }, .id = HEX_INS_V6_VS32B_NT_NQPRED_PPU, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 14 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 21 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .isa_id = 'v', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = 
HEX_REG_CLASS_MOD_REGS, .isa_id = 'u', .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 's', .syntax = 21 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -11090,9 +11091,9 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffe038e0, .op = 0x29600000 }, .id = HEX_INS_V6_VS32B_NT_PI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 } }, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 } }, .isa_id = 's', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 's', .syntax = 14 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -11104,9 +11105,9 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffe01fe0, .op = 0x2b600000 }, .id = HEX_INS_V6_VS32B_NT_PPU, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', 
.syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .isa_id = 'u', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 's', .syntax = 14 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -11118,10 +11119,10 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffe000e0, .op = 0x28e00000 }, .id = HEX_INS_V6_VS32B_NT_PRED_AI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 }, { 0x1, 13 } }, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 }, { 0x1, 13 } }, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 's', .syntax = 19 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_VEC_TRUE, @@ -11134,10 +11135,10 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffe020e0, .op = 0x29e00000 }, .id = HEX_INS_V6_VS32B_NT_PRED_PI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, 
.masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 } }, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 20 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 } }, .isa_id = 's', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 's', .syntax = 20 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_VEC_TRUE, @@ -11150,10 +11151,10 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffe007e0, .op = 0x2be00000 }, .id = HEX_INS_V6_VS32B_NT_PRED_PPU, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 20 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 
0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .isa_id = 'u', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 's', .syntax = 20 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_VEC_TRUE, @@ -11166,10 +11167,10 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffe000e0, .op = 0x28c00000 }, .id = HEX_INS_V6_VS32B_NT_QPRED_AI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 }, { 0x1, 13 } }, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .isa_id = 'v', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 }, { 0x1, 13 } }, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 's', .syntax = 19 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -11181,10 +11182,10 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffe020e0, .op = 0x29c00000 }, .id = HEX_INS_V6_VS32B_NT_QPRED_PI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 
}, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 } }, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 20 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .isa_id = 'v', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 } }, .isa_id = 's', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 's', .syntax = 20 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -11196,10 +11197,10 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffe007e0, .op = 0x2bc00000 }, .id = HEX_INS_V6_VS32B_NT_QPRED_PPU, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 20 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .isa_id = 'v', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .isa_id = 'u', .syntax = 13 }, + { .info = 
HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 's', .syntax = 20 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -11211,9 +11212,9 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffe038e0, .op = 0x29200000 }, .id = HEX_INS_V6_VS32B_PI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 } }, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 } }, .isa_id = 's', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 's', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -11225,9 +11226,9 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffe01fe0, .op = 0x2b200000 }, .id = HEX_INS_V6_VS32B_PPU, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 
13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .isa_id = 'u', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 's', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -11239,10 +11240,10 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffe000e0, .op = 0x28a00000 }, .id = HEX_INS_V6_VS32B_PRED_AI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 }, { 0x1, 13 } }, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 }, { 0x1, 13 } }, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 's', .syntax = 16 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_VEC_TRUE, @@ -11255,10 +11256,10 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffe020e0, .op = 0x29a00000 }, .id = HEX_INS_V6_VS32B_PRED_PI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, - { 
.info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 } }, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 } }, .isa_id = 's', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 's', .syntax = 17 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_VEC_TRUE, @@ -11271,10 +11272,10 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffe007e0, .op = 0x2ba00000 }, .id = HEX_INS_V6_VS32B_PRED_PPU, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .isa_id = 'u', .syntax = 13 }, + { 
.info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 's', .syntax = 17 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_VEC_TRUE, @@ -11287,10 +11288,10 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffe000e0, .op = 0x28800000 }, .id = HEX_INS_V6_VS32B_QPRED_AI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 }, { 0x1, 13 } }, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .isa_id = 'v', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 }, { 0x1, 13 } }, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 's', .syntax = 16 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -11302,10 +11303,10 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffe020e0, .op = 0x29800000 }, .id = HEX_INS_V6_VS32B_QPRED_PI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | 
HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 } }, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .isa_id = 'v', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 } }, .isa_id = 's', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 's', .syntax = 17 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -11317,10 +11318,10 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffe007e0, .op = 0x2b800000 }, .id = HEX_INS_V6_VS32B_QPRED_PPU, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .isa_id = 'v', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .isa_id = 'u', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, 
.isa_id = 's', .syntax = 17 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -11332,8 +11333,8 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffe018ff, .op = 0x28200028 }, .id = HEX_INS_V6_VS32B_SRLS_AI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 }, { 0x1, 13 } }, .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 }, { 0x1, 13 } }, .isa_id = 's', .syntax = 6 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -11345,8 +11346,8 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffe038ff, .op = 0x29200028 }, .id = HEX_INS_V6_VS32B_SRLS_PI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 } }, .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 } }, .isa_id = 's', .syntax = 7 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -11358,8 +11359,8 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffe01fff, .op = 0x2b200028 }, .id = HEX_INS_V6_VS32B_SRLS_PPU, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, 
.reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .isa_id = 'u', .syntax = 7 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -11371,9 +11372,9 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffe01fe0, .op = 0x2f000100 }, .id = HEX_INS_V6_VGATHERMH, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 17 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 18 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .isa_id = 'u', .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 19 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -11385,10 +11386,10 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffe01f80, .op = 0x2f000500 }, .id = HEX_INS_V6_VGATHERMHQ, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 23 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 24 }, - { .info = 
HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 25 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .isa_id = 's', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 23 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .isa_id = 'u', .syntax = 24 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 25 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -11400,9 +11401,9 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffe01fe0, .op = 0x2f000200 }, .id = HEX_INS_V6_VGATHERMHW, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 17 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 18 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .isa_id = 'u', .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'v', .syntax = 19 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -11414,10 +11415,10 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffe01f80, .op = 0x2f000600 }, .id = HEX_INS_V6_VGATHERMHWQ, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, 
.masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 23 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 24 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 25 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .isa_id = 's', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 23 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .isa_id = 'u', .syntax = 24 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'v', .syntax = 25 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -11429,9 +11430,9 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffe01fe0, .op = 0x2f000000 }, .id = HEX_INS_V6_VGATHERMW, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 17 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 18 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .isa_id = 'u', .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 19 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -11443,10 +11444,10 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffe01f80, .op = 
0x2f000400 }, .id = HEX_INS_V6_VGATHERMWQ, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 23 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 24 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 25 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .isa_id = 's', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 23 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .isa_id = 'u', .syntax = 24 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 25 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -11458,10 +11459,10 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffe000e0, .op = 0x2f200020 }, .id = HEX_INS_V6_VSCATTERMH, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .isa_id = 'u', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = 
{ { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'w', .syntax = 19 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -11473,10 +11474,10 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffe000e0, .op = 0x2f2000a0 }, .id = HEX_INS_V6_VSCATTERMH_ADD, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 20 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .isa_id = 'u', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'w', .syntax = 20 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -11488,11 +11489,11 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffe00080, .op = 0x2f800080 }, .id = HEX_INS_V6_VSCATTERMHQ, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 15 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 16 }, - { .info = 
HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 17 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 25 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .isa_id = 's', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .isa_id = 'u', .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'w', .syntax = 25 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -11504,10 +11505,10 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffe000e0, .op = 0x2f200040 }, .id = HEX_INS_V6_VSCATTERMHW, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .isa_id = 'u', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'v', .syntax = 11 }, + { .info = 
HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'w', .syntax = 19 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -11519,10 +11520,10 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffe000e0, .op = 0x2f2000c0 }, .id = HEX_INS_V6_VSCATTERMHW_ADD, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 20 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .isa_id = 'u', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'v', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'w', .syntax = 20 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -11534,11 +11535,11 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffe00080, .op = 0x2fa00000 }, .id = HEX_INS_V6_VSCATTERMHWQ, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 15 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 16 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | 
HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .syntax = 17 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 25 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .isa_id = 's', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .isa_id = 'u', .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_WR, .isa_id = 'v', .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'w', .syntax = 25 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -11550,10 +11551,10 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffe000e0, .op = 0x2f200000 }, .id = HEX_INS_V6_VSCATTERMW, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .isa_id = 'u', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { 
{ 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'w', .syntax = 19 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -11565,10 +11566,10 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffe000e0, .op = 0x2f200080 }, .id = HEX_INS_V6_VSCATTERMW_ADD, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 20 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .isa_id = 'u', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'w', .syntax = 20 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -11580,11 +11581,11 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffe00080, .op = 0x2f800000 }, .id = HEX_INS_V6_VSCATTERMWQ, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 15 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 16 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 17 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 
0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 25 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_HVX_QR, .isa_id = 's', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .isa_id = 'u', .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'v', .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'w', .syntax = 25 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -11596,8 +11597,8 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffe018ff, .op = 0x2c000000 }, .id = HEX_INS_V6_ZLD_AI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 }, { 0x1, 13 } }, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 }, { 0x1, 13 } }, .isa_id = 's', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -11609,8 +11610,8 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffe038ff, .op = 0x2d000000 }, .id = HEX_INS_V6_ZLD_PI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 } }, 
.syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 } }, .isa_id = 's', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -11622,8 +11623,8 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffe01fff, .op = 0x2d000001 }, .id = HEX_INS_V6_ZLD_PPU, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .isa_id = 'u', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -11635,9 +11636,9 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffe000ff, .op = 0x2c800000 }, .id = HEX_INS_V6_ZLD_PRED_AI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 15 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 }, { 0x1, 13 } }, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 15 }, + { .info = 
HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 }, { 0x1, 13 } }, .isa_id = 's', .syntax = 16 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_VEC_TRUE, @@ -11650,9 +11651,9 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffe020ff, .op = 0x2d800000 }, .id = HEX_INS_V6_ZLD_PRED_PI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 15 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 } }, .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 8 } }, .isa_id = 's', .syntax = 17 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_VEC_TRUE, @@ -11665,9 +11666,9 @@ static const HexInsnTemplate templates_normal_0x2[] = { .encoding = { .mask = 0xffe007ff, .op = 0x2d800001 }, .id = HEX_INS_V6_ZLD_PRED_PPU, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 15 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', 
.syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .isa_id = 'u', .syntax = 17 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_VEC_TRUE, @@ -11684,9 +11685,9 @@ static const HexInsnTemplate templates_normal_0x3[] = { .encoding = { .mask = 0xffe02060, .op = 0x3e000000 }, .id = HEX_INS_L4_ADD_MEMOPB_IO, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x6, 7 } }, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x6, 7 } }, .isa_id = 'u', .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -11698,9 +11699,9 @@ static const HexInsnTemplate templates_normal_0x3[] = { .encoding = { .mask = 0xffe02060, .op = 0x3e200000 }, .id = HEX_INS_L4_ADD_MEMOPH_IO, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x6, 7 } }, .imm_scale = 1, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { .info = 
HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x6, 7 } }, .isa_id = 'u', .imm_scale = 1, .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -11712,9 +11713,9 @@ static const HexInsnTemplate templates_normal_0x3[] = { .encoding = { .mask = 0xffe02060, .op = 0x3e400000 }, .id = HEX_INS_L4_ADD_MEMOPW_IO, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x6, 7 } }, .imm_scale = 2, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x6, 7 } }, .isa_id = 'u', .imm_scale = 2, .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -11726,9 +11727,9 @@ static const HexInsnTemplate templates_normal_0x3[] = { .encoding = { .mask = 0xffe02060, .op = 0x3e000040 }, .id = HEX_INS_L4_AND_MEMOPB_IO, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x6, 7 } }, .syntax = 6 }, - { .info 
= HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x6, 7 } }, .isa_id = 'u', .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -11740,9 +11741,9 @@ static const HexInsnTemplate templates_normal_0x3[] = { .encoding = { .mask = 0xffe02060, .op = 0x3e200040 }, .id = HEX_INS_L4_AND_MEMOPH_IO, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x6, 7 } }, .imm_scale = 1, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x6, 7 } }, .isa_id = 'u', .imm_scale = 1, .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -11754,9 +11755,9 @@ static const HexInsnTemplate templates_normal_0x3[] = { .encoding = { .mask = 0xffe02060, .op = 0x3e400040 }, .id = HEX_INS_L4_AND_MEMOPW_IO, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | 
HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x6, 7 } }, .imm_scale = 2, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x6, 7 } }, .isa_id = 'u', .imm_scale = 2, .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -11768,9 +11769,9 @@ static const HexInsnTemplate templates_normal_0x3[] = { .encoding = { .mask = 0xffe02060, .op = 0x3f000000 }, .id = HEX_INS_L4_IADD_MEMOPB_IO, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x6, 7 } }, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x5, 0 } }, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x6, 7 } }, .isa_id = 'u', .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x5, 0 } }, .isa_id = 'U', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -11782,9 +11783,9 @@ static const HexInsnTemplate templates_normal_0x3[] = { .encoding = { .mask = 0xffe02060, .op = 0x3f200000 }, .id = HEX_INS_L4_IADD_MEMOPH_IO, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 5 }, - { 
.info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x6, 7 } }, .imm_scale = 1, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x5, 0 } }, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x6, 7 } }, .isa_id = 'u', .imm_scale = 1, .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x5, 0 } }, .isa_id = 'U', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -11796,9 +11797,9 @@ static const HexInsnTemplate templates_normal_0x3[] = { .encoding = { .mask = 0xffe02060, .op = 0x3f400000 }, .id = HEX_INS_L4_IADD_MEMOPW_IO, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x6, 7 } }, .imm_scale = 2, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x5, 0 } }, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x6, 7 } }, .isa_id = 'u', .imm_scale = 2, .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x5, 0 } }, .isa_id = 'U', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -11810,9 +11811,9 @@ static const HexInsnTemplate templates_normal_0x3[] = { .encoding = { .mask = 0xffe02060, .op = 0x3f000040 }, .id = HEX_INS_L4_IAND_MEMOPB_IO, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 5 }, - { .info 
= HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x6, 7 } }, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x5, 0 } }, .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x6, 7 } }, .isa_id = 'u', .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x5, 0 } }, .isa_id = 'U', .syntax = 17 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -11824,9 +11825,9 @@ static const HexInsnTemplate templates_normal_0x3[] = { .encoding = { .mask = 0xffe02060, .op = 0x3f200040 }, .id = HEX_INS_L4_IAND_MEMOPH_IO, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x6, 7 } }, .imm_scale = 1, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x5, 0 } }, .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x6, 7 } }, .isa_id = 'u', .imm_scale = 1, .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x5, 0 } }, .isa_id = 'U', .syntax = 17 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -11838,9 +11839,9 @@ static const HexInsnTemplate templates_normal_0x3[] = { .encoding = { .mask = 0xffe02060, .op = 0x3f400040 }, .id = HEX_INS_L4_IAND_MEMOPW_IO, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | 
HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x6, 7 } }, .imm_scale = 2, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x5, 0 } }, .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x6, 7 } }, .isa_id = 'u', .imm_scale = 2, .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x5, 0 } }, .isa_id = 'U', .syntax = 17 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -11852,9 +11853,9 @@ static const HexInsnTemplate templates_normal_0x3[] = { .encoding = { .mask = 0xffe02060, .op = 0x3f000060 }, .id = HEX_INS_L4_IOR_MEMOPB_IO, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x6, 7 } }, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x5, 0 } }, .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x6, 7 } }, .isa_id = 'u', .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x5, 0 } }, .isa_id = 'U', .syntax = 17 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -11866,9 +11867,9 @@ static const HexInsnTemplate templates_normal_0x3[] = { .encoding = { .mask = 0xffe02060, .op = 0x3f200060 }, .id = HEX_INS_L4_IOR_MEMOPH_IO, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | 
HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x6, 7 } }, .imm_scale = 1, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x5, 0 } }, .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x6, 7 } }, .isa_id = 'u', .imm_scale = 1, .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x5, 0 } }, .isa_id = 'U', .syntax = 17 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -11880,9 +11881,9 @@ static const HexInsnTemplate templates_normal_0x3[] = { .encoding = { .mask = 0xffe02060, .op = 0x3f400060 }, .id = HEX_INS_L4_IOR_MEMOPW_IO, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x6, 7 } }, .imm_scale = 2, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x5, 0 } }, .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x6, 7 } }, .isa_id = 'u', .imm_scale = 2, .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x5, 0 } }, .isa_id = 'U', .syntax = 17 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -11894,9 +11895,9 @@ static const HexInsnTemplate templates_normal_0x3[] = { .encoding = { .mask = 0xffe02060, .op = 0x3f000020 }, .id = HEX_INS_L4_ISUB_MEMOPB_IO, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | 
HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x6, 7 } }, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x5, 0 } }, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x6, 7 } }, .isa_id = 'u', .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x5, 0 } }, .isa_id = 'U', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -11908,9 +11909,9 @@ static const HexInsnTemplate templates_normal_0x3[] = { .encoding = { .mask = 0xffe02060, .op = 0x3f200020 }, .id = HEX_INS_L4_ISUB_MEMOPH_IO, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x6, 7 } }, .imm_scale = 1, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x5, 0 } }, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x6, 7 } }, .isa_id = 'u', .imm_scale = 1, .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x5, 0 } }, .isa_id = 'U', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -11922,9 +11923,9 @@ static const HexInsnTemplate templates_normal_0x3[] = { .encoding = { .mask = 0xffe02060, .op = 0x3f400020 }, .id = HEX_INS_L4_ISUB_MEMOPW_IO, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, 
.masks = { { 0x6, 7 } }, .imm_scale = 2, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x5, 0 } }, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x6, 7 } }, .isa_id = 'u', .imm_scale = 2, .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x5, 0 } }, .isa_id = 'U', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -11936,10 +11937,10 @@ static const HexInsnTemplate templates_normal_0x3[] = { .encoding = { .mask = 0xffe00060, .op = 0x3a000000 }, .id = HEX_INS_L4_LOADRB_RR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .isa_id = 'u', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -11951,10 +11952,10 @@ static const HexInsnTemplate templates_normal_0x3[] = { .encoding = { .mask = 0xffe00060, .op = 
0x3ac00000 }, .id = HEX_INS_L4_LOADRD_RR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .isa_id = 'u', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -11966,10 +11967,10 @@ static const HexInsnTemplate templates_normal_0x3[] = { .encoding = { .mask = 0xffe00060, .op = 0x3a400000 }, .id = HEX_INS_L4_LOADRH_RR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, 
.reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .isa_id = 'u', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -11981,10 +11982,10 @@ static const HexInsnTemplate templates_normal_0x3[] = { .encoding = { .mask = 0xffe00060, .op = 0x3a800000 }, .id = HEX_INS_L4_LOADRI_RR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .isa_id = 'u', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -11996,10 +11997,10 @@ static const HexInsnTemplate templates_normal_0x3[] = { .encoding = { .mask = 0xffe00060, .op = 0x3a200000 }, .id = HEX_INS_L4_LOADRUB_RR, .ops = { - { .info = 
HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .isa_id = 'u', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -12011,10 +12012,10 @@ static const HexInsnTemplate templates_normal_0x3[] = { .encoding = { .mask = 0xffe00060, .op = 0x3a600000 }, .id = HEX_INS_L4_LOADRUH_RR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, 
.reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .isa_id = 'u', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -12026,9 +12027,9 @@ static const HexInsnTemplate templates_normal_0x3[] = { .encoding = { .mask = 0xffe02060, .op = 0x3e000060 }, .id = HEX_INS_L4_OR_MEMOPB_IO, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x6, 7 } }, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x6, 7 } }, .isa_id = 'u', .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -12040,9 +12041,9 @@ static const HexInsnTemplate templates_normal_0x3[] = { .encoding = { .mask = 0xffe02060, .op = 0x3e200060 }, .id = HEX_INS_L4_OR_MEMOPH_IO, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x6, 7 } }, .imm_scale = 1, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { 
.info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x6, 7 } }, .isa_id = 'u', .imm_scale = 1, .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -12054,9 +12055,9 @@ static const HexInsnTemplate templates_normal_0x3[] = { .encoding = { .mask = 0xffe02060, .op = 0x3e400060 }, .id = HEX_INS_L4_OR_MEMOPW_IO, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x6, 7 } }, .imm_scale = 2, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x6, 7 } }, .isa_id = 'u', .imm_scale = 2, .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -12068,11 +12069,11 @@ static const HexInsnTemplate templates_normal_0x3[] = { .encoding = { .mask = 0xffe00000, .op = 0x31000000 }, .id = HEX_INS_L4_PLOADRBF_RR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = 
HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 15 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 16 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .isa_id = 'u', .syntax = 18 }, }, .pred = HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -12085,11 +12086,11 @@ static const HexInsnTemplate templates_normal_0x3[] = { .encoding = { .mask = 0xffe00000, .op = 0x33000000 }, .id = HEX_INS_L4_PLOADRBFNEW_RR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 19 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 20 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .syntax = 22 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, 
.isa_id = 'v', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 20 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .isa_id = 'u', .syntax = 22 }, }, .pred = HEX_PRED_FALSE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -12102,11 +12103,11 @@ static const HexInsnTemplate templates_normal_0x3[] = { .encoding = { .mask = 0xffe00000, .op = 0x30000000 }, .id = HEX_INS_L4_PLOADRBT_RR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 14 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 15 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', 
.syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .isa_id = 'u', .syntax = 17 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -12119,11 +12120,11 @@ static const HexInsnTemplate templates_normal_0x3[] = { .encoding = { .mask = 0xffe00000, .op = 0x32000000 }, .id = HEX_INS_L4_PLOADRBTNEW_RR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 18 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 19 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .syntax = 21 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .isa_id = 'u', .syntax = 21 }, }, .pred = HEX_PRED_TRUE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -12136,11 +12137,11 @@ static const HexInsnTemplate templates_normal_0x3[] = { .encoding = { .mask = 0xffe00000, .op = 0x31c00000 }, .id = HEX_INS_L4_PLOADRDF_RR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, 
.masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 15 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 16 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .isa_id = 'u', .syntax = 18 }, }, .pred = HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -12153,11 +12154,11 @@ static const HexInsnTemplate templates_normal_0x3[] = { .encoding = { .mask = 0xffe00000, .op = 0x33c00000 }, .id = HEX_INS_L4_PLOADRDFNEW_RR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 19 }, - { 
.info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 20 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .syntax = 22 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 20 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .isa_id = 'u', .syntax = 22 }, }, .pred = HEX_PRED_FALSE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -12170,11 +12171,11 @@ static const HexInsnTemplate templates_normal_0x3[] = { .encoding = { .mask = 0xffe00000, .op = 0x30c00000 }, .id = HEX_INS_L4_PLOADRDT_RR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 14 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 15 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 4 
}, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .isa_id = 'u', .syntax = 17 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -12187,11 +12188,11 @@ static const HexInsnTemplate templates_normal_0x3[] = { .encoding = { .mask = 0xffe00000, .op = 0x32c00000 }, .id = HEX_INS_L4_PLOADRDTNEW_RR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 18 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 19 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .syntax = 21 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 
0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .isa_id = 'u', .syntax = 21 }, }, .pred = HEX_PRED_TRUE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -12204,11 +12205,11 @@ static const HexInsnTemplate templates_normal_0x3[] = { .encoding = { .mask = 0xffe00000, .op = 0x31400000 }, .id = HEX_INS_L4_PLOADRHF_RR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 15 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 16 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .isa_id = 'u', .syntax = 18 }, }, .pred = HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -12221,11 +12222,11 @@ static const HexInsnTemplate templates_normal_0x3[] = { .encoding = { .mask = 0xffe00000, .op = 0x33400000 }, .id = 
HEX_INS_L4_PLOADRHFNEW_RR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 19 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 20 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .syntax = 22 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 20 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .isa_id = 'u', .syntax = 22 }, }, .pred = HEX_PRED_FALSE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -12238,11 +12239,11 @@ static const HexInsnTemplate templates_normal_0x3[] = { .encoding = { .mask = 0xffe00000, .op = 0x30400000 }, .id = HEX_INS_L4_PLOADRHT_RR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 14 }, - { .info = 
HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 15 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .isa_id = 'u', .syntax = 17 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -12255,11 +12256,11 @@ static const HexInsnTemplate templates_normal_0x3[] = { .encoding = { .mask = 0xffe00000, .op = 0x32400000 }, .id = HEX_INS_L4_PLOADRHTNEW_RR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 18 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 19 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .syntax = 21 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 
} }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .isa_id = 'u', .syntax = 21 }, }, .pred = HEX_PRED_TRUE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -12272,11 +12273,11 @@ static const HexInsnTemplate templates_normal_0x3[] = { .encoding = { .mask = 0xffe00000, .op = 0x31800000 }, .id = HEX_INS_L4_PLOADRIF_RR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 15 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 16 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 
} }, .isa_id = 'u', .syntax = 18 }, }, .pred = HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -12289,11 +12290,11 @@ static const HexInsnTemplate templates_normal_0x3[] = { .encoding = { .mask = 0xffe00000, .op = 0x33800000 }, .id = HEX_INS_L4_PLOADRIFNEW_RR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 19 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 20 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .syntax = 22 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 20 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .isa_id = 'u', .syntax = 22 }, }, .pred = HEX_PRED_FALSE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -12306,11 +12307,11 @@ static const HexInsnTemplate templates_normal_0x3[] = { .encoding = { .mask = 0xffe00000, .op = 0x30800000 }, .id = HEX_INS_L4_PLOADRIT_RR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | 
HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 14 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 15 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .isa_id = 'u', .syntax = 17 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -12323,11 +12324,11 @@ static const HexInsnTemplate templates_normal_0x3[] = { .encoding = { .mask = 0xffe00000, .op = 0x32800000 }, .id = HEX_INS_L4_PLOADRITNEW_RR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 18 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 19 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, 
.syntax = 21 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .isa_id = 'u', .syntax = 21 }, }, .pred = HEX_PRED_TRUE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -12340,11 +12341,11 @@ static const HexInsnTemplate templates_normal_0x3[] = { .encoding = { .mask = 0xffe00000, .op = 0x31200000 }, .id = HEX_INS_L4_PLOADRUBF_RR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 16 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 17 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 16 }, + { 
.info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .isa_id = 'u', .syntax = 19 }, }, .pred = HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -12357,11 +12358,11 @@ static const HexInsnTemplate templates_normal_0x3[] = { .encoding = { .mask = 0xffe00000, .op = 0x33200000 }, .id = HEX_INS_L4_PLOADRUBFNEW_RR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 20 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 21 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .syntax = 23 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 20 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 21 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .isa_id = 'u', .syntax = 23 }, }, .pred = HEX_PRED_FALSE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -12374,11 +12375,11 @@ static const HexInsnTemplate templates_normal_0x3[] = { .encoding = { 
.mask = 0xffe00000, .op = 0x30200000 }, .id = HEX_INS_L4_PLOADRUBT_RR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 15 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 16 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .isa_id = 'u', .syntax = 18 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -12391,11 +12392,11 @@ static const HexInsnTemplate templates_normal_0x3[] = { .encoding = { .mask = 0xffe00000, .op = 0x32200000 }, .id = HEX_INS_L4_PLOADRUBTNEW_RR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 19 
}, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 20 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .syntax = 22 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 20 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .isa_id = 'u', .syntax = 22 }, }, .pred = HEX_PRED_TRUE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -12408,11 +12409,11 @@ static const HexInsnTemplate templates_normal_0x3[] = { .encoding = { .mask = 0xffe00000, .op = 0x31600000 }, .id = HEX_INS_L4_PLOADRUHF_RR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 16 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 17 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | 
HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .isa_id = 'u', .syntax = 19 }, }, .pred = HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -12425,11 +12426,11 @@ static const HexInsnTemplate templates_normal_0x3[] = { .encoding = { .mask = 0xffe00000, .op = 0x33600000 }, .id = HEX_INS_L4_PLOADRUHFNEW_RR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 20 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 21 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .syntax = 23 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 20 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 21 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | 
HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .isa_id = 'u', .syntax = 23 }, }, .pred = HEX_PRED_FALSE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -12442,11 +12443,11 @@ static const HexInsnTemplate templates_normal_0x3[] = { .encoding = { .mask = 0xffe00000, .op = 0x30600000 }, .id = HEX_INS_L4_PLOADRUHT_RR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 15 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 16 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .isa_id = 'u', .syntax = 18 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -12459,11 +12460,11 @@ static const HexInsnTemplate templates_normal_0x3[] = { .encoding = { .mask = 0xffe00000, .op = 0x32600000 }, .id = HEX_INS_L4_PLOADRUHTNEW_RR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = 
HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 19 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 20 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .syntax = 22 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 20 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .isa_id = 'u', .syntax = 22 }, }, .pred = HEX_PRED_TRUE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -12476,9 +12477,9 @@ static const HexInsnTemplate templates_normal_0x3[] = { .encoding = { .mask = 0xffe02060, .op = 0x3e000020 }, .id = HEX_INS_L4_SUB_MEMOPB_IO, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x6, 7 } }, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 5 }, 
+ { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x6, 7 } }, .isa_id = 'u', .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -12490,9 +12491,9 @@ static const HexInsnTemplate templates_normal_0x3[] = { .encoding = { .mask = 0xffe02060, .op = 0x3e200020 }, .id = HEX_INS_L4_SUB_MEMOPH_IO, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x6, 7 } }, .imm_scale = 1, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x6, 7 } }, .isa_id = 'u', .imm_scale = 1, .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -12504,9 +12505,9 @@ static const HexInsnTemplate templates_normal_0x3[] = { .encoding = { .mask = 0xffe02060, .op = 0x3e400020 }, .id = HEX_INS_L4_SUB_MEMOPW_IO, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x6, 7 } }, .imm_scale = 2, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { .info = 
HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x6, 7 } }, .isa_id = 'u', .imm_scale = 2, .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -12518,11 +12519,11 @@ static const HexInsnTemplate templates_normal_0x3[] = { .encoding = { .mask = 0xffe00000, .op = 0x35000000 }, .id = HEX_INS_S4_PSTORERBF_RR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .syntax = 15 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'u', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .isa_id = 'u', .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 19 }, }, .pred = HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -12535,11 +12536,11 @@ 
static const HexInsnTemplate templates_normal_0x3[] = { .encoding = { .mask = 0xffe00000, .op = 0x37000000 }, .id = HEX_INS_S4_PSTORERBFNEW_RR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 16 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 17 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .syntax = 19 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 23 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'u', .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .isa_id = 'u', .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 23 }, }, .pred = HEX_PRED_FALSE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -12552,11 +12553,11 @@ static const HexInsnTemplate templates_normal_0x3[] = { .encoding = { .mask = 0xffe00018, .op = 0x35a00000 }, .id = HEX_INS_S4_PSTORERBNEWF_RR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 13 
}, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .syntax = 15 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'u', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .isa_id = 'u', .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 19 }, }, .pred = HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -12569,11 +12570,11 @@ static const HexInsnTemplate templates_normal_0x3[] = { .encoding = { .mask = 0xffe00018, .op = 0x37a00000 }, .id = HEX_INS_S4_PSTORERBNEWFNEW_RR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 16 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 17 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .syntax = 19 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 23 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 5 }, + { .info = 
HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'u', .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .isa_id = 'u', .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 23 }, }, .pred = HEX_PRED_FALSE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -12586,11 +12587,11 @@ static const HexInsnTemplate templates_normal_0x3[] = { .encoding = { .mask = 0xffe00018, .op = 0x34a00000 }, .id = HEX_INS_S4_PSTORERBNEWT_RR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .syntax = 14 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'u', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .isa_id = 'u', .syntax = 14 }, + { .info = 
HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 18 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -12603,11 +12604,11 @@ static const HexInsnTemplate templates_normal_0x3[] = { .encoding = { .mask = 0xffe00018, .op = 0x36a00000 }, .id = HEX_INS_S4_PSTORERBNEWTNEW_RR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 15 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 16 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .syntax = 18 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 22 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'u', .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .isa_id = 'u', .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 22 }, }, .pred = HEX_PRED_TRUE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -12620,11 +12621,11 @@ static const HexInsnTemplate templates_normal_0x3[] = { .encoding = { .mask = 0xffe00000, .op = 0x34000000 }, .id = HEX_INS_S4_PSTORERBT_RR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, 
.masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .syntax = 14 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'u', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .isa_id = 'u', .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 18 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -12637,11 +12638,11 @@ static const HexInsnTemplate templates_normal_0x3[] = { .encoding = { .mask = 0xffe00000, .op = 0x36000000 }, .id = HEX_INS_S4_PSTORERBTNEW_RR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 15 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 16 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .syntax = 18 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = 
HEX_REG_CLASS_INT_REGS, .syntax = 22 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'u', .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .isa_id = 'u', .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 22 }, }, .pred = HEX_PRED_TRUE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -12654,11 +12655,11 @@ static const HexInsnTemplate templates_normal_0x3[] = { .encoding = { .mask = 0xffe00000, .op = 0x35c00000 }, .id = HEX_INS_S4_PSTORERDF_RR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .syntax = 15 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'u', .syntax = 13 }, + { .info = 
HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .isa_id = 'u', .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 19 }, }, .pred = HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -12671,11 +12672,11 @@ static const HexInsnTemplate templates_normal_0x3[] = { .encoding = { .mask = 0xffe00000, .op = 0x37c00000 }, .id = HEX_INS_S4_PSTORERDFNEW_RR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 16 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 17 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .syntax = 19 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 23 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'u', .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .isa_id = 'u', .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 23 }, }, .pred = HEX_PRED_FALSE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -12688,11 +12689,11 @@ static const HexInsnTemplate 
templates_normal_0x3[] = { .encoding = { .mask = 0xffe00000, .op = 0x34c00000 }, .id = HEX_INS_S4_PSTORERDT_RR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .syntax = 14 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'u', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .isa_id = 'u', .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 18 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -12705,11 +12706,11 @@ static const HexInsnTemplate templates_normal_0x3[] = { .encoding = { .mask = 0xffe00000, .op = 0x36c00000 }, .id = HEX_INS_S4_PSTORERDTNEW_RR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 15 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = 
HEX_REG_CLASS_INT_REGS, .syntax = 16 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .syntax = 18 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 22 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'u', .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .isa_id = 'u', .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 22 }, }, .pred = HEX_PRED_TRUE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -12722,11 +12723,11 @@ static const HexInsnTemplate templates_normal_0x3[] = { .encoding = { .mask = 0xffe00000, .op = 0x35600000 }, .id = HEX_INS_S4_PSTORERFF_RR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .syntax = 15 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 5 }, + { .info = 
HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'u', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .isa_id = 'u', .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 19 }, }, .pred = HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -12739,11 +12740,11 @@ static const HexInsnTemplate templates_normal_0x3[] = { .encoding = { .mask = 0xffe00000, .op = 0x37600000 }, .id = HEX_INS_S4_PSTORERFFNEW_RR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 16 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 17 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .syntax = 19 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 23 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'u', .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .isa_id = 'u', .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', 
.syntax = 23 }, }, .pred = HEX_PRED_FALSE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -12756,11 +12757,11 @@ static const HexInsnTemplate templates_normal_0x3[] = { .encoding = { .mask = 0xffe00000, .op = 0x34600000 }, .id = HEX_INS_S4_PSTORERFT_RR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .syntax = 14 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'u', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .isa_id = 'u', .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 18 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -12773,11 +12774,11 @@ static const HexInsnTemplate templates_normal_0x3[] = { .encoding = { .mask = 0xffe00000, .op = 0x36600000 }, .id = HEX_INS_S4_PSTORERFTNEW_RR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 15 }, - { .info = 
HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 16 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .syntax = 18 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 22 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'u', .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .isa_id = 'u', .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 22 }, }, .pred = HEX_PRED_TRUE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -12790,11 +12791,11 @@ static const HexInsnTemplate templates_normal_0x3[] = { .encoding = { .mask = 0xffe00000, .op = 0x35400000 }, .id = HEX_INS_S4_PSTORERHF_RR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .syntax = 15 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 5 }, + { .info = 
HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'u', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .isa_id = 'u', .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 19 }, }, .pred = HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -12807,11 +12808,11 @@ static const HexInsnTemplate templates_normal_0x3[] = { .encoding = { .mask = 0xffe00000, .op = 0x37400000 }, .id = HEX_INS_S4_PSTORERHFNEW_RR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 16 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 17 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .syntax = 19 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 23 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'u', .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .isa_id = 'u', .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', 
.syntax = 23 }, }, .pred = HEX_PRED_FALSE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -12824,11 +12825,11 @@ static const HexInsnTemplate templates_normal_0x3[] = { .encoding = { .mask = 0xffe00018, .op = 0x35a00008 }, .id = HEX_INS_S4_PSTORERHNEWF_RR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .syntax = 15 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'u', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .isa_id = 'u', .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 19 }, }, .pred = HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -12841,11 +12842,11 @@ static const HexInsnTemplate templates_normal_0x3[] = { .encoding = { .mask = 0xffe00018, .op = 0x37a00008 }, .id = HEX_INS_S4_PSTORERHNEWFNEW_RR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 
0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 16 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 17 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .syntax = 19 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 23 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'u', .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .isa_id = 'u', .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 23 }, }, .pred = HEX_PRED_FALSE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -12858,11 +12859,11 @@ static const HexInsnTemplate templates_normal_0x3[] = { .encoding = { .mask = 0xffe00018, .op = 0x34a00008 }, .id = HEX_INS_S4_PSTORERHNEWT_RR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .syntax = 14 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 0 } }, .reg_cls = 
HEX_REG_CLASS_INT_REGS, .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'u', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .isa_id = 'u', .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 18 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -12875,11 +12876,11 @@ static const HexInsnTemplate templates_normal_0x3[] = { .encoding = { .mask = 0xffe00018, .op = 0x36a00008 }, .id = HEX_INS_S4_PSTORERHNEWTNEW_RR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 15 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 16 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .syntax = 18 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 22 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'u', .syntax = 16 }, + { .info = 
HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .isa_id = 'u', .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 22 }, }, .pred = HEX_PRED_TRUE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -12892,11 +12893,11 @@ static const HexInsnTemplate templates_normal_0x3[] = { .encoding = { .mask = 0xffe00000, .op = 0x34400000 }, .id = HEX_INS_S4_PSTORERHT_RR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .syntax = 14 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'u', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .isa_id = 'u', .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 18 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -12909,11 +12910,11 @@ static const HexInsnTemplate templates_normal_0x3[] = { .encoding = { .mask = 0xffe00000, .op = 0x36400000 }, .id = 
HEX_INS_S4_PSTORERHTNEW_RR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 15 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 16 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .syntax = 18 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 22 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'u', .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .isa_id = 'u', .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 22 }, }, .pred = HEX_PRED_TRUE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -12926,11 +12927,11 @@ static const HexInsnTemplate templates_normal_0x3[] = { .encoding = { .mask = 0xffe00000, .op = 0x35800000 }, .id = HEX_INS_S4_PSTORERIF_RR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, 
.syntax = 15 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'u', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .isa_id = 'u', .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 19 }, }, .pred = HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -12943,11 +12944,11 @@ static const HexInsnTemplate templates_normal_0x3[] = { .encoding = { .mask = 0xffe00000, .op = 0x37800000 }, .id = HEX_INS_S4_PSTORERIFNEW_RR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 16 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 17 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .syntax = 19 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 23 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'u', 
.syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .isa_id = 'u', .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 23 }, }, .pred = HEX_PRED_FALSE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -12960,11 +12961,11 @@ static const HexInsnTemplate templates_normal_0x3[] = { .encoding = { .mask = 0xffe00018, .op = 0x35a00010 }, .id = HEX_INS_S4_PSTORERINEWF_RR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .syntax = 15 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'u', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .isa_id = 'u', .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 19 }, }, .pred = HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -12977,11 +12978,11 @@ static const HexInsnTemplate 
templates_normal_0x3[] = { .encoding = { .mask = 0xffe00018, .op = 0x37a00010 }, .id = HEX_INS_S4_PSTORERINEWFNEW_RR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 16 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 17 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .syntax = 19 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 23 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'u', .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .isa_id = 'u', .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 23 }, }, .pred = HEX_PRED_FALSE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -12994,11 +12995,11 @@ static const HexInsnTemplate templates_normal_0x3[] = { .encoding = { .mask = 0xffe00018, .op = 0x34a00010 }, .id = HEX_INS_S4_PSTORERINEWT_RR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls 
= HEX_REG_CLASS_INT_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .syntax = 14 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'u', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .isa_id = 'u', .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 18 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -13011,11 +13012,11 @@ static const HexInsnTemplate templates_normal_0x3[] = { .encoding = { .mask = 0xffe00018, .op = 0x36a00010 }, .id = HEX_INS_S4_PSTORERINEWTNEW_RR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 15 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 16 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .syntax = 18 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 22 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax 
= 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'u', .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .isa_id = 'u', .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 22 }, }, .pred = HEX_PRED_TRUE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -13028,11 +13029,11 @@ static const HexInsnTemplate templates_normal_0x3[] = { .encoding = { .mask = 0xffe00000, .op = 0x34800000 }, .id = HEX_INS_S4_PSTORERIT_RR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .syntax = 14 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'u', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .isa_id = 'u', .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 
0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 18 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -13045,11 +13046,11 @@ static const HexInsnTemplate templates_normal_0x3[] = { .encoding = { .mask = 0xffe00000, .op = 0x36800000 }, .id = HEX_INS_S4_PSTORERITNEW_RR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 15 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 16 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .syntax = 18 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 22 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'u', .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .isa_id = 'u', .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 22 }, }, .pred = HEX_PRED_TRUE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -13062,9 +13063,9 @@ static const HexInsnTemplate templates_normal_0x3[] = { .encoding = { .mask = 0xffe00000, .op = 0x3c000000 }, .id = HEX_INS_S4_STOREIRB_IO, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x6, 7 } }, 
.syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 0 }, { 0x1, 13 } }, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x6, 7 } }, .isa_id = 'u', .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 0 }, { 0x1, 13 } }, .isa_id = 'S', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -13076,10 +13077,10 @@ static const HexInsnTemplate templates_normal_0x3[] = { .encoding = { .mask = 0xffe00000, .op = 0x38800000 }, .id = HEX_INS_S4_STOREIRBF_IO, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x6, 7 } }, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x5, 0 }, { 0x1, 13 } }, .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x6, 7 } }, .isa_id = 'u', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x5, 0 }, { 0x1, 13 } }, .isa_id = 'S', .syntax = 17 }, }, .pred = HEX_PRED_FALSE, .cond = 
RZ_TYPE_COND_HEX_SCL_FALSE, @@ -13092,10 +13093,10 @@ static const HexInsnTemplate templates_normal_0x3[] = { .encoding = { .mask = 0xffe00000, .op = 0x39800000 }, .id = HEX_INS_S4_STOREIRBFNEW_IO, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 16 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x6, 7 } }, .syntax = 17 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x5, 0 }, { 0x1, 13 } }, .syntax = 21 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x6, 7 } }, .isa_id = 'u', .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x5, 0 }, { 0x1, 13 } }, .isa_id = 'S', .syntax = 21 }, }, .pred = HEX_PRED_FALSE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -13108,10 +13109,10 @@ static const HexInsnTemplate templates_normal_0x3[] = { .encoding = { .mask = 0xffe00000, .op = 0x38000000 }, .id = HEX_INS_S4_STOREIRBT_IO, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x6, 7 } }, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, 
.masks = { { 0x5, 0 }, { 0x1, 13 } }, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x6, 7 } }, .isa_id = 'u', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x5, 0 }, { 0x1, 13 } }, .isa_id = 'S', .syntax = 16 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -13124,10 +13125,10 @@ static const HexInsnTemplate templates_normal_0x3[] = { .encoding = { .mask = 0xffe00000, .op = 0x39000000 }, .id = HEX_INS_S4_STOREIRBTNEW_IO, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 15 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x6, 7 } }, .syntax = 16 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x5, 0 }, { 0x1, 13 } }, .syntax = 20 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x6, 7 } }, .isa_id = 'u', .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x5, 0 }, { 0x1, 13 } }, .isa_id = 'S', .syntax = 20 }, }, .pred = HEX_PRED_TRUE | HEX_PRED_NEW, .cond = 
RZ_TYPE_COND_HEX_SCL_TRUE, @@ -13140,9 +13141,9 @@ static const HexInsnTemplate templates_normal_0x3[] = { .encoding = { .mask = 0xffe00000, .op = 0x3c200000 }, .id = HEX_INS_S4_STOREIRH_IO, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x6, 7 } }, .imm_scale = 1, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 0 }, { 0x1, 13 } }, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x6, 7 } }, .isa_id = 'u', .imm_scale = 1, .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 0 }, { 0x1, 13 } }, .isa_id = 'S', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -13154,10 +13155,10 @@ static const HexInsnTemplate templates_normal_0x3[] = { .encoding = { .mask = 0xffe00000, .op = 0x38a00000 }, .id = HEX_INS_S4_STOREIRHF_IO, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x6, 7 } }, .imm_scale = 1, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x5, 0 }, { 0x1, 13 } }, .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 
0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x6, 7 } }, .isa_id = 'u', .imm_scale = 1, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x5, 0 }, { 0x1, 13 } }, .isa_id = 'S', .syntax = 17 }, }, .pred = HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -13170,10 +13171,10 @@ static const HexInsnTemplate templates_normal_0x3[] = { .encoding = { .mask = 0xffe00000, .op = 0x39a00000 }, .id = HEX_INS_S4_STOREIRHFNEW_IO, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 16 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x6, 7 } }, .imm_scale = 1, .syntax = 17 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x5, 0 }, { 0x1, 13 } }, .syntax = 21 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x6, 7 } }, .isa_id = 'u', .imm_scale = 1, .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x5, 0 }, { 0x1, 13 } }, .isa_id = 'S', .syntax = 21 }, }, .pred = HEX_PRED_FALSE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -13186,10 +13187,10 @@ static const HexInsnTemplate templates_normal_0x3[] = { .encoding = { .mask = 0xffe00000, .op = 0x38200000 }, .id = 
HEX_INS_S4_STOREIRHT_IO, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x6, 7 } }, .imm_scale = 1, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x5, 0 }, { 0x1, 13 } }, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x6, 7 } }, .isa_id = 'u', .imm_scale = 1, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x5, 0 }, { 0x1, 13 } }, .isa_id = 'S', .syntax = 16 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -13202,10 +13203,10 @@ static const HexInsnTemplate templates_normal_0x3[] = { .encoding = { .mask = 0xffe00000, .op = 0x39200000 }, .id = HEX_INS_S4_STOREIRHTNEW_IO, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 15 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x6, 7 } }, .imm_scale = 1, .syntax = 16 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x5, 0 }, { 0x1, 13 } }, .syntax = 20 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = 
HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x6, 7 } }, .isa_id = 'u', .imm_scale = 1, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x5, 0 }, { 0x1, 13 } }, .isa_id = 'S', .syntax = 20 }, }, .pred = HEX_PRED_TRUE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -13218,9 +13219,9 @@ static const HexInsnTemplate templates_normal_0x3[] = { .encoding = { .mask = 0xffe00000, .op = 0x3c400000 }, .id = HEX_INS_S4_STOREIRI_IO, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x6, 7 } }, .imm_scale = 2, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 0 }, { 0x1, 13 } }, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x6, 7 } }, .isa_id = 'u', .imm_scale = 2, .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 0 }, { 0x1, 13 } }, .isa_id = 'S', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -13232,10 +13233,10 @@ static const HexInsnTemplate templates_normal_0x3[] = { .encoding = { .mask = 0xffe00000, .op = 0x38c00000 }, .id = HEX_INS_S4_STOREIRIF_IO, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = 
HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x6, 7 } }, .imm_scale = 2, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x5, 0 }, { 0x1, 13 } }, .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x6, 7 } }, .isa_id = 'u', .imm_scale = 2, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x5, 0 }, { 0x1, 13 } }, .isa_id = 'S', .syntax = 17 }, }, .pred = HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -13248,10 +13249,10 @@ static const HexInsnTemplate templates_normal_0x3[] = { .encoding = { .mask = 0xffe00000, .op = 0x39c00000 }, .id = HEX_INS_S4_STOREIRIFNEW_IO, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 16 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x6, 7 } }, .imm_scale = 2, .syntax = 17 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x5, 0 }, { 0x1, 13 } }, .syntax = 21 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', 
.syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x6, 7 } }, .isa_id = 'u', .imm_scale = 2, .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x5, 0 }, { 0x1, 13 } }, .isa_id = 'S', .syntax = 21 }, }, .pred = HEX_PRED_FALSE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -13264,10 +13265,10 @@ static const HexInsnTemplate templates_normal_0x3[] = { .encoding = { .mask = 0xffe00000, .op = 0x38400000 }, .id = HEX_INS_S4_STOREIRIT_IO, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x6, 7 } }, .imm_scale = 2, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x5, 0 }, { 0x1, 13 } }, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x6, 7 } }, .isa_id = 'u', .imm_scale = 2, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x5, 0 }, { 0x1, 13 } }, .isa_id = 'S', .syntax = 16 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -13280,10 +13281,10 @@ static const HexInsnTemplate templates_normal_0x3[] = { .encoding = { .mask = 0xffe00000, .op = 0x39400000 }, .id = HEX_INS_S4_STOREIRITNEW_IO, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = 
{ { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 15 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x6, 7 } }, .imm_scale = 2, .syntax = 16 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x5, 0 }, { 0x1, 13 } }, .syntax = 20 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x6, 7 } }, .isa_id = 'u', .imm_scale = 2, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x5, 0 }, { 0x1, 13 } }, .isa_id = 'S', .syntax = 20 }, }, .pred = HEX_PRED_TRUE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -13296,10 +13297,10 @@ static const HexInsnTemplate templates_normal_0x3[] = { .encoding = { .mask = 0xffe00060, .op = 0x3b000000 }, .id = HEX_INS_S4_STORERB_RR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = 
HEX_REG_CLASS_INT_REGS, .isa_id = 'u', .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .isa_id = 'u', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -13311,10 +13312,10 @@ static const HexInsnTemplate templates_normal_0x3[] = { .encoding = { .mask = 0xffe00078, .op = 0x3ba00000 }, .id = HEX_INS_S4_STORERBNEW_RR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'u', .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .isa_id = 'u', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -13326,10 +13327,10 @@ static const HexInsnTemplate templates_normal_0x3[] = { .encoding = { .mask = 0xffe00060, .op = 0x3bc00000 }, .id = HEX_INS_S4_STORERD_RR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks 
= { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'u', .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .isa_id = 'u', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -13341,10 +13342,10 @@ static const HexInsnTemplate templates_normal_0x3[] = { .encoding = { .mask = 0xffe00060, .op = 0x3b600000 }, .id = HEX_INS_S4_STORERF_RR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'u', .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .isa_id = 'u', .syntax = 
8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -13356,10 +13357,10 @@ static const HexInsnTemplate templates_normal_0x3[] = { .encoding = { .mask = 0xffe00060, .op = 0x3b400000 }, .id = HEX_INS_S4_STORERH_RR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'u', .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .isa_id = 'u', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -13371,10 +13372,10 @@ static const HexInsnTemplate templates_normal_0x3[] = { .encoding = { .mask = 0xffe00078, .op = 0x3ba00008 }, .id = HEX_INS_S4_STORERHNEW_RR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | 
HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'u', .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .isa_id = 'u', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -13386,10 +13387,10 @@ static const HexInsnTemplate templates_normal_0x3[] = { .encoding = { .mask = 0xffe00060, .op = 0x3b800000 }, .id = HEX_INS_S4_STORERI_RR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'u', .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .isa_id = 'u', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -13401,10 +13402,10 @@ static const HexInsnTemplate 
templates_normal_0x3[] = { .encoding = { .mask = 0xffe00078, .op = 0x3ba00010 }, .id = HEX_INS_S4_STORERINEW_RR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'u', .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .isa_id = 'u', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -13420,8 +13421,8 @@ static const HexInsnTemplate templates_normal_0x4[] = { .encoding = { .mask = 0xf9e00000, .op = 0x49000000 }, .id = HEX_INS_L2_LOADRBGP, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x9, 5 }, { 0x5, 16 }, { 0x2, 25 } }, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | 
HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x9, 5 }, { 0x5, 16 }, { 0x2, 25 } }, .isa_id = 'u', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -13433,8 +13434,8 @@ static const HexInsnTemplate templates_normal_0x4[] = { .encoding = { .mask = 0xf9e00000, .op = 0x49c00000 }, .id = HEX_INS_L2_LOADRDGP, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x9, 5 }, { 0x5, 16 }, { 0x2, 25 } }, .imm_scale = 3, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x9, 5 }, { 0x5, 16 }, { 0x2, 25 } }, .isa_id = 'u', .imm_scale = 3, .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -13446,8 +13447,8 @@ static const HexInsnTemplate templates_normal_0x4[] = { .encoding = { .mask = 0xf9e00000, .op = 0x49400000 }, .id = HEX_INS_L2_LOADRHGP, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x9, 5 }, { 0x5, 16 }, { 0x2, 25 } }, .imm_scale = 1, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x9, 5 }, { 0x5, 16 }, { 0x2, 
25 } }, .isa_id = 'u', .imm_scale = 1, .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -13459,8 +13460,8 @@ static const HexInsnTemplate templates_normal_0x4[] = { .encoding = { .mask = 0xf9e00000, .op = 0x49800000 }, .id = HEX_INS_L2_LOADRIGP, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x9, 5 }, { 0x5, 16 }, { 0x2, 25 } }, .imm_scale = 2, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x9, 5 }, { 0x5, 16 }, { 0x2, 25 } }, .isa_id = 'u', .imm_scale = 2, .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -13472,8 +13473,8 @@ static const HexInsnTemplate templates_normal_0x4[] = { .encoding = { .mask = 0xf9e00000, .op = 0x49200000 }, .id = HEX_INS_L2_LOADRUBGP, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x9, 5 }, { 0x5, 16 }, { 0x2, 25 } }, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x9, 5 }, { 0x5, 16 }, { 0x2, 25 } }, .isa_id = 'u', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -13485,8 +13486,8 @@ static const HexInsnTemplate 
templates_normal_0x4[] = { .encoding = { .mask = 0xf9e00000, .op = 0x49600000 }, .id = HEX_INS_L2_LOADRUHGP, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x9, 5 }, { 0x5, 16 }, { 0x2, 25 } }, .imm_scale = 1, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x9, 5 }, { 0x5, 16 }, { 0x2, 25 } }, .isa_id = 'u', .imm_scale = 1, .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -13498,10 +13499,10 @@ static const HexInsnTemplate templates_normal_0x4[] = { .encoding = { .mask = 0xffe02000, .op = 0x45000000 }, .id = HEX_INS_L2_PLOADRBF_IO, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 15 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x6, 5 } }, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 't', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 
15 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x6, 5 } }, .isa_id = 'u', .syntax = 16 }, }, .pred = HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -13514,10 +13515,10 @@ static const HexInsnTemplate templates_normal_0x4[] = { .encoding = { .mask = 0xffe02000, .op = 0x47000000 }, .id = HEX_INS_L2_PLOADRBFNEW_IO, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 19 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x6, 5 } }, .syntax = 20 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 't', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x6, 5 } }, .isa_id = 'u', .syntax = 20 }, }, .pred = HEX_PRED_FALSE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -13530,10 +13531,10 @@ static const HexInsnTemplate templates_normal_0x4[] = { .encoding = { .mask = 0xffe02000, .op = 0x41000000 }, .id = HEX_INS_L2_PLOADRBT_IO, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } 
}, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 14 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x6, 5 } }, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 't', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x6, 5 } }, .isa_id = 'u', .syntax = 15 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -13546,10 +13547,10 @@ static const HexInsnTemplate templates_normal_0x4[] = { .encoding = { .mask = 0xffe02000, .op = 0x43000000 }, .id = HEX_INS_L2_PLOADRBTNEW_IO, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 18 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x6, 5 } }, .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 't', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 10 }, + { .info = 
HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x6, 5 } }, .isa_id = 'u', .syntax = 19 }, }, .pred = HEX_PRED_TRUE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -13562,10 +13563,10 @@ static const HexInsnTemplate templates_normal_0x4[] = { .encoding = { .mask = 0xffe02000, .op = 0x45c00000 }, .id = HEX_INS_L2_PLOADRDF_IO, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 15 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x6, 5 } }, .imm_scale = 3, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 't', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x6, 5 } }, .isa_id = 'u', .imm_scale = 3, .syntax = 16 }, }, .pred = HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -13578,10 +13579,10 @@ static const HexInsnTemplate templates_normal_0x4[] = { .encoding = { .mask = 0xffe02000, .op = 0x47c00000 }, .id = HEX_INS_L2_PLOADRDFNEW_IO, .ops 
= { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 19 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x6, 5 } }, .imm_scale = 3, .syntax = 20 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 't', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x6, 5 } }, .isa_id = 'u', .imm_scale = 3, .syntax = 20 }, }, .pred = HEX_PRED_FALSE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -13594,10 +13595,10 @@ static const HexInsnTemplate templates_normal_0x4[] = { .encoding = { .mask = 0xffe02000, .op = 0x41c00000 }, .id = HEX_INS_L2_PLOADRDT_IO, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 14 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | 
HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x6, 5 } }, .imm_scale = 3, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 't', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x6, 5 } }, .isa_id = 'u', .imm_scale = 3, .syntax = 15 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -13610,10 +13611,10 @@ static const HexInsnTemplate templates_normal_0x4[] = { .encoding = { .mask = 0xffe02000, .op = 0x43c00000 }, .id = HEX_INS_L2_PLOADRDTNEW_IO, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 18 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x6, 5 } }, .imm_scale = 3, .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 't', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', 
.syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x6, 5 } }, .isa_id = 'u', .imm_scale = 3, .syntax = 19 }, }, .pred = HEX_PRED_TRUE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -13626,10 +13627,10 @@ static const HexInsnTemplate templates_normal_0x4[] = { .encoding = { .mask = 0xffe02000, .op = 0x45400000 }, .id = HEX_INS_L2_PLOADRHF_IO, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 15 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x6, 5 } }, .imm_scale = 1, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 't', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x6, 5 } }, .isa_id = 'u', .imm_scale = 1, .syntax = 16 }, }, .pred = HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -13642,10 +13643,10 @@ static const HexInsnTemplate templates_normal_0x4[] = { .encoding = { .mask = 0xffe02000, .op = 0x47400000 }, .id = HEX_INS_L2_PLOADRHFNEW_IO, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG 
| HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 19 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x6, 5 } }, .imm_scale = 1, .syntax = 20 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 't', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x6, 5 } }, .isa_id = 'u', .imm_scale = 1, .syntax = 20 }, }, .pred = HEX_PRED_FALSE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -13658,10 +13659,10 @@ static const HexInsnTemplate templates_normal_0x4[] = { .encoding = { .mask = 0xffe02000, .op = 0x41400000 }, .id = HEX_INS_L2_PLOADRHT_IO, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 14 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x6, 5 } }, .imm_scale = 1, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 't', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, 
.masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x6, 5 } }, .isa_id = 'u', .imm_scale = 1, .syntax = 15 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -13674,10 +13675,10 @@ static const HexInsnTemplate templates_normal_0x4[] = { .encoding = { .mask = 0xffe02000, .op = 0x43400000 }, .id = HEX_INS_L2_PLOADRHTNEW_IO, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 18 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x6, 5 } }, .imm_scale = 1, .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 't', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x6, 5 } }, .isa_id = 'u', .imm_scale = 1, .syntax = 19 }, }, .pred = HEX_PRED_TRUE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -13690,10 +13691,10 @@ static const HexInsnTemplate templates_normal_0x4[] = { .encoding = { .mask = 0xffe02000, .op = 
0x45800000 }, .id = HEX_INS_L2_PLOADRIF_IO, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 15 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x6, 5 } }, .imm_scale = 2, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 't', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x6, 5 } }, .isa_id = 'u', .imm_scale = 2, .syntax = 16 }, }, .pred = HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -13706,10 +13707,10 @@ static const HexInsnTemplate templates_normal_0x4[] = { .encoding = { .mask = 0xffe02000, .op = 0x47800000 }, .id = HEX_INS_L2_PLOADRIFNEW_IO, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 19 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x6, 5 } }, .imm_scale = 2, .syntax = 20 }, + { 
.info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 't', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x6, 5 } }, .isa_id = 'u', .imm_scale = 2, .syntax = 20 }, }, .pred = HEX_PRED_FALSE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -13722,10 +13723,10 @@ static const HexInsnTemplate templates_normal_0x4[] = { .encoding = { .mask = 0xffe02000, .op = 0x41800000 }, .id = HEX_INS_L2_PLOADRIT_IO, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 14 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x6, 5 } }, .imm_scale = 2, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 't', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x6, 5 } }, .isa_id = 'u', .imm_scale = 2, .syntax 
= 15 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -13738,10 +13739,10 @@ static const HexInsnTemplate templates_normal_0x4[] = { .encoding = { .mask = 0xffe02000, .op = 0x43800000 }, .id = HEX_INS_L2_PLOADRITNEW_IO, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 18 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x6, 5 } }, .imm_scale = 2, .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 't', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x6, 5 } }, .isa_id = 'u', .imm_scale = 2, .syntax = 19 }, }, .pred = HEX_PRED_TRUE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -13754,10 +13755,10 @@ static const HexInsnTemplate templates_normal_0x4[] = { .encoding = { .mask = 0xffe02000, .op = 0x45200000 }, .id = HEX_INS_L2_PLOADRUBF_IO, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = 
HEX_REG_CLASS_INT_REGS, .syntax = 16 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x6, 5 } }, .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 't', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x6, 5 } }, .isa_id = 'u', .syntax = 17 }, }, .pred = HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -13770,10 +13771,10 @@ static const HexInsnTemplate templates_normal_0x4[] = { .encoding = { .mask = 0xffe02000, .op = 0x47200000 }, .id = HEX_INS_L2_PLOADRUBFNEW_IO, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 20 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x6, 5 } }, .syntax = 21 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 't', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 20 }, + { .info = 
HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x6, 5 } }, .isa_id = 'u', .syntax = 21 }, }, .pred = HEX_PRED_FALSE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -13786,10 +13787,10 @@ static const HexInsnTemplate templates_normal_0x4[] = { .encoding = { .mask = 0xffe02000, .op = 0x41200000 }, .id = HEX_INS_L2_PLOADRUBT_IO, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 15 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x6, 5 } }, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 't', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x6, 5 } }, .isa_id = 'u', .syntax = 16 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -13802,10 +13803,10 @@ static const HexInsnTemplate templates_normal_0x4[] = { .encoding = { .mask = 0xffe02000, .op = 0x43200000 }, .id = HEX_INS_L2_PLOADRUBTNEW_IO, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = 
HEX_REG_CLASS_INT_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 19 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x6, 5 } }, .syntax = 20 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 't', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x6, 5 } }, .isa_id = 'u', .syntax = 20 }, }, .pred = HEX_PRED_TRUE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -13818,10 +13819,10 @@ static const HexInsnTemplate templates_normal_0x4[] = { .encoding = { .mask = 0xffe02000, .op = 0x45600000 }, .id = HEX_INS_L2_PLOADRUHF_IO, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 16 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x6, 5 } }, .imm_scale = 1, .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 't', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 7 }, + { .info 
= HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x6, 5 } }, .isa_id = 'u', .imm_scale = 1, .syntax = 17 }, }, .pred = HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -13834,10 +13835,10 @@ static const HexInsnTemplate templates_normal_0x4[] = { .encoding = { .mask = 0xffe02000, .op = 0x47600000 }, .id = HEX_INS_L2_PLOADRUHFNEW_IO, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 20 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x6, 5 } }, .imm_scale = 1, .syntax = 21 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 't', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 20 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x6, 5 } }, .isa_id = 'u', .imm_scale = 1, .syntax = 21 }, }, .pred = HEX_PRED_FALSE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -13850,10 +13851,10 @@ static const HexInsnTemplate templates_normal_0x4[] = { .encoding = { .mask = 0xffe02000, .op = 0x41600000 }, .id = HEX_INS_L2_PLOADRUHT_IO, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, 
.masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 15 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x6, 5 } }, .imm_scale = 1, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 't', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x6, 5 } }, .isa_id = 'u', .imm_scale = 1, .syntax = 16 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -13866,10 +13867,10 @@ static const HexInsnTemplate templates_normal_0x4[] = { .encoding = { .mask = 0xffe02000, .op = 0x43600000 }, .id = HEX_INS_L2_PLOADRUHTNEW_IO, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 19 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x6, 5 } }, .imm_scale = 1, .syntax = 20 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 11 } }, .reg_cls = 
HEX_REG_CLASS_PRED_REGS, .isa_id = 't', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x6, 5 } }, .isa_id = 'u', .imm_scale = 1, .syntax = 20 }, }, .pred = HEX_PRED_TRUE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -13877,197 +13878,15 @@ static const HexInsnTemplate templates_normal_0x4[] = { .syntax = "if (.new) = memuh(+)", .flags = HEX_INSN_TEMPLATE_FLAG_PREDICATED, }, - { - // 01001ii1000iiiiiPPiiiiiiiiiddddd | Rd = memb(Ii) - .encoding = { .mask = 0xf9e00000, .op = 0x49000000 }, - .id = HEX_INS_PS_LOADRBABS, - .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x9, 5 }, { 0x5, 16 }, { 0x2, 25 } }, .syntax = 8 }, - }, - .pred = HEX_NOPRED, - .cond = RZ_TYPE_COND_AL, - .type = RZ_ANALYSIS_OP_TYPE_NULL, - .syntax = " = memb()", - }, - { - // 01001ii1110iiiiiPPiiiiiiiiiddddd | Rdd = memd(Ii) - .encoding = { .mask = 0xf9e00000, .op = 0x49c00000 }, - .id = HEX_INS_PS_LOADRDABS, - .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x9, 5 }, { 0x5, 16 }, { 0x2, 25 } }, .imm_scale = 3, .syntax = 8 }, - }, - .pred = HEX_NOPRED, - .cond = RZ_TYPE_COND_AL, - .type = RZ_ANALYSIS_OP_TYPE_NULL, - 
.syntax = " = memd()", - }, - { - // 01001ii1010iiiiiPPiiiiiiiiiddddd | Rd = memh(Ii) - .encoding = { .mask = 0xf9e00000, .op = 0x49400000 }, - .id = HEX_INS_PS_LOADRHABS, - .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x9, 5 }, { 0x5, 16 }, { 0x2, 25 } }, .imm_scale = 1, .syntax = 8 }, - }, - .pred = HEX_NOPRED, - .cond = RZ_TYPE_COND_AL, - .type = RZ_ANALYSIS_OP_TYPE_NULL, - .syntax = " = memh()", - }, - { - // 01001ii1100iiiiiPPiiiiiiiiiddddd | Rd = memw(Ii) - .encoding = { .mask = 0xf9e00000, .op = 0x49800000 }, - .id = HEX_INS_PS_LOADRIABS, - .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x9, 5 }, { 0x5, 16 }, { 0x2, 25 } }, .imm_scale = 2, .syntax = 8 }, - }, - .pred = HEX_NOPRED, - .cond = RZ_TYPE_COND_AL, - .type = RZ_ANALYSIS_OP_TYPE_NULL, - .syntax = " = memw()", - }, - { - // 01001ii1001iiiiiPPiiiiiiiiiddddd | Rd = memub(Ii) - .encoding = { .mask = 0xf9e00000, .op = 0x49200000 }, - .id = HEX_INS_PS_LOADRUBABS, - .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x9, 5 }, { 0x5, 16 }, { 0x2, 25 } }, .syntax = 9 }, - }, - .pred = HEX_NOPRED, - .cond = RZ_TYPE_COND_AL, - .type = RZ_ANALYSIS_OP_TYPE_NULL, - .syntax = " = memub()", - }, - { - // 01001ii1011iiiiiPPiiiiiiiiiddddd | Rd = memuh(Ii) - .encoding = { .mask = 0xf9e00000, .op = 0x49600000 }, - .id = 
HEX_INS_PS_LOADRUHABS, - .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x9, 5 }, { 0x5, 16 }, { 0x2, 25 } }, .imm_scale = 1, .syntax = 9 }, - }, - .pred = HEX_NOPRED, - .cond = RZ_TYPE_COND_AL, - .type = RZ_ANALYSIS_OP_TYPE_NULL, - .syntax = " = memuh()", - }, - { - // 01001ii0000iiiiiPPitttttiiiiiiii | memb(Ii) = Rt - .encoding = { .mask = 0xf9e00000, .op = 0x48000000 }, - .id = HEX_INS_PS_STORERBABS, - .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x8, 0 }, { 0x1, 13 }, { 0x5, 16 }, { 0x2, 25 } }, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, - }, - .pred = HEX_NOPRED, - .cond = RZ_TYPE_COND_AL, - .type = RZ_ANALYSIS_OP_TYPE_NULL, - .syntax = "memb() = ", - }, - { - // 01001ii0101iiiiiPPi00tttiiiiiiii | memb(Ii) = Nt.new - .encoding = { .mask = 0xf9e01800, .op = 0x48a00000 }, - .id = HEX_INS_PS_STORERBNEWABS, - .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x8, 0 }, { 0x1, 13 }, { 0x5, 16 }, { 0x2, 25 } }, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, - }, - .pred = HEX_NOPRED, - .cond = RZ_TYPE_COND_AL, - .type = RZ_ANALYSIS_OP_TYPE_NULL, - .syntax = "memb() = .new", - }, - { - // 01001ii0110iiiiiPPitttttiiiiiiii | memd(Ii) = Rtt - .encoding = { .mask = 0xf9e00000, .op = 0x48c00000 }, - .id = HEX_INS_PS_STORERDABS, - .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x8, 0 
}, { 0x1, 13 }, { 0x5, 16 }, { 0x2, 25 } }, .imm_scale = 3, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 9 }, - }, - .pred = HEX_NOPRED, - .cond = RZ_TYPE_COND_AL, - .type = RZ_ANALYSIS_OP_TYPE_NULL, - .syntax = "memd() = ", - }, - { - // 01001ii0011iiiiiPPitttttiiiiiiii | memh(Ii) = Rt.h - .encoding = { .mask = 0xf9e00000, .op = 0x48600000 }, - .id = HEX_INS_PS_STORERFABS, - .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x8, 0 }, { 0x1, 13 }, { 0x5, 16 }, { 0x2, 25 } }, .imm_scale = 1, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, - }, - .pred = HEX_NOPRED, - .cond = RZ_TYPE_COND_AL, - .type = RZ_ANALYSIS_OP_TYPE_NULL, - .syntax = "memh() = .h", - }, - { - // 01001ii0010iiiiiPPitttttiiiiiiii | memh(Ii) = Rt - .encoding = { .mask = 0xf9e00000, .op = 0x48400000 }, - .id = HEX_INS_PS_STORERHABS, - .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x8, 0 }, { 0x1, 13 }, { 0x5, 16 }, { 0x2, 25 } }, .imm_scale = 1, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, - }, - .pred = HEX_NOPRED, - .cond = RZ_TYPE_COND_AL, - .type = RZ_ANALYSIS_OP_TYPE_NULL, - .syntax = "memh() = ", - }, - { - // 01001ii0101iiiiiPPi01tttiiiiiiii | memh(Ii) = Nt.new - .encoding = { .mask = 0xf9e01800, .op = 0x48a00800 }, - .id = HEX_INS_PS_STORERHNEWABS, - .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x8, 0 }, { 0x1, 13 }, { 0x5, 16 }, { 0x2, 25 } }, .imm_scale = 1, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { 
{ 0x3, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, - }, - .pred = HEX_NOPRED, - .cond = RZ_TYPE_COND_AL, - .type = RZ_ANALYSIS_OP_TYPE_NULL, - .syntax = "memh() = .new", - }, - { - // 01001ii0100iiiiiPPitttttiiiiiiii | memw(Ii) = Rt - .encoding = { .mask = 0xf9e00000, .op = 0x48800000 }, - .id = HEX_INS_PS_STORERIABS, - .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x8, 0 }, { 0x1, 13 }, { 0x5, 16 }, { 0x2, 25 } }, .imm_scale = 2, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, - }, - .pred = HEX_NOPRED, - .cond = RZ_TYPE_COND_AL, - .type = RZ_ANALYSIS_OP_TYPE_NULL, - .syntax = "memw() = ", - }, - { - // 01001ii0101iiiiiPPi10tttiiiiiiii | memw(Ii) = Nt.new - .encoding = { .mask = 0xf9e01800, .op = 0x48a01000 }, - .id = HEX_INS_PS_STORERINEWABS, - .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x8, 0 }, { 0x1, 13 }, { 0x5, 16 }, { 0x2, 25 } }, .imm_scale = 2, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, - }, - .pred = HEX_NOPRED, - .cond = RZ_TYPE_COND_AL, - .type = RZ_ANALYSIS_OP_TYPE_NULL, - .syntax = "memw() = .new", - }, { // 01000100000sssssPPitttttiiiii0vv | if (!Pv) memb(Rs+Ii) = Rt .encoding = { .mask = 0xffe00004, .op = 0x44000000 }, .id = HEX_INS_S2_PSTORERBF_IO, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x5, 3 }, { 0x1, 13 } }, .syntax = 13 }, - { .info = 
HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x5, 3 }, { 0x1, 13 } }, .isa_id = 'u', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 17 }, }, .pred = HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -14080,10 +13899,10 @@ static const HexInsnTemplate templates_normal_0x4[] = { .encoding = { .mask = 0xffe01804, .op = 0x44a00000 }, .id = HEX_INS_S2_PSTORERBNEWF_IO, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x5, 3 }, { 0x1, 13 } }, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x5, 3 }, { 0x1, 13 } }, .isa_id = 'u', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 8 } }, 
.reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 17 }, }, .pred = HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -14096,10 +13915,10 @@ static const HexInsnTemplate templates_normal_0x4[] = { .encoding = { .mask = 0xffe01804, .op = 0x40a00000 }, .id = HEX_INS_S2_PSTORERBNEWT_IO, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x5, 3 }, { 0x1, 13 } }, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x5, 3 }, { 0x1, 13 } }, .isa_id = 'u', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 16 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -14112,10 +13931,10 @@ static const HexInsnTemplate templates_normal_0x4[] = { .encoding = { .mask = 0xffe00004, .op = 0x40000000 }, .id = HEX_INS_S2_PSTORERBT_IO, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE 
| HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x5, 3 }, { 0x1, 13 } }, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x5, 3 }, { 0x1, 13 } }, .isa_id = 'u', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 16 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -14128,10 +13947,10 @@ static const HexInsnTemplate templates_normal_0x4[] = { .encoding = { .mask = 0xffe00004, .op = 0x44c00000 }, .id = HEX_INS_S2_PSTORERDF_IO, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x5, 3 }, { 0x1, 13 } }, .imm_scale = 3, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x5, 3 }, { 0x1, 13 } }, .isa_id 
= 'u', .imm_scale = 3, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 17 }, }, .pred = HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -14144,10 +13963,10 @@ static const HexInsnTemplate templates_normal_0x4[] = { .encoding = { .mask = 0xffe00004, .op = 0x40c00000 }, .id = HEX_INS_S2_PSTORERDT_IO, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x5, 3 }, { 0x1, 13 } }, .imm_scale = 3, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x5, 3 }, { 0x1, 13 } }, .isa_id = 'u', .imm_scale = 3, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 16 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -14160,10 +13979,10 @@ static const HexInsnTemplate templates_normal_0x4[] = { .encoding = { .mask = 0xffe00004, .op = 0x44600000 }, .id = HEX_INS_S2_PSTORERFF_IO, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = 
HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x5, 3 }, { 0x1, 13 } }, .imm_scale = 1, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x5, 3 }, { 0x1, 13 } }, .isa_id = 'u', .imm_scale = 1, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 17 }, }, .pred = HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -14176,10 +13995,10 @@ static const HexInsnTemplate templates_normal_0x4[] = { .encoding = { .mask = 0xffe00004, .op = 0x40600000 }, .id = HEX_INS_S2_PSTORERFT_IO, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x5, 3 }, { 0x1, 13 } }, .imm_scale = 1, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', 
.syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x5, 3 }, { 0x1, 13 } }, .isa_id = 'u', .imm_scale = 1, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 16 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -14192,10 +14011,10 @@ static const HexInsnTemplate templates_normal_0x4[] = { .encoding = { .mask = 0xffe00004, .op = 0x44400000 }, .id = HEX_INS_S2_PSTORERHF_IO, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x5, 3 }, { 0x1, 13 } }, .imm_scale = 1, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x5, 3 }, { 0x1, 13 } }, .isa_id = 'u', .imm_scale = 1, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 17 }, }, .pred = HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -14208,10 +14027,10 @@ static const HexInsnTemplate templates_normal_0x4[] = { .encoding = { .mask = 0xffe01804, .op = 0x44a00800 }, .id = HEX_INS_S2_PSTORERHNEWF_IO, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } 
}, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x5, 3 }, { 0x1, 13 } }, .imm_scale = 1, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x5, 3 }, { 0x1, 13 } }, .isa_id = 'u', .imm_scale = 1, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 17 }, }, .pred = HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -14224,10 +14043,10 @@ static const HexInsnTemplate templates_normal_0x4[] = { .encoding = { .mask = 0xffe01804, .op = 0x40a00800 }, .id = HEX_INS_S2_PSTORERHNEWT_IO, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x5, 3 }, { 0x1, 13 } }, .imm_scale = 1, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = 
HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x5, 3 }, { 0x1, 13 } }, .isa_id = 'u', .imm_scale = 1, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 16 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -14240,10 +14059,10 @@ static const HexInsnTemplate templates_normal_0x4[] = { .encoding = { .mask = 0xffe00004, .op = 0x40400000 }, .id = HEX_INS_S2_PSTORERHT_IO, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x5, 3 }, { 0x1, 13 } }, .imm_scale = 1, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x5, 3 }, { 0x1, 13 } }, .isa_id = 'u', .imm_scale = 1, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 16 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -14256,10 +14075,10 @@ static const 
HexInsnTemplate templates_normal_0x4[] = { .encoding = { .mask = 0xffe00004, .op = 0x44800000 }, .id = HEX_INS_S2_PSTORERIF_IO, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x5, 3 }, { 0x1, 13 } }, .imm_scale = 2, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x5, 3 }, { 0x1, 13 } }, .isa_id = 'u', .imm_scale = 2, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 17 }, }, .pred = HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -14272,10 +14091,10 @@ static const HexInsnTemplate templates_normal_0x4[] = { .encoding = { .mask = 0xffe01804, .op = 0x44a01000 }, .id = HEX_INS_S2_PSTORERINEWF_IO, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x5, 3 }, { 0x1, 13 } }, .imm_scale = 2, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 8 
} }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x5, 3 }, { 0x1, 13 } }, .isa_id = 'u', .imm_scale = 2, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 17 }, }, .pred = HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -14288,10 +14107,10 @@ static const HexInsnTemplate templates_normal_0x4[] = { .encoding = { .mask = 0xffe01804, .op = 0x40a01000 }, .id = HEX_INS_S2_PSTORERINEWT_IO, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x5, 3 }, { 0x1, 13 } }, .imm_scale = 2, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x5, 3 }, { 0x1, 13 } }, .isa_id = 'u', .imm_scale = 2, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | 
HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 16 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -14304,10 +14123,10 @@ static const HexInsnTemplate templates_normal_0x4[] = { .encoding = { .mask = 0xffe00004, .op = 0x40800000 }, .id = HEX_INS_S2_PSTORERIT_IO, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x5, 3 }, { 0x1, 13 } }, .imm_scale = 2, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x5, 3 }, { 0x1, 13 } }, .isa_id = 'u', .imm_scale = 2, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 16 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -14320,8 +14139,8 @@ static const HexInsnTemplate templates_normal_0x4[] = { .encoding = { .mask = 0xf9e00000, .op = 0x48000000 }, .id = HEX_INS_S2_STORERBGP, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x8, 0 }, { 0x1, 13 }, { 0x5, 16 }, { 0x2, 25 } }, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, 
.syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x8, 0 }, { 0x1, 13 }, { 0x5, 16 }, { 0x2, 25 } }, .isa_id = 'u', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -14333,8 +14152,8 @@ static const HexInsnTemplate templates_normal_0x4[] = { .encoding = { .mask = 0xf9e01800, .op = 0x48a00000 }, .id = HEX_INS_S2_STORERBNEWGP, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x8, 0 }, { 0x1, 13 }, { 0x5, 16 }, { 0x2, 25 } }, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x8, 0 }, { 0x1, 13 }, { 0x5, 16 }, { 0x2, 25 } }, .isa_id = 'u', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -14346,8 +14165,8 @@ static const HexInsnTemplate templates_normal_0x4[] = { .encoding = { .mask = 0xf9e00000, .op = 0x48c00000 }, .id = HEX_INS_S2_STORERDGP, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x8, 0 }, { 0x1, 13 }, { 0x5, 16 }, { 0x2, 25 } }, .imm_scale = 3, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x8, 
0 }, { 0x1, 13 }, { 0x5, 16 }, { 0x2, 25 } }, .isa_id = 'u', .imm_scale = 3, .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -14359,8 +14178,8 @@ static const HexInsnTemplate templates_normal_0x4[] = { .encoding = { .mask = 0xf9e00000, .op = 0x48600000 }, .id = HEX_INS_S2_STORERFGP, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x8, 0 }, { 0x1, 13 }, { 0x5, 16 }, { 0x2, 25 } }, .imm_scale = 1, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x8, 0 }, { 0x1, 13 }, { 0x5, 16 }, { 0x2, 25 } }, .isa_id = 'u', .imm_scale = 1, .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -14372,8 +14191,8 @@ static const HexInsnTemplate templates_normal_0x4[] = { .encoding = { .mask = 0xf9e00000, .op = 0x48400000 }, .id = HEX_INS_S2_STORERHGP, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x8, 0 }, { 0x1, 13 }, { 0x5, 16 }, { 0x2, 25 } }, .imm_scale = 1, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x8, 0 }, { 0x1, 13 }, { 0x5, 16 }, { 0x2, 25 } }, .isa_id = 'u', .imm_scale = 1, .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = 
HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -14385,8 +14204,8 @@ static const HexInsnTemplate templates_normal_0x4[] = { .encoding = { .mask = 0xf9e01800, .op = 0x48a00800 }, .id = HEX_INS_S2_STORERHNEWGP, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x8, 0 }, { 0x1, 13 }, { 0x5, 16 }, { 0x2, 25 } }, .imm_scale = 1, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x8, 0 }, { 0x1, 13 }, { 0x5, 16 }, { 0x2, 25 } }, .isa_id = 'u', .imm_scale = 1, .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -14398,8 +14217,8 @@ static const HexInsnTemplate templates_normal_0x4[] = { .encoding = { .mask = 0xf9e00000, .op = 0x48800000 }, .id = HEX_INS_S2_STORERIGP, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x8, 0 }, { 0x1, 13 }, { 0x5, 16 }, { 0x2, 25 } }, .imm_scale = 2, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x8, 0 }, { 0x1, 13 }, { 0x5, 16 }, { 0x2, 25 } }, .isa_id = 'u', .imm_scale = 2, .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -14411,8 +14230,8 @@ static 
const HexInsnTemplate templates_normal_0x4[] = { .encoding = { .mask = 0xf9e01800, .op = 0x48a01000 }, .id = HEX_INS_S2_STORERINEWGP, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x8, 0 }, { 0x1, 13 }, { 0x5, 16 }, { 0x2, 25 } }, .imm_scale = 2, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x8, 0 }, { 0x1, 13 }, { 0x5, 16 }, { 0x2, 25 } }, .isa_id = 'u', .imm_scale = 2, .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -14424,10 +14243,10 @@ static const HexInsnTemplate templates_normal_0x4[] = { .encoding = { .mask = 0xffe00004, .op = 0x46000000 }, .id = HEX_INS_S4_PSTORERBFNEW_IO, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 16 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x5, 3 }, { 0x1, 13 } }, .syntax = 17 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 21 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, 
.masks = { { 0x5, 3 }, { 0x1, 13 } }, .isa_id = 'u', .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 21 }, }, .pred = HEX_PRED_FALSE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -14440,10 +14259,10 @@ static const HexInsnTemplate templates_normal_0x4[] = { .encoding = { .mask = 0xffe01804, .op = 0x46a00000 }, .id = HEX_INS_S4_PSTORERBNEWFNEW_IO, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 16 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x5, 3 }, { 0x1, 13 } }, .syntax = 17 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 21 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x5, 3 }, { 0x1, 13 } }, .isa_id = 'u', .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 21 }, }, .pred = HEX_PRED_FALSE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -14456,10 +14275,10 @@ static const HexInsnTemplate templates_normal_0x4[] = { .encoding = { .mask = 0xffe01804, .op = 0x42a00000 }, .id = HEX_INS_S4_PSTORERBNEWTNEW_IO, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = 
HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 15 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x5, 3 }, { 0x1, 13 } }, .syntax = 16 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 20 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x5, 3 }, { 0x1, 13 } }, .isa_id = 'u', .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 20 }, }, .pred = HEX_PRED_TRUE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -14472,10 +14291,10 @@ static const HexInsnTemplate templates_normal_0x4[] = { .encoding = { .mask = 0xffe00004, .op = 0x42000000 }, .id = HEX_INS_S4_PSTORERBTNEW_IO, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 15 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x5, 3 }, { 0x1, 13 } }, .syntax = 16 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 20 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = 
HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x5, 3 }, { 0x1, 13 } }, .isa_id = 'u', .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 20 }, }, .pred = HEX_PRED_TRUE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -14488,10 +14307,10 @@ static const HexInsnTemplate templates_normal_0x4[] = { .encoding = { .mask = 0xffe00004, .op = 0x46c00000 }, .id = HEX_INS_S4_PSTORERDFNEW_IO, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 16 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x5, 3 }, { 0x1, 13 } }, .imm_scale = 3, .syntax = 17 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 21 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x5, 3 }, { 0x1, 13 } }, .isa_id = 'u', .imm_scale = 3, .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 21 }, }, .pred = HEX_PRED_FALSE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -14504,10 +14323,10 @@ static const HexInsnTemplate templates_normal_0x4[] = { .encoding = { .mask = 
0xffe00004, .op = 0x42c00000 }, .id = HEX_INS_S4_PSTORERDTNEW_IO, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 15 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x5, 3 }, { 0x1, 13 } }, .imm_scale = 3, .syntax = 16 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 20 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x5, 3 }, { 0x1, 13 } }, .isa_id = 'u', .imm_scale = 3, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 20 }, }, .pred = HEX_PRED_TRUE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -14520,10 +14339,10 @@ static const HexInsnTemplate templates_normal_0x4[] = { .encoding = { .mask = 0xffe00004, .op = 0x46600000 }, .id = HEX_INS_S4_PSTORERFFNEW_IO, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 16 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x5, 3 }, { 0x1, 13 } }, .imm_scale = 1, .syntax = 17 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, 
.reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 21 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x5, 3 }, { 0x1, 13 } }, .isa_id = 'u', .imm_scale = 1, .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 21 }, }, .pred = HEX_PRED_FALSE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -14536,10 +14355,10 @@ static const HexInsnTemplate templates_normal_0x4[] = { .encoding = { .mask = 0xffe00004, .op = 0x42600000 }, .id = HEX_INS_S4_PSTORERFTNEW_IO, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 15 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x5, 3 }, { 0x1, 13 } }, .imm_scale = 1, .syntax = 16 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 20 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x5, 3 }, { 0x1, 13 } }, .isa_id = 'u', .imm_scale = 1, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', 
.syntax = 20 }, }, .pred = HEX_PRED_TRUE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -14552,10 +14371,10 @@ static const HexInsnTemplate templates_normal_0x4[] = { .encoding = { .mask = 0xffe00004, .op = 0x46400000 }, .id = HEX_INS_S4_PSTORERHFNEW_IO, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 16 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x5, 3 }, { 0x1, 13 } }, .imm_scale = 1, .syntax = 17 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 21 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x5, 3 }, { 0x1, 13 } }, .isa_id = 'u', .imm_scale = 1, .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 21 }, }, .pred = HEX_PRED_FALSE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -14568,10 +14387,10 @@ static const HexInsnTemplate templates_normal_0x4[] = { .encoding = { .mask = 0xffe01804, .op = 0x46a00800 }, .id = HEX_INS_S4_PSTORERHNEWFNEW_IO, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 16 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = 
{ { 0x5, 3 }, { 0x1, 13 } }, .imm_scale = 1, .syntax = 17 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 21 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x5, 3 }, { 0x1, 13 } }, .isa_id = 'u', .imm_scale = 1, .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 21 }, }, .pred = HEX_PRED_FALSE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -14584,10 +14403,10 @@ static const HexInsnTemplate templates_normal_0x4[] = { .encoding = { .mask = 0xffe01804, .op = 0x42a00800 }, .id = HEX_INS_S4_PSTORERHNEWTNEW_IO, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 15 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x5, 3 }, { 0x1, 13 } }, .imm_scale = 1, .syntax = 16 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 20 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | 
HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x5, 3 }, { 0x1, 13 } }, .isa_id = 'u', .imm_scale = 1, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 20 }, }, .pred = HEX_PRED_TRUE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -14600,10 +14419,10 @@ static const HexInsnTemplate templates_normal_0x4[] = { .encoding = { .mask = 0xffe00004, .op = 0x42400000 }, .id = HEX_INS_S4_PSTORERHTNEW_IO, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 15 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x5, 3 }, { 0x1, 13 } }, .imm_scale = 1, .syntax = 16 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 20 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x5, 3 }, { 0x1, 13 } }, .isa_id = 'u', .imm_scale = 1, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 20 }, }, .pred = HEX_PRED_TRUE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -14616,10 +14435,10 @@ static const HexInsnTemplate templates_normal_0x4[] = { .encoding = { .mask = 0xffe00004, .op = 0x46800000 }, .id = HEX_INS_S4_PSTORERIFNEW_IO, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = 
HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 16 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x5, 3 }, { 0x1, 13 } }, .imm_scale = 2, .syntax = 17 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 21 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x5, 3 }, { 0x1, 13 } }, .isa_id = 'u', .imm_scale = 2, .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 21 }, }, .pred = HEX_PRED_FALSE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -14632,10 +14451,10 @@ static const HexInsnTemplate templates_normal_0x4[] = { .encoding = { .mask = 0xffe01804, .op = 0x46a01000 }, .id = HEX_INS_S4_PSTORERINEWFNEW_IO, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 16 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x5, 3 }, { 0x1, 13 } }, .imm_scale = 2, .syntax = 17 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 21 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 5 }, + { 
.info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x5, 3 }, { 0x1, 13 } }, .isa_id = 'u', .imm_scale = 2, .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 21 }, }, .pred = HEX_PRED_FALSE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -14648,10 +14467,10 @@ static const HexInsnTemplate templates_normal_0x4[] = { .encoding = { .mask = 0xffe01804, .op = 0x42a01000 }, .id = HEX_INS_S4_PSTORERINEWTNEW_IO, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 15 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x5, 3 }, { 0x1, 13 } }, .imm_scale = 2, .syntax = 16 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 20 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x5, 3 }, { 0x1, 13 } }, .isa_id = 'u', .imm_scale = 2, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 20 }, }, .pred = HEX_PRED_TRUE | HEX_PRED_NEW, .cond = 
RZ_TYPE_COND_HEX_SCL_TRUE, @@ -14664,10 +14483,10 @@ static const HexInsnTemplate templates_normal_0x4[] = { .encoding = { .mask = 0xffe00004, .op = 0x42800000 }, .id = HEX_INS_S4_PSTORERITNEW_IO, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 15 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x5, 3 }, { 0x1, 13 } }, .imm_scale = 2, .syntax = 16 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 20 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x5, 3 }, { 0x1, 13 } }, .isa_id = 'u', .imm_scale = 2, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 20 }, }, .pred = HEX_PRED_TRUE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -14684,7 +14503,7 @@ static const HexInsnTemplate templates_normal_0x5[] = { .encoding = { .mask = 0xfe000001, .op = 0x5a000000 }, .id = HEX_INS_J2_CALL, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0xd, 1 }, { 0x9, 16 } }, .imm_scale = 2, .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | 
HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0xd, 1 }, { 0x9, 16 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 5 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -14697,8 +14516,8 @@ static const HexInsnTemplate templates_normal_0x5[] = { .encoding = { .mask = 0xff201c01, .op = 0x5d200000 }, .id = HEX_INS_J2_CALLF, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x1, 13 }, { 0x5, 16 }, { 0x2, 22 } }, .imm_scale = 2, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'u', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x1, 13 }, { 0x5, 16 }, { 0x2, 22 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 12 }, }, .pred = HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -14711,7 +14530,7 @@ static const HexInsnTemplate templates_normal_0x5[] = { .encoding = { .mask = 0xffe03fff, .op = 0x50a00000 }, .id = HEX_INS_J2_CALLR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 6 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -14724,8 +14543,8 @@ static const HexInsnTemplate templates_normal_0x5[] = { .encoding = { .mask = 0xffe03cff, .op = 0x51200000 }, .id = HEX_INS_J2_CALLRF, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = 
HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'u', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 13 }, }, .pred = HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -14738,7 +14557,7 @@ static const HexInsnTemplate templates_normal_0x5[] = { .encoding = { .mask = 0xffe03fff, .op = 0x50c00000 }, .id = HEX_INS_J2_CALLRH, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 7 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -14751,8 +14570,8 @@ static const HexInsnTemplate templates_normal_0x5[] = { .encoding = { .mask = 0xffe03cff, .op = 0x51000000 }, .id = HEX_INS_J2_CALLRT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'u', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 12 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -14765,8 +14584,8 @@ static const HexInsnTemplate templates_normal_0x5[] = { .encoding = { .mask = 0xff201c01, .op = 0x5d000000 }, .id = HEX_INS_J2_CALLT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | 
HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x1, 13 }, { 0x5, 16 }, { 0x2, 22 } }, .imm_scale = 2, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'u', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x1, 13 }, { 0x5, 16 }, { 0x2, 22 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 11 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -14779,7 +14598,7 @@ static const HexInsnTemplate templates_normal_0x5[] = { .encoding = { .mask = 0xfe000001, .op = 0x58000000 }, .id = HEX_INS_J2_JUMP, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0xd, 1 }, { 0x9, 16 } }, .imm_scale = 2, .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0xd, 1 }, { 0x9, 16 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 5 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -14792,8 +14611,8 @@ static const HexInsnTemplate templates_normal_0x5[] = { .encoding = { .mask = 0xff201c01, .op = 0x5c200000 }, .id = HEX_INS_J2_JUMPF, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x1, 13 }, { 0x5, 16 }, { 0x2, 22 } }, .imm_scale = 2, .syntax = 15 }, + { .info = 
HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'u', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x1, 13 }, { 0x5, 16 }, { 0x2, 22 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 15 }, }, .pred = HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -14806,8 +14625,8 @@ static const HexInsnTemplate templates_normal_0x5[] = { .encoding = { .mask = 0xff201c01, .op = 0x5c200800 }, .id = HEX_INS_J2_JUMPFNEW, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x1, 13 }, { 0x5, 16 }, { 0x2, 22 } }, .imm_scale = 2, .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'u', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x1, 13 }, { 0x5, 16 }, { 0x2, 22 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 19 }, }, .pred = HEX_PRED_FALSE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -14820,8 +14639,8 @@ static const HexInsnTemplate templates_normal_0x5[] = { .encoding = { .mask = 0xff201c01, .op = 0x5c201800 }, .id = HEX_INS_J2_JUMPFNEWPT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | 
HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x1, 13 }, { 0x5, 16 }, { 0x2, 22 } }, .imm_scale = 2, .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'u', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x1, 13 }, { 0x5, 16 }, { 0x2, 22 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 18 }, }, .pred = HEX_PRED_FALSE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -14834,8 +14653,8 @@ static const HexInsnTemplate templates_normal_0x5[] = { .encoding = { .mask = 0xff201c01, .op = 0x5c201000 }, .id = HEX_INS_J2_JUMPFPT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x1, 13 }, { 0x5, 16 }, { 0x2, 22 } }, .imm_scale = 2, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'u', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x1, 13 }, { 0x5, 16 }, { 0x2, 22 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 14 }, }, .pred = HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -14848,7 +14667,7 @@ static const HexInsnTemplate templates_normal_0x5[] = { .encoding = { .mask = 0xffe03fff, .op = 0x52800000 }, .id = HEX_INS_J2_JUMPR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 6 }, + { .info = 
HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 6 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -14861,8 +14680,8 @@ static const HexInsnTemplate templates_normal_0x5[] = { .encoding = { .mask = 0xffe03cff, .op = 0x53600000 }, .id = HEX_INS_J2_JUMPRF, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'u', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 16 }, }, .pred = HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -14875,8 +14694,8 @@ static const HexInsnTemplate templates_normal_0x5[] = { .encoding = { .mask = 0xffe03cff, .op = 0x53600800 }, .id = HEX_INS_J2_JUMPRFNEW, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 20 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'u', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 20 }, }, .pred = HEX_PRED_FALSE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -14889,8 +14708,8 @@ static const HexInsnTemplate templates_normal_0x5[] = { .encoding = { .mask = 0xffe03cff, .op = 0x53601800 }, .id = HEX_INS_J2_JUMPRFNEWPT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = 
HEX_REG_CLASS_INT_REGS, .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'u', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 19 }, }, .pred = HEX_PRED_FALSE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -14903,8 +14722,8 @@ static const HexInsnTemplate templates_normal_0x5[] = { .encoding = { .mask = 0xffe03cff, .op = 0x53601000 }, .id = HEX_INS_J2_JUMPRFPT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'u', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 15 }, }, .pred = HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -14917,7 +14736,7 @@ static const HexInsnTemplate templates_normal_0x5[] = { .encoding = { .mask = 0xffe03fff, .op = 0x52c00000 }, .id = HEX_INS_J2_JUMPRH, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 7 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -14930,8 +14749,8 @@ static const HexInsnTemplate templates_normal_0x5[] = { .encoding = { .mask = 0xffe03cff, .op = 0x53400000 }, .id = HEX_INS_J2_JUMPRT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks 
= { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'u', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 15 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -14944,8 +14763,8 @@ static const HexInsnTemplate templates_normal_0x5[] = { .encoding = { .mask = 0xffe03cff, .op = 0x53400800 }, .id = HEX_INS_J2_JUMPRTNEW, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'u', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 19 }, }, .pred = HEX_PRED_TRUE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -14958,8 +14777,8 @@ static const HexInsnTemplate templates_normal_0x5[] = { .encoding = { .mask = 0xffe03cff, .op = 0x53401800 }, .id = HEX_INS_J2_JUMPRTNEWPT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'u', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 18 }, }, .pred = HEX_PRED_TRUE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -14972,8 +14791,8 @@ static const HexInsnTemplate templates_normal_0x5[] = { .encoding = { .mask = 0xffe03cff, .op = 0x53401000 }, .id = HEX_INS_J2_JUMPRTPT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = 
HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'u', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 14 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -14986,8 +14805,8 @@ static const HexInsnTemplate templates_normal_0x5[] = { .encoding = { .mask = 0xff201c01, .op = 0x5c000000 }, .id = HEX_INS_J2_JUMPT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x1, 13 }, { 0x5, 16 }, { 0x2, 22 } }, .imm_scale = 2, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'u', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x1, 13 }, { 0x5, 16 }, { 0x2, 22 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 14 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -15000,8 +14819,8 @@ static const HexInsnTemplate templates_normal_0x5[] = { .encoding = { .mask = 0xff201c01, .op = 0x5c000800 }, .id = HEX_INS_J2_JUMPTNEW, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | 
HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x1, 13 }, { 0x5, 16 }, { 0x2, 22 } }, .imm_scale = 2, .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'u', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x1, 13 }, { 0x5, 16 }, { 0x2, 22 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 18 }, }, .pred = HEX_PRED_TRUE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -15014,8 +14833,8 @@ static const HexInsnTemplate templates_normal_0x5[] = { .encoding = { .mask = 0xff201c01, .op = 0x5c001800 }, .id = HEX_INS_J2_JUMPTNEWPT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x1, 13 }, { 0x5, 16 }, { 0x2, 22 } }, .imm_scale = 2, .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'u', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x1, 13 }, { 0x5, 16 }, { 0x2, 22 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 17 }, }, .pred = HEX_PRED_TRUE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -15028,8 +14847,8 @@ static const HexInsnTemplate templates_normal_0x5[] = { .encoding = { .mask = 0xff201c01, .op = 0x5c001000 }, .id = HEX_INS_J2_JUMPTPT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { 
.info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x1, 13 }, { 0x5, 16 }, { 0x2, 22 } }, .imm_scale = 2, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'u', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 1 }, { 0x1, 13 }, { 0x5, 16 }, { 0x2, 22 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 13 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -15042,7 +14861,7 @@ static const HexInsnTemplate templates_normal_0x5[] = { .encoding = { .mask = 0xfffc20e3, .op = 0x54400000 }, .id = HEX_INS_J2_PAUSE, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 2 }, { 0x5, 8 }, { 0x2, 16 } }, .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 2 }, { 0x5, 8 }, { 0x2, 16 } }, .isa_id = 'u', .syntax = 6 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -15064,7 +14883,7 @@ static const HexInsnTemplate templates_normal_0x5[] = { .encoding = { .mask = 0xffff20e3, .op = 0x54000000 }, .id = HEX_INS_J2_TRAP0, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 2 }, { 0x5, 8 } }, .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 2 }, { 0x5, 8 } }, .isa_id = 'u', .syntax = 6 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -15076,8 +14895,8 @@ static const HexInsnTemplate templates_normal_0x5[] = { .encoding = { .mask = 0xffe020e3, .op = 0x54800000 }, .id = HEX_INS_J2_TRAP1, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | 
HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 2 }, { 0x5, 8 } }, .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 2 }, { 0x5, 8 } }, .isa_id = 'u', .syntax = 7 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -15099,7 +14918,7 @@ static const HexInsnTemplate templates_normal_0x5[] = { .encoding = { .mask = 0xffe03fff, .op = 0x52a00000 }, .id = HEX_INS_J4_HINTJUMPR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 7 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -15107,25 +14926,13 @@ static const HexInsnTemplate templates_normal_0x5[] = { .syntax = "hintjr()", .flags = HEX_INSN_TEMPLATE_FLAG_HAS_JMP_TGT, }, - { - // 0101010010000000PP0iiiii000iii00 | trap1(Ii) - .encoding = { .mask = 0xffff20e3, .op = 0x54800000 }, - .id = HEX_INS_PS_TRAP1, - .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 2 }, { 0x5, 8 } }, .syntax = 6 }, - }, - .pred = HEX_NOPRED, - .cond = RZ_TYPE_COND_AL, - .type = RZ_ANALYSIS_OP_TYPE_TRAP, - .syntax = "trap1()", - }, { // 01010101101sssssPP000000000ddddd | Rd = icdatar(Rs) .encoding = { .mask = 0xffe03fe0, .op = 0x55a00000 }, .id = HEX_INS_Y2_ICDATAR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + 
{ .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -15137,8 +14944,8 @@ static const HexInsnTemplate templates_normal_0x5[] = { .encoding = { .mask = 0xffe020ff, .op = 0x55c02000 }, .id = HEX_INS_Y2_ICDATAW, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -15150,7 +14957,7 @@ static const HexInsnTemplate templates_normal_0x5[] = { .encoding = { .mask = 0xffe03fff, .op = 0x56c00000 }, .id = HEX_INS_Y2_ICINVA, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 7 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -15162,7 +14969,7 @@ static const HexInsnTemplate templates_normal_0x5[] = { .encoding = { .mask = 0xffe03fff, .op = 0x56c00800 }, .id = HEX_INS_Y2_ICINVIDX, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -15183,8 +14990,8 @@ static const 
HexInsnTemplate templates_normal_0x5[] = { .encoding = { .mask = 0xffe03fe0, .op = 0x55e00000 }, .id = HEX_INS_Y2_ICTAGR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -15196,8 +15003,8 @@ static const HexInsnTemplate templates_normal_0x5[] = { .encoding = { .mask = 0xffe020ff, .op = 0x55c00000 }, .id = HEX_INS_Y2_ICTAGW, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 8 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -15222,8 +15029,8 @@ static const HexInsnTemplate templates_normal_0x6[] = { .encoding = { .mask = 0xffe03fe0, .op = 0x6a000000 }, .id = HEX_INS_A2_TFRCRR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_CTR_REGS, .syntax = 3 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', 
.syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_CTR_REGS, .isa_id = 's', .syntax = 3 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -15235,8 +15042,8 @@ static const HexInsnTemplate templates_normal_0x6[] = { .encoding = { .mask = 0xffe03fe0, .op = 0x62200000 }, .id = HEX_INS_A2_TFRRCR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_CTR_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 3 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_CTR_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 3 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -15248,8 +15055,8 @@ static const HexInsnTemplate templates_normal_0x6[] = { .encoding = { .mask = 0xffe03fe0, .op = 0x68000000 }, .id = HEX_INS_A4_TFRCPP, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_CTR_REGS64, .syntax = 3 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_CTR_REGS64, .isa_id = 's', .syntax = 3 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -15261,8 +15068,8 @@ static const HexInsnTemplate templates_normal_0x6[] = { .encoding = { .mask = 0xffe03fe0, .op = 0x63200000 }, .id = 
HEX_INS_A4_TFRPCP, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_CTR_REGS64, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 3 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_CTR_REGS64, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 3 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -15274,8 +15081,8 @@ static const HexInsnTemplate templates_normal_0x6[] = { .encoding = { .mask = 0xfffc3ffc, .op = 0x6ba00000 }, .id = HEX_INS_C2_ALL8, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 16 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 16 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 's', .syntax = 8 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -15287,9 +15094,9 @@ static const HexInsnTemplate templates_normal_0x6[] = { .encoding = { .mask = 0xfffc3cfc, .op = 0x6b000000 }, .id = HEX_INS_C2_AND, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 
0x2, 16 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 't', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 16 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 's', .syntax = 8 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -15301,9 +15108,9 @@ static const HexInsnTemplate templates_normal_0x6[] = { .encoding = { .mask = 0xfffc3cfc, .op = 0x6b600000 }, .id = HEX_INS_C2_ANDN, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 16 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 't', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 16 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 's', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -15315,8 +15122,8 @@ static const HexInsnTemplate templates_normal_0x6[] = { .encoding = { .mask = 0xfffc3ffc, .op = 0x6b800000 }, .id = HEX_INS_C2_ANY8, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 16 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 8 }, + { .info = 
HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 16 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 's', .syntax = 8 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -15328,8 +15135,8 @@ static const HexInsnTemplate templates_normal_0x6[] = { .encoding = { .mask = 0xfffc3ffc, .op = 0x6bc00000 }, .id = HEX_INS_C2_NOT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 16 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 16 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 's', .syntax = 7 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -15341,9 +15148,9 @@ static const HexInsnTemplate templates_normal_0x6[] = { .encoding = { .mask = 0xfffc3cfc, .op = 0x6b200000 }, .id = HEX_INS_C2_OR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 16 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 't', .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 16 } }, .reg_cls = 
HEX_REG_CLASS_PRED_REGS, .isa_id = 's', .syntax = 7 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -15355,9 +15162,9 @@ static const HexInsnTemplate templates_normal_0x6[] = { .encoding = { .mask = 0xfffc3cfc, .op = 0x6be00000 }, .id = HEX_INS_C2_ORN, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 16 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 't', .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 16 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 's', .syntax = 8 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -15369,9 +15176,9 @@ static const HexInsnTemplate templates_normal_0x6[] = { .encoding = { .mask = 0xfffc3cfc, .op = 0x6b400000 }, .id = HEX_INS_C2_XOR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 16 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 16 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 's', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } 
}, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 't', .syntax = 8 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -15383,8 +15190,8 @@ static const HexInsnTemplate templates_normal_0x6[] = { .encoding = { .mask = 0xffff2060, .op = 0x6a490000 }, .id = HEX_INS_C4_ADDIPC, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x6, 7 } }, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x6, 7 } }, .isa_id = 'u', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -15396,10 +15203,10 @@ static const HexInsnTemplate templates_normal_0x6[] = { .encoding = { .mask = 0xfffc3c3c, .op = 0x6b100000 }, .id = HEX_INS_C4_AND_AND, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 16 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 6 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 16 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 's', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = 
HEX_REG_CLASS_PRED_REGS, .isa_id = 't', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 6 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'u', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -15411,10 +15218,10 @@ static const HexInsnTemplate templates_normal_0x6[] = { .encoding = { .mask = 0xfffc3c3c, .op = 0x6b900000 }, .id = HEX_INS_C4_AND_ANDN, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 16 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 6 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 16 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 's', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 't', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 6 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'u', .syntax = 14 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -15426,10 +15233,10 @@ static const HexInsnTemplate templates_normal_0x6[] = { .encoding = { .mask = 0xfffc3c3c, .op = 0x6b300000 }, .id = HEX_INS_C4_AND_OR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 16 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = 
HEX_REG_CLASS_PRED_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 6 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 16 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 's', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 't', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 6 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'u', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -15441,10 +15248,10 @@ static const HexInsnTemplate templates_normal_0x6[] = { .encoding = { .mask = 0xfffc3c3c, .op = 0x6bb00000 }, .id = HEX_INS_C4_AND_ORN, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 16 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 6 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 16 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 's', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 't', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 6 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'u', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = 
RZ_TYPE_COND_AL, @@ -15456,9 +15263,9 @@ static const HexInsnTemplate templates_normal_0x6[] = { .encoding = { .mask = 0xfffc3cfc, .op = 0x6b002090 }, .id = HEX_INS_C4_FASTCORNER9, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 16 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 15 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 16 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 's', .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 't', .syntax = 16 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -15470,9 +15277,9 @@ static const HexInsnTemplate templates_normal_0x6[] = { .encoding = { .mask = 0xfffc3cfc, .op = 0x6b102090 }, .id = HEX_INS_C4_FASTCORNER9_NOT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 16 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 16 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 16 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 's', .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 't', 
.syntax = 17 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -15484,10 +15291,10 @@ static const HexInsnTemplate templates_normal_0x6[] = { .encoding = { .mask = 0xfffc3c3c, .op = 0x6b500000 }, .id = HEX_INS_C4_OR_AND, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 16 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 6 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 16 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 's', .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 't', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 6 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'u', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -15499,10 +15306,10 @@ static const HexInsnTemplate templates_normal_0x6[] = { .encoding = { .mask = 0xfffc3c3c, .op = 0x6bd00000 }, .id = HEX_INS_C4_OR_ANDN, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 16 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 6 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 13 }, + { .info = 
HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 16 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 's', .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 't', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 6 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'u', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -15514,10 +15321,10 @@ static const HexInsnTemplate templates_normal_0x6[] = { .encoding = { .mask = 0xfffc3c3c, .op = 0x6b700000 }, .id = HEX_INS_C4_OR_OR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 16 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 6 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 16 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 's', .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 't', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 6 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'u', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -15529,10 +15336,10 @@ static const HexInsnTemplate templates_normal_0x6[] = { .encoding = { .mask = 0xfffc3c3c, .op = 0x6bf00000 }, .id = 
HEX_INS_C4_OR_ORN, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 16 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 6 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 16 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 's', .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 't', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 6 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'u', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -15544,8 +15351,8 @@ static const HexInsnTemplate templates_normal_0x6[] = { .encoding = { .mask = 0xffe03fe0, .op = 0x68200000 }, .id = HEX_INS_G4_TFRGCPP, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_GUEST_REGS64, .syntax = 3 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_GUEST_REGS64, .isa_id = 's', .syntax = 3 }, }, .pred = HEX_NOPRED, 
.cond = RZ_TYPE_COND_AL, @@ -15557,8 +15364,8 @@ static const HexInsnTemplate templates_normal_0x6[] = { .encoding = { .mask = 0xffe03fe0, .op = 0x6a200000 }, .id = HEX_INS_G4_TFRGCRR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_GUEST_REGS, .syntax = 3 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_GUEST_REGS, .isa_id = 's', .syntax = 3 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -15570,8 +15377,8 @@ static const HexInsnTemplate templates_normal_0x6[] = { .encoding = { .mask = 0xffe03fe0, .op = 0x63000000 }, .id = HEX_INS_G4_TFRGPCP, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_GUEST_REGS64, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 3 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_GUEST_REGS64, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 3 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -15583,8 +15390,8 @@ static const HexInsnTemplate templates_normal_0x6[] = { .encoding = { .mask = 0xffe03fe0, .op = 0x62000000 }, .id = HEX_INS_G4_TFRGRCR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_GUEST_REGS, 
.syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 3 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_GUEST_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 3 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -15596,8 +15403,8 @@ static const HexInsnTemplate templates_normal_0x6[] = { .encoding = { .mask = 0xffc01001, .op = 0x61400000 }, .id = HEX_INS_J2_JUMPRGTEZ, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE, .masks = { { 0xb, 1 }, { 0x1, 13 }, { 0x1, 21 } }, .imm_scale = 2, .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE, .masks = { { 0xb, 1 }, { 0x1, 13 }, { 0x1, 21 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 18 }, }, .pred = HEX_PRED_TRUE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -15610,8 +15417,8 @@ static const HexInsnTemplate templates_normal_0x6[] = { .encoding = { .mask = 0xffc01001, .op = 0x61401000 }, .id = HEX_INS_J2_JUMPRGTEZPT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE, .masks = { { 0xb, 1 }, { 0x1, 13 }, { 0x1, 21 } }, .imm_scale = 2, .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks 
= { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE, .masks = { { 0xb, 1 }, { 0x1, 13 }, { 0x1, 21 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 17 }, }, .pred = HEX_PRED_TRUE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -15624,8 +15431,8 @@ static const HexInsnTemplate templates_normal_0x6[] = { .encoding = { .mask = 0xffc01001, .op = 0x61c00000 }, .id = HEX_INS_J2_JUMPRLTEZ, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE, .masks = { { 0xb, 1 }, { 0x1, 13 }, { 0x1, 21 } }, .imm_scale = 2, .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE, .masks = { { 0xb, 1 }, { 0x1, 13 }, { 0x1, 21 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 18 }, }, .pred = HEX_PRED_TRUE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -15638,8 +15445,8 @@ static const HexInsnTemplate templates_normal_0x6[] = { .encoding = { .mask = 0xffc01001, .op = 0x61c01000 }, .id = HEX_INS_J2_JUMPRLTEZPT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE, .masks = { { 0xb, 1 }, { 0x1, 13 }, { 0x1, 21 } }, .imm_scale = 2, .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, 
.isa_id = 's', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE, .masks = { { 0xb, 1 }, { 0x1, 13 }, { 0x1, 21 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 17 }, }, .pred = HEX_PRED_TRUE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -15652,8 +15459,8 @@ static const HexInsnTemplate templates_normal_0x6[] = { .encoding = { .mask = 0xffc01001, .op = 0x61800000 }, .id = HEX_INS_J2_JUMPRNZ, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE, .masks = { { 0xb, 1 }, { 0x1, 13 }, { 0x1, 21 } }, .imm_scale = 2, .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE, .masks = { { 0xb, 1 }, { 0x1, 13 }, { 0x1, 21 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 18 }, }, .pred = HEX_PRED_TRUE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -15666,8 +15473,8 @@ static const HexInsnTemplate templates_normal_0x6[] = { .encoding = { .mask = 0xffc01001, .op = 0x61801000 }, .id = HEX_INS_J2_JUMPRNZPT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE, .masks = { { 0xb, 1 }, { 0x1, 13 }, { 0x1, 21 } }, .imm_scale = 2, .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 4 }, + { .info = 
HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE, .masks = { { 0xb, 1 }, { 0x1, 13 }, { 0x1, 21 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 17 }, }, .pred = HEX_PRED_TRUE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -15680,8 +15487,8 @@ static const HexInsnTemplate templates_normal_0x6[] = { .encoding = { .mask = 0xffc01001, .op = 0x61000000 }, .id = HEX_INS_J2_JUMPRZ, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE, .masks = { { 0xb, 1 }, { 0x1, 13 }, { 0x1, 21 } }, .imm_scale = 2, .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE, .masks = { { 0xb, 1 }, { 0x1, 13 }, { 0x1, 21 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 18 }, }, .pred = HEX_PRED_TRUE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -15694,8 +15501,8 @@ static const HexInsnTemplate templates_normal_0x6[] = { .encoding = { .mask = 0xffc01001, .op = 0x61001000 }, .id = HEX_INS_J2_JUMPRZPT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE, .masks = { { 0xb, 1 }, { 0x1, 13 }, { 0x1, 21 } }, .imm_scale = 2, .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | 
HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE, .masks = { { 0xb, 1 }, { 0x1, 13 }, { 0x1, 21 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 17 }, }, .pred = HEX_PRED_TRUE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -15708,8 +15515,8 @@ static const HexInsnTemplate templates_normal_0x6[] = { .encoding = { .mask = 0xffe02004, .op = 0x69000000 }, .id = HEX_INS_J2_LOOP0I, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x2, 3 }, { 0x5, 8 } }, .imm_scale = 2, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x2, 0 }, { 0x3, 5 }, { 0x5, 16 } }, .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x2, 3 }, { 0x5, 8 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x2, 0 }, { 0x3, 5 }, { 0x5, 16 } }, .isa_id = 'U', .syntax = 7 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -15722,8 +15529,8 @@ static const HexInsnTemplate templates_normal_0x6[] = { .encoding = { .mask = 0xffe020e7, .op = 0x60000000 }, .id = HEX_INS_J2_LOOP0R, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x2, 3 }, { 0x5, 8 } }, .imm_scale = 2, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x2, 3 }, { 0x5, 8 } }, .isa_id = 'r', 
.imm_scale = 2, .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 7 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -15736,8 +15543,8 @@ static const HexInsnTemplate templates_normal_0x6[] = { .encoding = { .mask = 0xffe02004, .op = 0x69200000 }, .id = HEX_INS_J2_LOOP1I, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x2, 3 }, { 0x5, 8 } }, .imm_scale = 2, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x2, 0 }, { 0x3, 5 }, { 0x5, 16 } }, .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x2, 3 }, { 0x5, 8 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x2, 0 }, { 0x3, 5 }, { 0x5, 16 } }, .isa_id = 'U', .syntax = 7 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -15750,8 +15557,8 @@ static const HexInsnTemplate templates_normal_0x6[] = { .encoding = { .mask = 0xffe020e7, .op = 0x60200000 }, .id = HEX_INS_J2_LOOP1R, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x2, 3 }, { 0x5, 8 } }, .imm_scale = 2, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x2, 3 }, { 0x5, 8 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 6 }, + { .info = 
HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 7 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -15764,8 +15571,8 @@ static const HexInsnTemplate templates_normal_0x6[] = { .encoding = { .mask = 0xffe02004, .op = 0x69a00000 }, .id = HEX_INS_J2_PLOOP1SI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x2, 3 }, { 0x5, 8 } }, .imm_scale = 2, .syntax = 14 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x2, 0 }, { 0x3, 5 }, { 0x5, 16 } }, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x2, 3 }, { 0x5, 8 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x2, 0 }, { 0x3, 5 }, { 0x5, 16 } }, .isa_id = 'U', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -15778,8 +15585,8 @@ static const HexInsnTemplate templates_normal_0x6[] = { .encoding = { .mask = 0xffe020e7, .op = 0x60a00000 }, .id = HEX_INS_J2_PLOOP1SR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x2, 3 }, { 0x5, 8 } }, .imm_scale = 2, .syntax = 14 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x2, 3 }, { 0x5, 8 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 
0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -15792,8 +15599,8 @@ static const HexInsnTemplate templates_normal_0x6[] = { .encoding = { .mask = 0xffe02004, .op = 0x69c00000 }, .id = HEX_INS_J2_PLOOP2SI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x2, 3 }, { 0x5, 8 } }, .imm_scale = 2, .syntax = 14 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x2, 0 }, { 0x3, 5 }, { 0x5, 16 } }, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x2, 3 }, { 0x5, 8 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x2, 0 }, { 0x3, 5 }, { 0x5, 16 } }, .isa_id = 'U', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -15806,8 +15613,8 @@ static const HexInsnTemplate templates_normal_0x6[] = { .encoding = { .mask = 0xffe020e7, .op = 0x60c00000 }, .id = HEX_INS_J2_PLOOP2SR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x2, 3 }, { 0x5, 8 } }, .imm_scale = 2, .syntax = 14 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x2, 3 }, { 0x5, 8 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = 
HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -15820,8 +15627,8 @@ static const HexInsnTemplate templates_normal_0x6[] = { .encoding = { .mask = 0xffe02004, .op = 0x69e00000 }, .id = HEX_INS_J2_PLOOP3SI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x2, 3 }, { 0x5, 8 } }, .imm_scale = 2, .syntax = 14 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x2, 0 }, { 0x3, 5 }, { 0x5, 16 } }, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x2, 3 }, { 0x5, 8 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x2, 0 }, { 0x3, 5 }, { 0x5, 16 } }, .isa_id = 'U', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -15834,8 +15641,8 @@ static const HexInsnTemplate templates_normal_0x6[] = { .encoding = { .mask = 0xffe020e7, .op = 0x60e00000 }, .id = HEX_INS_J2_PLOOP3SR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x2, 3 }, { 0x5, 8 } }, .imm_scale = 2, .syntax = 14 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_PC_RELATIVE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x2, 3 }, { 0x5, 8 } }, .isa_id = 'r', .imm_scale = 2, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 
's', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -15857,7 +15664,7 @@ static const HexInsnTemplate templates_normal_0x6[] = { .encoding = { .mask = 0xffe03fff, .op = 0x64000060 }, .id = HEX_INS_Y2_CIAD, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 5 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -15869,7 +15676,7 @@ static const HexInsnTemplate templates_normal_0x6[] = { .encoding = { .mask = 0xffe03fff, .op = 0x65000000 }, .id = HEX_INS_Y2_CRSWAP0, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 7 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -15881,7 +15688,7 @@ static const HexInsnTemplate templates_normal_0x6[] = { .encoding = { .mask = 0xffe03fff, .op = 0x64000020 }, .id = HEX_INS_Y2_CSWI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 5 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -15893,8 +15700,8 @@ static const HexInsnTemplate templates_normal_0x6[] = { .encoding = { .mask = 0xffe03fe0, .op = 0x66000000 }, .id = HEX_INS_Y2_GETIMASK, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | 
HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -15906,8 +15713,8 @@ static const HexInsnTemplate templates_normal_0x6[] = { .encoding = { .mask = 0xffe03fe0, .op = 0x66600000 }, .id = HEX_INS_Y2_IASSIGNR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -15919,7 +15726,7 @@ static const HexInsnTemplate templates_normal_0x6[] = { .encoding = { .mask = 0xffe03fff, .op = 0x64000040 }, .id = HEX_INS_Y2_IASSIGNW, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -15949,7 +15756,7 @@ static const HexInsnTemplate templates_normal_0x6[] = { .encoding = { .mask = 0xffe03fff, .op = 0x64400020 }, .id = HEX_INS_Y2_RESUME, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 7 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -15961,8 
+15768,8 @@ static const HexInsnTemplate templates_normal_0x6[] = { .encoding = { .mask = 0xffe03cff, .op = 0x64800000 }, .id = HEX_INS_Y2_SETIMASK, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 't', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -15974,8 +15781,8 @@ static const HexInsnTemplate templates_normal_0x6[] = { .encoding = { .mask = 0xffe03cff, .op = 0x64800020 }, .id = HEX_INS_Y2_SETPRIO, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 't', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -15987,7 +15794,7 @@ static const HexInsnTemplate templates_normal_0x6[] = { .encoding = { .mask = 0xffe03fff, .op = 0x64600020 }, .id = HEX_INS_Y2_START, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 6 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -15999,7 +15806,7 @@ static const HexInsnTemplate templates_normal_0x6[] = { .encoding = { .mask = 0xffe03fff, .op = 0x64600000 }, .id = 
HEX_INS_Y2_STOP, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 5 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -16011,7 +15818,7 @@ static const HexInsnTemplate templates_normal_0x6[] = { .encoding = { .mask = 0xffe03fff, .op = 0x64000000 }, .id = HEX_INS_Y2_SWI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 4 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -16023,8 +15830,8 @@ static const HexInsnTemplate templates_normal_0x6[] = { .encoding = { .mask = 0xff803fe0, .op = 0x6e800000 }, .id = HEX_INS_Y2_TFRSCRR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x7, 16 } }, .reg_cls = HEX_REG_CLASS_SYS_REGS, .syntax = 3 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x7, 16 } }, .reg_cls = HEX_REG_CLASS_SYS_REGS, .isa_id = 's', .syntax = 3 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -16036,8 +15843,8 @@ static const HexInsnTemplate templates_normal_0x6[] = { .encoding = { .mask = 0xffe03f80, .op = 0x67000000 }, .id = HEX_INS_Y2_TFRSRCR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x7, 0 } }, .reg_cls = HEX_REG_CLASS_SYS_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 3 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG 
| HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x7, 0 } }, .reg_cls = HEX_REG_CLASS_SYS_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 3 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -16058,8 +15865,8 @@ static const HexInsnTemplate templates_normal_0x6[] = { .encoding = { .mask = 0xffe03fe0, .op = 0x6c800000 }, .id = HEX_INS_Y2_TLBP, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -16071,8 +15878,8 @@ static const HexInsnTemplate templates_normal_0x6[] = { .encoding = { .mask = 0xffe03fe0, .op = 0x6c400000 }, .id = HEX_INS_Y2_TLBR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -16093,8 +15900,8 @@ static const HexInsnTemplate templates_normal_0x6[] = { .encoding = { .mask = 
0xffe020ff, .op = 0x6c000000 }, .id = HEX_INS_Y2_TLBW, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 6 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -16106,7 +15913,7 @@ static const HexInsnTemplate templates_normal_0x6[] = { .encoding = { .mask = 0xffe03fff, .op = 0x64400000 }, .id = HEX_INS_Y2_WAIT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 5 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -16118,7 +15925,7 @@ static const HexInsnTemplate templates_normal_0x6[] = { .encoding = { .mask = 0xffe03fff, .op = 0x65200000 }, .id = HEX_INS_Y4_CRSWAP1, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 7 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -16130,7 +15937,7 @@ static const HexInsnTemplate templates_normal_0x6[] = { .encoding = { .mask = 0xffe03fff, .op = 0x6d800000 }, .id = HEX_INS_Y4_CRSWAP10, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 7 }, + { 
.info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 7 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -16142,7 +15949,7 @@ static const HexInsnTemplate templates_normal_0x6[] = { .encoding = { .mask = 0xffe03fff, .op = 0x64600040 }, .id = HEX_INS_Y4_NMI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 4 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -16154,7 +15961,7 @@ static const HexInsnTemplate templates_normal_0x6[] = { .encoding = { .mask = 0xffe03fff, .op = 0x64800060 }, .id = HEX_INS_Y4_SIAD, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 5 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -16166,8 +15973,8 @@ static const HexInsnTemplate templates_normal_0x6[] = { .encoding = { .mask = 0xff803fe0, .op = 0x6f000000 }, .id = HEX_INS_Y4_TFRSCPP, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x7, 16 } }, .reg_cls = HEX_REG_CLASS_SYS_REGS64, .syntax = 3 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x7, 16 } }, .reg_cls = HEX_REG_CLASS_SYS_REGS64, .isa_id = 's', .syntax = 
3 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -16179,8 +15986,8 @@ static const HexInsnTemplate templates_normal_0x6[] = { .encoding = { .mask = 0xffe03f80, .op = 0x6d000000 }, .id = HEX_INS_Y4_TFRSPCP, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x7, 0 } }, .reg_cls = HEX_REG_CLASS_SYS_REGS64, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 3 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x7, 0 } }, .reg_cls = HEX_REG_CLASS_SYS_REGS64, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 3 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -16192,7 +15999,7 @@ static const HexInsnTemplate templates_normal_0x6[] = { .encoding = { .mask = 0xffe03fff, .op = 0x62400000 }, .id = HEX_INS_Y4_TRACE, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 6 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -16204,9 +16011,9 @@ static const HexInsnTemplate templates_normal_0x6[] = { .encoding = { .mask = 0xffe020e0, .op = 0x6cc00000 }, .id = HEX_INS_Y5_CTLBW, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = 
HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -16218,7 +16025,7 @@ static const HexInsnTemplate templates_normal_0x6[] = { .encoding = { .mask = 0xffe03fff, .op = 0x6ca00000 }, .id = HEX_INS_Y5_TLBASIDI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -16230,8 +16037,8 @@ static const HexInsnTemplate templates_normal_0x6[] = { .encoding = { .mask = 0xffe03fe0, .op = 0x6ce00000 }, .id = HEX_INS_Y5_TLBOC, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -16243,7 +16050,7 @@ static const HexInsnTemplate templates_normal_0x6[] = { .encoding = { .mask = 0xffe03fff, .op = 0x62400020 }, .id = HEX_INS_Y6_DIAG, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 
0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 5 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -16255,8 +16062,8 @@ static const HexInsnTemplate templates_normal_0x6[] = { .encoding = { .mask = 0xffe020ff, .op = 0x62400040 }, .id = HEX_INS_Y6_DIAG0, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 7 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -16268,8 +16075,8 @@ static const HexInsnTemplate templates_normal_0x6[] = { .encoding = { .mask = 0xffe020ff, .op = 0x62400060 }, .id = HEX_INS_Y6_DIAG1, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 7 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -16281,8 +16088,8 @@ static const HexInsnTemplate templates_normal_0x6[] = { 
.encoding = { .mask = 0xff803fe0, .op = 0x6e800000 }, .id = HEX_INS_IMPORTED_RD_SS, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x7, 16 } }, .reg_cls = HEX_REG_CLASS_SYS_REGS, .syntax = 3 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x7, 16 } }, .reg_cls = HEX_REG_CLASS_SYS_REGS, .isa_id = 's', .syntax = 3 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -16294,8 +16101,8 @@ static const HexInsnTemplate templates_normal_0x6[] = { .encoding = { .mask = 0xff803fe0, .op = 0x6f000000 }, .id = HEX_INS_IMPORTED_RDD_SSS, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x7, 16 } }, .reg_cls = HEX_REG_CLASS_SYS_REGS64, .syntax = 3 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x7, 16 } }, .reg_cls = HEX_REG_CLASS_SYS_REGS64, .isa_id = 's', .syntax = 3 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -16307,8 +16114,8 @@ static const HexInsnTemplate templates_normal_0x6[] = { .encoding = { .mask = 0xffe03f80, .op = 0x67000000 }, .id = HEX_INS_IMPORTED_SD_RS, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x7, 0 } }, .reg_cls = HEX_REG_CLASS_SYS_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = 
HEX_REG_CLASS_INT_REGS, .syntax = 3 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x7, 0 } }, .reg_cls = HEX_REG_CLASS_SYS_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 3 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -16320,8 +16127,8 @@ static const HexInsnTemplate templates_normal_0x6[] = { .encoding = { .mask = 0xffc00000, .op = 0x6d000000 }, .id = HEX_INS_IMPORTED_SDD_RSS, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x7, 0 } }, .reg_cls = HEX_REG_CLASS_SYS_REGS64, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 3 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x7, 0 } }, .reg_cls = HEX_REG_CLASS_SYS_REGS64, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 3 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -16337,9 +16144,9 @@ static const HexInsnTemplate templates_normal_0x7[] = { .encoding = { .mask = 0xffc00000, .op = 0x76000000 }, .id = HEX_INS_A2_ANDIR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x9, 5 }, { 0x1, 21 } }, .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } 
}, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x9, 5 }, { 0x1, 21 } }, .isa_id = 's', .syntax = 8 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -16351,8 +16158,8 @@ static const HexInsnTemplate templates_normal_0x7[] = { .encoding = { .mask = 0xffe03fe0, .op = 0x70000000 }, .id = HEX_INS_A2_ASLH, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -16364,8 +16171,8 @@ static const HexInsnTemplate templates_normal_0x7[] = { .encoding = { .mask = 0xffe03fe0, .op = 0x70200000 }, .id = HEX_INS_A2_ASRH, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -16377,9 
+16184,9 @@ static const HexInsnTemplate templates_normal_0x7[] = { .encoding = { .mask = 0xff800000, .op = 0x7c000000 }, .id = HEX_INS_A2_COMBINEII, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x8, 5 } }, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED, .masks = { { 0x1, 13 }, { 0x7, 16 } }, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x8, 5 } }, .isa_id = 's', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED, .masks = { { 0x1, 13 }, { 0x7, 16 } }, .isa_id = 'S', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -16400,9 +16207,9 @@ static const HexInsnTemplate templates_normal_0x7[] = { .encoding = { .mask = 0xffc00000, .op = 0x76800000 }, .id = HEX_INS_A2_ORIR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x9, 5 }, { 0x1, 21 } }, .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, 
.isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x9, 5 }, { 0x1, 21 } }, .isa_id = 's', .syntax = 7 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -16414,10 +16221,10 @@ static const HexInsnTemplate templates_normal_0x7[] = { .encoding = { .mask = 0xff802000, .op = 0x74800000 }, .id = HEX_INS_A2_PADDIF, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 21 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 14 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x8, 5 } }, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 21 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'u', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x8, 5 } }, .isa_id = 's', .syntax = 15 }, }, .pred = HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -16430,10 +16237,10 @@ static const HexInsnTemplate templates_normal_0x7[] = { .encoding = { .mask = 0xff802000, .op = 0x74802000 }, .id = 
HEX_INS_A2_PADDIFNEW, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 21 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 18 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x8, 5 } }, .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 21 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'u', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x8, 5 } }, .isa_id = 's', .syntax = 19 }, }, .pred = HEX_PRED_FALSE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -16446,10 +16253,10 @@ static const HexInsnTemplate templates_normal_0x7[] = { .encoding = { .mask = 0xff802000, .op = 0x74000000 }, .id = HEX_INS_A2_PADDIT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 21 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks 
= { { 0x8, 5 } }, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 21 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'u', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x8, 5 } }, .isa_id = 's', .syntax = 14 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -16462,10 +16269,10 @@ static const HexInsnTemplate templates_normal_0x7[] = { .encoding = { .mask = 0xff802000, .op = 0x74002000 }, .id = HEX_INS_A2_PADDITNEW, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 21 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 17 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x8, 5 } }, .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 21 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'u', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | 
HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x8, 5 } }, .isa_id = 's', .syntax = 18 }, }, .pred = HEX_PRED_TRUE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -16478,9 +16285,9 @@ static const HexInsnTemplate templates_normal_0x7[] = { .encoding = { .mask = 0xffc00000, .op = 0x76400000 }, .id = HEX_INS_A2_SUBRI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x9, 5 }, { 0x1, 21 } }, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x9, 5 }, { 0x1, 21 } }, .isa_id = 's', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -16492,8 +16299,8 @@ static const HexInsnTemplate templates_normal_0x7[] = { .encoding = { .mask = 0xffe03fe0, .op = 0x70a00000 }, .id = HEX_INS_A2_SXTB, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, 
.reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -16505,8 +16312,8 @@ static const HexInsnTemplate templates_normal_0x7[] = { .encoding = { .mask = 0xffe03fe0, .op = 0x70e00000 }, .id = HEX_INS_A2_SXTH, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -16518,8 +16325,8 @@ static const HexInsnTemplate templates_normal_0x7[] = { .encoding = { .mask = 0xffe03fe0, .op = 0x70600000 }, .id = HEX_INS_A2_TFR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 3 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 3 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -16531,8 +16338,8 @@ static const HexInsnTemplate templates_normal_0x7[] = { .encoding = { .mask = 0xff200000, .op = 0x72200000 }, .id = HEX_INS_A2_TFRIH, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | 
HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0xe, 0 }, { 0x2, 22 } }, .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0xe, 0 }, { 0x2, 22 } }, .isa_id = 'u', .syntax = 5 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -16544,8 +16351,8 @@ static const HexInsnTemplate templates_normal_0x7[] = { .encoding = { .mask = 0xff200000, .op = 0x71200000 }, .id = HEX_INS_A2_TFRIL, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0xe, 0 }, { 0x2, 22 } }, .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0xe, 0 }, { 0x2, 22 } }, .isa_id = 'u', .syntax = 5 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -16557,8 +16364,8 @@ static const HexInsnTemplate templates_normal_0x7[] = { .encoding = { .mask = 0xff200000, .op = 0x78000000 }, .id = HEX_INS_A2_TFRSI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x9, 5 }, { 0x5, 16 }, { 0x2, 22 } }, .syntax = 3 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | 
HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x9, 5 }, { 0x5, 16 }, { 0x2, 22 } }, .isa_id = 's', .syntax = 3 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -16570,8 +16377,8 @@ static const HexInsnTemplate templates_normal_0x7[] = { .encoding = { .mask = 0xffe03fe0, .op = 0x70c00000 }, .id = HEX_INS_A2_ZXTH, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -16583,9 +16390,9 @@ static const HexInsnTemplate templates_normal_0x7[] = { .encoding = { .mask = 0xffe00000, .op = 0x7c800000 }, .id = HEX_INS_A4_COMBINEII, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED, .masks = { { 0x8, 5 } }, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x1, 13 }, { 0x5, 16 } }, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED, .masks = { { 0x8, 5 } }, .isa_id = 's', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | 
HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x1, 13 }, { 0x5, 16 } }, .isa_id = 'U', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -16597,9 +16404,9 @@ static const HexInsnTemplate templates_normal_0x7[] = { .encoding = { .mask = 0xffe02000, .op = 0x73202000 }, .id = HEX_INS_A4_COMBINEIR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x8, 5 } }, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x8, 5 } }, .isa_id = 's', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -16611,9 +16418,9 @@ static const HexInsnTemplate templates_normal_0x7[] = { .encoding = { .mask = 0xffe02000, .op = 0x73002000 }, .id = HEX_INS_A4_COMBINERI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | 
HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x8, 5 } }, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x8, 5 } }, .isa_id = 's', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -16625,9 +16432,9 @@ static const HexInsnTemplate templates_normal_0x7[] = { .encoding = { .mask = 0xffe03ce0, .op = 0x70002800 }, .id = HEX_INS_A4_PASLHF, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'u', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 15 }, }, .pred = HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -16640,9 +16447,9 @@ static const HexInsnTemplate templates_normal_0x7[] = { .encoding = { .mask = 0xffe03ce0, .op = 0x70002c00 }, .id = HEX_INS_A4_PASLHFNEW, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG 
| HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'u', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 19 }, }, .pred = HEX_PRED_FALSE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -16655,9 +16462,9 @@ static const HexInsnTemplate templates_normal_0x7[] = { .encoding = { .mask = 0xffe03ce0, .op = 0x70002000 }, .id = HEX_INS_A4_PASLHT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'u', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 14 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -16670,9 +16477,9 @@ static const HexInsnTemplate templates_normal_0x7[] = { .encoding = { .mask = 0xffe03ce0, .op = 0x70002400 }, .id = HEX_INS_A4_PASLHTNEW, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = 
HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'u', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 18 }, }, .pred = HEX_PRED_TRUE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -16685,9 +16492,9 @@ static const HexInsnTemplate templates_normal_0x7[] = { .encoding = { .mask = 0xffe03ce0, .op = 0x70202800 }, .id = HEX_INS_A4_PASRHF, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'u', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 15 }, }, .pred = HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -16700,9 +16507,9 @@ static const HexInsnTemplate templates_normal_0x7[] = { .encoding = { .mask = 0xffe03ce0, .op = 0x70202c00 }, .id = HEX_INS_A4_PASRHFNEW, .ops = { - { .info = 
HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'u', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 19 }, }, .pred = HEX_PRED_FALSE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -16715,9 +16522,9 @@ static const HexInsnTemplate templates_normal_0x7[] = { .encoding = { .mask = 0xffe03ce0, .op = 0x70202000 }, .id = HEX_INS_A4_PASRHT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'u', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 14 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -16730,9 +16537,9 @@ static const HexInsnTemplate templates_normal_0x7[] = { .encoding = { .mask = 0xffe03ce0, .op = 
0x70202400 }, .id = HEX_INS_A4_PASRHTNEW, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'u', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 18 }, }, .pred = HEX_PRED_TRUE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -16745,9 +16552,9 @@ static const HexInsnTemplate templates_normal_0x7[] = { .encoding = { .mask = 0xffe03ce0, .op = 0x70a02800 }, .id = HEX_INS_A4_PSXTBF, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'u', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 15 }, }, .pred = HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -16760,9 +16567,9 @@ static const HexInsnTemplate 
templates_normal_0x7[] = { .encoding = { .mask = 0xffe03ce0, .op = 0x70a02c00 }, .id = HEX_INS_A4_PSXTBFNEW, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'u', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 19 }, }, .pred = HEX_PRED_FALSE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -16775,9 +16582,9 @@ static const HexInsnTemplate templates_normal_0x7[] = { .encoding = { .mask = 0xffe03ce0, .op = 0x70a02000 }, .id = HEX_INS_A4_PSXTBT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'u', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 14 }, }, .pred = HEX_PRED_TRUE, .cond = 
RZ_TYPE_COND_HEX_SCL_TRUE, @@ -16790,9 +16597,9 @@ static const HexInsnTemplate templates_normal_0x7[] = { .encoding = { .mask = 0xffe03ce0, .op = 0x70a02400 }, .id = HEX_INS_A4_PSXTBTNEW, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'u', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 18 }, }, .pred = HEX_PRED_TRUE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -16805,9 +16612,9 @@ static const HexInsnTemplate templates_normal_0x7[] = { .encoding = { .mask = 0xffe03ce0, .op = 0x70e02800 }, .id = HEX_INS_A4_PSXTHF, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'u', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, 
.isa_id = 's', .syntax = 15 }, }, .pred = HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -16820,9 +16627,9 @@ static const HexInsnTemplate templates_normal_0x7[] = { .encoding = { .mask = 0xffe03ce0, .op = 0x70e02c00 }, .id = HEX_INS_A4_PSXTHFNEW, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'u', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 19 }, }, .pred = HEX_PRED_FALSE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -16835,9 +16642,9 @@ static const HexInsnTemplate templates_normal_0x7[] = { .encoding = { .mask = 0xffe03ce0, .op = 0x70e02000 }, .id = HEX_INS_A4_PSXTHT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'u', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 6 }, + { .info = 
HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 14 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -16850,9 +16657,9 @@ static const HexInsnTemplate templates_normal_0x7[] = { .encoding = { .mask = 0xffe03ce0, .op = 0x70e02400 }, .id = HEX_INS_A4_PSXTHTNEW, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'u', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 18 }, }, .pred = HEX_PRED_TRUE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -16865,9 +16672,9 @@ static const HexInsnTemplate templates_normal_0x7[] = { .encoding = { .mask = 0xffe03ce0, .op = 0x70802800 }, .id = HEX_INS_A4_PZXTBF, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'u', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = 
HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 15 }, }, .pred = HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -16880,9 +16687,9 @@ static const HexInsnTemplate templates_normal_0x7[] = { .encoding = { .mask = 0xffe03ce0, .op = 0x70802c00 }, .id = HEX_INS_A4_PZXTBFNEW, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'u', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 19 }, }, .pred = HEX_PRED_FALSE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -16895,9 +16702,9 @@ static const HexInsnTemplate templates_normal_0x7[] = { .encoding = { .mask = 0xffe03ce0, .op = 0x70802000 }, .id = HEX_INS_A4_PZXTBT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'u', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | 
HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 14 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -16910,9 +16717,9 @@ static const HexInsnTemplate templates_normal_0x7[] = { .encoding = { .mask = 0xffe03ce0, .op = 0x70802400 }, .id = HEX_INS_A4_PZXTBTNEW, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'u', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 18 }, }, .pred = HEX_PRED_TRUE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -16925,9 +16732,9 @@ static const HexInsnTemplate templates_normal_0x7[] = { .encoding = { .mask = 0xffe03ce0, .op = 0x70c02800 }, .id = HEX_INS_A4_PZXTHF, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id 
= 'u', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 15 }, }, .pred = HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -16940,9 +16747,9 @@ static const HexInsnTemplate templates_normal_0x7[] = { .encoding = { .mask = 0xffe03ce0, .op = 0x70c02c00 }, .id = HEX_INS_A4_PZXTHFNEW, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'u', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 19 }, }, .pred = HEX_PRED_FALSE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -16955,9 +16762,9 @@ static const HexInsnTemplate templates_normal_0x7[] = { .encoding = { .mask = 0xffe03ce0, .op = 0x70c02000 }, .id = HEX_INS_A4_PZXTHT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, 
.masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'u', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 14 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -16970,9 +16777,9 @@ static const HexInsnTemplate templates_normal_0x7[] = { .encoding = { .mask = 0xffe03ce0, .op = 0x70c02400 }, .id = HEX_INS_A4_PZXTHTNEW, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'u', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 18 }, }, .pred = HEX_PRED_TRUE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -16985,9 +16792,9 @@ static const HexInsnTemplate templates_normal_0x7[] = { .encoding = { .mask = 0xffe02000, .op = 0x73402000 }, .id = HEX_INS_A4_RCMPEQI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | 
HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x8, 5 } }, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x8, 5 } }, .isa_id = 's', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -16999,9 +16806,9 @@ static const HexInsnTemplate templates_normal_0x7[] = { .encoding = { .mask = 0xffe02000, .op = 0x73602000 }, .id = HEX_INS_A4_RCMPNEQI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x8, 5 } }, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x8, 5 } }, .isa_id = 's', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -17013,9 +16820,9 @@ static const HexInsnTemplate templates_normal_0x7[] = { .encoding = { .mask = 0xff902000, .op = 0x7e800000 }, .id = HEX_INS_C2_CMOVEIF, .ops = { - { .info 
= HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 21 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x8, 5 }, { 0x4, 16 } }, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 21 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'u', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x8, 5 }, { 0x4, 16 } }, .isa_id = 's', .syntax = 10 }, }, .pred = HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -17028,9 +16835,9 @@ static const HexInsnTemplate templates_normal_0x7[] = { .encoding = { .mask = 0xff902000, .op = 0x7e000000 }, .id = HEX_INS_C2_CMOVEIT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 21 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x8, 5 }, { 0x4, 16 } }, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 21 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'u', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | 
HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x8, 5 }, { 0x4, 16 } }, .isa_id = 's', .syntax = 9 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -17043,9 +16850,9 @@ static const HexInsnTemplate templates_normal_0x7[] = { .encoding = { .mask = 0xff902000, .op = 0x7e802000 }, .id = HEX_INS_C2_CMOVENEWIF, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 21 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x8, 5 }, { 0x4, 16 } }, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 21 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'u', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x8, 5 }, { 0x4, 16 } }, .isa_id = 's', .syntax = 14 }, }, .pred = HEX_PRED_FALSE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -17058,9 +16865,9 @@ static const HexInsnTemplate templates_normal_0x7[] = { .encoding = { .mask = 0xff902000, .op = 0x7e002000 }, .id = HEX_INS_C2_CMOVENEWIT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 21 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | 
HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x8, 5 }, { 0x4, 16 } }, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 21 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'u', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x8, 5 }, { 0x4, 16 } }, .isa_id = 's', .syntax = 13 }, }, .pred = HEX_PRED_TRUE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -17073,9 +16880,9 @@ static const HexInsnTemplate templates_normal_0x7[] = { .encoding = { .mask = 0xffc0001c, .op = 0x75000000 }, .id = HEX_INS_C2_CMPEQI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x9, 5 }, { 0x1, 21 } }, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x9, 5 }, { 0x1, 21 } }, .isa_id = 's', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -17087,9 +16894,9 @@ static const HexInsnTemplate templates_normal_0x7[] = { .encoding = { 
.mask = 0xffc0001c, .op = 0x75400000 }, .id = HEX_INS_C2_CMPGTI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x9, 5 }, { 0x1, 21 } }, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x9, 5 }, { 0x1, 21 } }, .isa_id = 's', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -17101,9 +16908,9 @@ static const HexInsnTemplate templates_normal_0x7[] = { .encoding = { .mask = 0xffe0001c, .op = 0x75800000 }, .id = HEX_INS_C2_CMPGTUI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x9, 5 } }, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 11 }, + { .info = 
HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x9, 5 } }, .isa_id = 'u', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -17115,10 +16922,10 @@ static const HexInsnTemplate templates_normal_0x7[] = { .encoding = { .mask = 0xfe000000, .op = 0x7a000000 }, .id = HEX_INS_C2_MUXII, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 23 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x8, 5 } }, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED, .masks = { { 0x1, 13 }, { 0x7, 16 } }, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 23 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'u', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x8, 5 } }, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED, .masks = { { 0x1, 13 }, { 0x7, 16 } }, .isa_id = 'S', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -17130,10 +16937,10 @@ static const HexInsnTemplate templates_normal_0x7[] = { .encoding = { .mask = 0xff802000, .op = 0x73000000 }, .id = HEX_INS_C2_MUXIR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 
0x2, 21 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x8, 5 } }, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 21 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'u', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x8, 5 } }, .isa_id = 's', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -17145,10 +16952,10 @@ static const HexInsnTemplate templates_normal_0x7[] = { .encoding = { .mask = 0xff802000, .op = 0x73800000 }, .id = HEX_INS_C2_MUXRI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 21 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x8, 5 } }, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 21 } }, .reg_cls 
= HEX_REG_CLASS_PRED_REGS, .isa_id = 'u', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x8, 5 } }, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -17160,9 +16967,9 @@ static const HexInsnTemplate templates_normal_0x7[] = { .encoding = { .mask = 0xffc0001c, .op = 0x75400010 }, .id = HEX_INS_C4_CMPLTEI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x9, 5 }, { 0x1, 21 } }, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x9, 5 }, { 0x1, 21 } }, .isa_id = 's', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -17174,9 +16981,9 @@ static const HexInsnTemplate templates_normal_0x7[] = { .encoding = { .mask = 0xffe0001c, .op = 0x75800010 }, .id = HEX_INS_C4_CMPLTEUI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 
} }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x9, 5 } }, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x9, 5 } }, .isa_id = 'u', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -17188,9 +16995,9 @@ static const HexInsnTemplate templates_normal_0x7[] = { .encoding = { .mask = 0xffc0001c, .op = 0x75000010 }, .id = HEX_INS_C4_CMPNEQI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x9, 5 }, { 0x1, 21 } }, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x9, 5 }, { 0x1, 21 } }, .isa_id = 's', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -17206,8 +17013,8 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding 
= { .mask = 0xffe03fe0, .op = 0x8c800080 }, .id = HEX_INS_A2_ABS, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 7 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -17219,8 +17026,8 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe03fe0, .op = 0x808000c0 }, .id = HEX_INS_A2_ABSP, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 7 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -17232,8 +17039,8 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe03fe0, .op = 0x8c8000a0 }, .id = HEX_INS_A2_ABSSAT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, + { 
.info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 7 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -17245,8 +17052,8 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe03fe0, .op = 0x808000a0 }, .id = HEX_INS_A2_NEGP, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 7 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -17258,8 +17065,8 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe03fe0, .op = 0x8c8000c0 }, .id = HEX_INS_A2_NEGSAT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 7 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ 
-17271,8 +17078,8 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe03fe0, .op = 0x80800080 }, .id = HEX_INS_A2_NOTP, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 7 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -17284,8 +17091,8 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe03fe0, .op = 0x88c00020 }, .id = HEX_INS_A2_ROUNDSAT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -17297,8 +17104,8 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe03fe0, .op = 0x88c00000 }, .id = HEX_INS_A2_SAT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls 
= HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 7 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -17310,8 +17117,8 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe03fe0, .op = 0x8cc000e0 }, .id = HEX_INS_A2_SATB, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -17323,8 +17130,8 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe03fe0, .op = 0x8cc00080 }, .id = HEX_INS_A2_SATH, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = 
HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -17336,8 +17143,8 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe03fe0, .op = 0x8cc000c0 }, .id = HEX_INS_A2_SATUB, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -17349,8 +17156,8 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe03fe0, .op = 0x8cc000a0 }, .id = HEX_INS_A2_SATUH, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -17362,8 +17169,8 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe03fe0, .op = 0x8c8000e0 }, .id = HEX_INS_A2_SWIZ, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, 
.reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -17375,8 +17182,8 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe03fe0, .op = 0x84400000 }, .id = HEX_INS_A2_SXTW, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -17388,8 +17195,8 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe03fe0, .op = 0x80400080 }, .id = HEX_INS_A2_VABSH, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } 
}, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -17401,8 +17208,8 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe03fe0, .op = 0x804000a0 }, .id = HEX_INS_A2_VABSHSAT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -17414,8 +17221,8 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe03fe0, .op = 0x804000c0 }, .id = HEX_INS_A2_VABSW, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -17427,8 +17234,8 @@ static const HexInsnTemplate templates_normal_0x8[] 
= { .encoding = { .mask = 0xffe03fe0, .op = 0x804000e0 }, .id = HEX_INS_A2_VABSWSAT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -17440,8 +17247,8 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe03fe0, .op = 0x808000e0 }, .id = HEX_INS_A2_VCONJ, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -17453,9 +17260,9 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe020e0, .op = 0x88c00080 }, .id = HEX_INS_A4_BITSPLITI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | 
HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x5, 8 } }, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x5, 8 } }, .isa_id = 'u', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -17467,9 +17274,9 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe020e0, .op = 0x8ce00000 }, .id = HEX_INS_A4_CROUND_RI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x5, 8 } }, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x5, 8 } }, .isa_id = 'u', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -17481,9 +17288,9 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe020e0, .op = 0x8ce00080 }, .id 
= HEX_INS_A4_ROUND_RI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x5, 8 } }, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x5, 8 } }, .isa_id = 'u', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -17495,9 +17302,9 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe020e0, .op = 0x8ce000c0 }, .id = HEX_INS_A4_ROUND_RI_SAT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x5, 8 } }, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x5, 8 } }, .isa_id = 'u', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -17509,9 +17316,9 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 
0xffe020e0, .op = 0x88c000a0 }, .id = HEX_INS_A7_CLIP, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x5, 8 } }, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x5, 8 } }, .isa_id = 'u', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -17523,9 +17330,9 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe000e0, .op = 0x8ce00040 }, .id = HEX_INS_A7_CROUNDD_RI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x6, 8 } }, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x6, 8 } }, .isa_id = 'u', 
.syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -17537,9 +17344,9 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe020e0, .op = 0x88c000c0 }, .id = HEX_INS_A7_VCLIP, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x5, 8 } }, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x5, 8 } }, .isa_id = 'u', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -17551,9 +17358,9 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe000fc, .op = 0x85800000 }, .id = HEX_INS_C2_BITSCLRI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x6, 8 } }, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = 
HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x6, 8 } }, .isa_id = 'u', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -17565,8 +17372,8 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffff3ce0, .op = 0x86000000 }, .id = HEX_INS_C2_MASK, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 't', .syntax = 8 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -17578,8 +17385,8 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xfffc3fe0, .op = 0x89400000 }, .id = HEX_INS_C2_TFRPR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 16 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 3 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 16 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 's', .syntax = 3 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -17591,8 +17398,8 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe03ffc, .op = 0x85400000 }, .id = HEX_INS_C2_TFRRP, .ops = { - { 
.info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 3 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 3 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -17604,9 +17411,9 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xfffc3ce0, .op = 0x89000000 }, .id = HEX_INS_C2_VITPACK, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 16 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 16 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 's', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 't', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -17618,9 +17425,9 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe000fc, .op = 0x85a00000 }, .id = HEX_INS_C4_NBITSCLRI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = 
HEX_REG_CLASS_INT_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x6, 8 } }, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x6, 8 } }, .isa_id = 'u', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -17632,8 +17439,8 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe03fe0, .op = 0x80e00060 }, .id = HEX_INS_F2_CONV_D2DF, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 16 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -17645,8 +17452,8 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe03fe0, .op = 0x88400020 }, .id = HEX_INS_F2_CONV_D2SF, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 16 }, 
+ { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 16 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -17658,8 +17465,8 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe03fe0, .op = 0x80e00000 }, .id = HEX_INS_F2_CONV_DF2D, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 16 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -17671,8 +17478,8 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe03fe0, .op = 0x80e000c0 }, .id = HEX_INS_F2_CONV_DF2D_CHOP, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = 
HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 16 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -17684,8 +17491,8 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe03fe0, .op = 0x88000020 }, .id = HEX_INS_F2_CONV_DF2SF, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 17 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -17697,8 +17504,8 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe03fe0, .op = 0x80e00020 }, .id = HEX_INS_F2_CONV_DF2UD, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 17 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -17710,8 +17517,8 @@ static const HexInsnTemplate 
templates_normal_0x8[] = { .encoding = { .mask = 0xffe03fe0, .op = 0x80e000e0 }, .id = HEX_INS_F2_CONV_DF2UD_CHOP, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 17 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -17723,8 +17530,8 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe03fe0, .op = 0x88600020 }, .id = HEX_INS_F2_CONV_DF2UW, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 17 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -17736,8 +17543,8 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe03fe0, .op = 0x88a00020 }, .id = HEX_INS_F2_CONV_DF2UW_CHOP, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = 
HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 17 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -17749,8 +17556,8 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe03fe0, .op = 0x88800020 }, .id = HEX_INS_F2_CONV_DF2W, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 16 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -17762,8 +17569,8 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe03fe0, .op = 0x88e00020 }, .id = HEX_INS_F2_CONV_DF2W_CHOP, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, 
.isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 16 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -17775,8 +17582,8 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe03fe0, .op = 0x84800080 }, .id = HEX_INS_F2_CONV_SF2D, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 16 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -17788,8 +17595,8 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe03fe0, .op = 0x848000c0 }, .id = HEX_INS_F2_CONV_SF2D_CHOP, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 16 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -17801,8 +17608,8 @@ static const HexInsnTemplate templates_normal_0x8[] = 
{ .encoding = { .mask = 0xffe03fe0, .op = 0x84800000 }, .id = HEX_INS_F2_CONV_SF2DF, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 17 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -17814,8 +17621,8 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe03fe0, .op = 0x84800060 }, .id = HEX_INS_F2_CONV_SF2UD, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 17 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -17827,8 +17634,8 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe03fe0, .op = 0x848000a0 }, .id = HEX_INS_F2_CONV_SF2UD_CHOP, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks 
= { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 17 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -17840,8 +17647,8 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe03fe0, .op = 0x8b600000 }, .id = HEX_INS_F2_CONV_SF2UW, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 17 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -17853,8 +17660,8 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe03fe0, .op = 0x8b600020 }, .id = HEX_INS_F2_CONV_SF2UW_CHOP, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 17 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -17866,8 
+17673,8 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe03fe0, .op = 0x8b800000 }, .id = HEX_INS_F2_CONV_SF2W, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 16 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -17879,8 +17686,8 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe03fe0, .op = 0x8b800020 }, .id = HEX_INS_F2_CONV_SF2W_CHOP, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 16 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -17892,8 +17699,8 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe03fe0, .op = 0x80e00040 }, .id = HEX_INS_F2_CONV_UD2DF, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = 
HEX_REG_CLASS_DOUBLE_REGS, .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 17 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -17905,8 +17712,8 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe03fe0, .op = 0x88200020 }, .id = HEX_INS_F2_CONV_UD2SF, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 17 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -17918,8 +17725,8 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe03fe0, .op = 0x84800020 }, .id = HEX_INS_F2_CONV_UW2DF, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 
0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 17 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -17931,8 +17738,8 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe03fe0, .op = 0x8b200000 }, .id = HEX_INS_F2_CONV_UW2SF, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 17 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -17944,8 +17751,8 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe03fe0, .op = 0x84800040 }, .id = HEX_INS_F2_CONV_W2DF, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 16 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -17957,8 +17764,8 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe03fe0, .op = 0x8b400000 }, .id = HEX_INS_F2_CONV_W2SF, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, 
.reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 16 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -17970,9 +17777,9 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe020fc, .op = 0x85e00000 }, .id = HEX_INS_F2_SFCLASS, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x5, 8 } }, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x5, 8 } }, .isa_id = 'u', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -17984,8 +17791,8 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe03fe0, .op = 0x8ba00000 }, .id = HEX_INS_F2_SFFIXUPR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | 
HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -17997,9 +17804,9 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe03f80, .op = 0x8be00000 }, .id = HEX_INS_F2_SFINVSQRTA, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 1 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'e', .syntax = 1 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -18011,9 +17818,9 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe000e0, .op = 0x80000040 }, .id = HEX_INS_S2_ASL_I_P, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x6, 8 } }, .syntax = 8 }, + { .info = 
HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x6, 8 } }, .isa_id = 'u', .syntax = 8 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -18025,9 +17832,9 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe000e0, .op = 0x820000c0 }, .id = HEX_INS_S2_ASL_I_P_ACC, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x6, 8 } }, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x6, 8 } }, .isa_id = 'u', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -18039,9 +17846,9 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe000e0, .op = 0x82400040 }, .id = HEX_INS_S2_ASL_I_P_AND, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = 
HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x6, 8 } }, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x6, 8 } }, .isa_id = 'u', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -18053,9 +17860,9 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe000e0, .op = 0x82000040 }, .id = HEX_INS_S2_ASL_I_P_NAC, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x6, 8 } }, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x6, 8 } }, .isa_id = 'u', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, 
@@ -18067,9 +17874,9 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe000e0, .op = 0x824000c0 }, .id = HEX_INS_S2_ASL_I_P_OR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x6, 8 } }, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x6, 8 } }, .isa_id = 'u', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -18081,9 +17888,9 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe000e0, .op = 0x82800040 }, .id = HEX_INS_S2_ASL_I_P_XACC, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x6, 8 } }, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = 
HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x6, 8 } }, .isa_id = 'u', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -18095,9 +17902,9 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe020e0, .op = 0x8c000040 }, .id = HEX_INS_S2_ASL_I_R, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x5, 8 } }, .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x5, 8 } }, .isa_id = 'u', .syntax = 8 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -18109,9 +17916,9 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe020e0, .op = 0x8e0000c0 }, .id = HEX_INS_S2_ASL_I_R_ACC, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x5, 8 } }, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = 
HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x5, 8 } }, .isa_id = 'u', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -18123,9 +17930,9 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe020e0, .op = 0x8e400040 }, .id = HEX_INS_S2_ASL_I_R_AND, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x5, 8 } }, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x5, 8 } }, .isa_id = 'u', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -18137,9 +17944,9 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe020e0, .op = 0x8e000040 }, .id = HEX_INS_S2_ASL_I_R_NAC, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x5, 8 } }, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks 
= { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x5, 8 } }, .isa_id = 'u', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -18151,9 +17958,9 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe020e0, .op = 0x8e4000c0 }, .id = HEX_INS_S2_ASL_I_R_OR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x5, 8 } }, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x5, 8 } }, .isa_id = 'u', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -18165,9 +17972,9 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe020e0, .op = 0x8c400040 }, .id = HEX_INS_S2_ASL_I_R_SAT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x5, 8 } }, .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | 
HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x5, 8 } }, .isa_id = 'u', .syntax = 8 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -18179,9 +17986,9 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe020e0, .op = 0x8e800040 }, .id = HEX_INS_S2_ASL_I_R_XACC, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x5, 8 } }, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x5, 8 } }, .isa_id = 'u', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -18193,9 +18000,9 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe030e0, .op = 0x80800040 }, .id = HEX_INS_S2_ASL_I_VH, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | 
HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 8 } }, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 8 } }, .isa_id = 'u', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -18207,9 +18014,9 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe020e0, .op = 0x80400040 }, .id = HEX_INS_S2_ASL_I_VW, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x5, 8 } }, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x5, 8 } }, .isa_id = 'u', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -18221,9 +18028,9 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe000e0, .op = 0x80000000 }, .id = HEX_INS_S2_ASR_I_P, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | 
HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x6, 8 } }, .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x6, 8 } }, .isa_id = 'u', .syntax = 8 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -18235,9 +18042,9 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe000e0, .op = 0x82000080 }, .id = HEX_INS_S2_ASR_I_P_ACC, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x6, 8 } }, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks 
= { { 0x6, 8 } }, .isa_id = 'u', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -18249,9 +18056,9 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe000e0, .op = 0x82400000 }, .id = HEX_INS_S2_ASR_I_P_AND, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x6, 8 } }, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x6, 8 } }, .isa_id = 'u', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -18263,9 +18070,9 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe000e0, .op = 0x82000000 }, .id = HEX_INS_S2_ASR_I_P_NAC, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x6, 8 } }, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, 
.reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x6, 8 } }, .isa_id = 'u', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -18277,9 +18084,9 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe000e0, .op = 0x82400080 }, .id = HEX_INS_S2_ASR_I_P_OR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x6, 8 } }, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x6, 8 } }, .isa_id = 'u', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -18291,9 +18098,9 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe000e0, .op = 0x80c000e0 }, .id = HEX_INS_S2_ASR_I_P_RND, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 
0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x6, 8 } }, .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x6, 8 } }, .isa_id = 'u', .syntax = 8 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -18305,9 +18112,9 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe020e0, .op = 0x8c000000 }, .id = HEX_INS_S2_ASR_I_R, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x5, 8 } }, .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x5, 8 } }, .isa_id = 'u', .syntax = 8 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -18319,9 +18126,9 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe020e0, .op = 0x8e000080 }, .id = HEX_INS_S2_ASR_I_R_ACC, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, 
.reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x5, 8 } }, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x5, 8 } }, .isa_id = 'u', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -18333,9 +18140,9 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe020e0, .op = 0x8e400000 }, .id = HEX_INS_S2_ASR_I_R_AND, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x5, 8 } }, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x5, 8 } }, .isa_id = 'u', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -18347,9 +18154,9 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe020e0, .op = 0x8e000000 }, .id = HEX_INS_S2_ASR_I_R_NAC, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | 
HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x5, 8 } }, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x5, 8 } }, .isa_id = 'u', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -18361,9 +18168,9 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe020e0, .op = 0x8e400080 }, .id = HEX_INS_S2_ASR_I_R_OR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x5, 8 } }, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x5, 8 } }, .isa_id = 'u', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -18375,9 +18182,9 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe020e0, .op = 0x8c400000 }, .id = HEX_INS_S2_ASR_I_R_RND, .ops = { - { .info = 
HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x5, 8 } }, .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x5, 8 } }, .isa_id = 'u', .syntax = 8 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -18389,9 +18196,9 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe020e0, .op = 0x88c00040 }, .id = HEX_INS_S2_ASR_I_SVW_TRUN, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x5, 8 } }, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x5, 8 } }, .isa_id = 'u', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -18403,9 +18210,9 @@ static const HexInsnTemplate templates_normal_0x8[] = { 
.encoding = { .mask = 0xffe030e0, .op = 0x80800000 }, .id = HEX_INS_S2_ASR_I_VH, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 8 } }, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 8 } }, .isa_id = 'u', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -18417,9 +18224,9 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe020e0, .op = 0x80400000 }, .id = HEX_INS_S2_ASR_I_VW, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x5, 8 } }, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = 
HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x5, 8 } }, .isa_id = 'u', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -18431,8 +18238,8 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe03fe0, .op = 0x8c4000c0 }, .id = HEX_INS_S2_BREV, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -18444,8 +18251,8 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe03fe0, .op = 0x80c000c0 }, .id = HEX_INS_S2_BREVP, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 8 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -18457,8 +18264,8 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 
0xffe03fe0, .op = 0x8c0000a0 }, .id = HEX_INS_S2_CL0, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 7 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -18470,8 +18277,8 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe03fe0, .op = 0x88400040 }, .id = HEX_INS_S2_CL0P, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 7 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -18483,8 +18290,8 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe03fe0, .op = 0x8c0000c0 }, .id = HEX_INS_S2_CL1, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 
} }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 7 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -18496,8 +18303,8 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe03fe0, .op = 0x88400080 }, .id = HEX_INS_S2_CL1P, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 7 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -18509,8 +18316,8 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe03fe0, .op = 0x8c000080 }, .id = HEX_INS_S2_CLB, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 7 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -18522,8 +18329,8 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe03fe0, .op = 0x8c0000e0 }, .id = HEX_INS_S2_CLBNORM, 
.ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -18535,8 +18342,8 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe03fe0, .op = 0x88400000 }, .id = HEX_INS_S2_CLBP, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 7 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -18548,9 +18355,9 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe020e0, .op = 0x8cc00020 }, .id = HEX_INS_S2_CLRBIT_I, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x5, 8 } }, .syntax = 11 }, + { .info = 
HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x5, 8 } }, .isa_id = 'u', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -18562,8 +18369,8 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe03fe0, .op = 0x8c400080 }, .id = HEX_INS_S2_CT0, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 7 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -18575,8 +18382,8 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe03fe0, .op = 0x88e00040 }, .id = HEX_INS_S2_CT0P, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 7 }, 
}, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -18588,8 +18395,8 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe03fe0, .op = 0x8c4000a0 }, .id = HEX_INS_S2_CT1, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 7 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -18601,8 +18408,8 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe03fe0, .op = 0x88e00080 }, .id = HEX_INS_S2_CT1P, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 7 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -18614,8 +18421,8 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe03fe0, .op = 0x80c00080 }, .id = HEX_INS_S2_DEINTERLEAVE, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info 
= HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 16 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -18627,10 +18434,10 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xff802000, .op = 0x8d000000 }, .id = HEX_INS_S2_EXTRACTU, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x5, 8 } }, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x3, 5 }, { 0x2, 21 } }, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x5, 8 } }, .isa_id = 'u', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x3, 5 }, { 0x2, 21 } }, .isa_id = 'U', .syntax = 14 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -18642,10 +18449,10 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xff000000, .op = 0x81000000 }, .id = HEX_INS_S2_EXTRACTUP, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = 
HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x6, 8 } }, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x3, 5 }, { 0x3, 21 } }, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x6, 8 } }, .isa_id = 'u', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x3, 5 }, { 0x3, 21 } }, .isa_id = 'U', .syntax = 14 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -18657,10 +18464,10 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xff802000, .op = 0x8f000000 }, .id = HEX_INS_S2_INSERT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x5, 8 } }, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x3, 5 }, { 0x2, 21 } }, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x5, 8 } }, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks 
= { { 0x3, 5 }, { 0x2, 21 } }, .isa_id = 'U', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -18672,10 +18479,10 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xff000000, .op = 0x83000000 }, .id = HEX_INS_S2_INSERTP, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x6, 8 } }, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x3, 5 }, { 0x3, 21 } }, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x6, 8 } }, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x3, 5 }, { 0x3, 21 } }, .isa_id = 'U', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -18687,8 +18494,8 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe03fe0, .op = 0x80c000a0 }, .id = HEX_INS_S2_INTERLEAVE, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = 
{ { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 14 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -18700,9 +18507,9 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe000e0, .op = 0x80000020 }, .id = HEX_INS_S2_LSR_I_P, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x6, 8 } }, .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x6, 8 } }, .isa_id = 'u', .syntax = 8 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -18714,9 +18521,9 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe000e0, .op = 0x820000a0 }, .id = HEX_INS_S2_LSR_I_P_ACC, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | 
HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x6, 8 } }, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x6, 8 } }, .isa_id = 'u', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -18728,9 +18535,9 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe000e0, .op = 0x82400020 }, .id = HEX_INS_S2_LSR_I_P_AND, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x6, 8 } }, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x6, 8 } }, .isa_id = 'u', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -18742,9 +18549,9 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe000e0, .op = 0x82000020 }, .id = HEX_INS_S2_LSR_I_P_NAC, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | 
HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x6, 8 } }, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x6, 8 } }, .isa_id = 'u', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -18756,9 +18563,9 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe000e0, .op = 0x824000a0 }, .id = HEX_INS_S2_LSR_I_P_OR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x6, 8 } }, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks 
= { { 0x6, 8 } }, .isa_id = 'u', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -18770,9 +18577,9 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe000e0, .op = 0x82800020 }, .id = HEX_INS_S2_LSR_I_P_XACC, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x6, 8 } }, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x6, 8 } }, .isa_id = 'u', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -18784,9 +18591,9 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe020e0, .op = 0x8c000020 }, .id = HEX_INS_S2_LSR_I_R, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x5, 8 } }, .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = 
{ { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x5, 8 } }, .isa_id = 'u', .syntax = 8 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -18798,9 +18605,9 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe020e0, .op = 0x8e0000a0 }, .id = HEX_INS_S2_LSR_I_R_ACC, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x5, 8 } }, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x5, 8 } }, .isa_id = 'u', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -18812,9 +18619,9 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe020e0, .op = 0x8e400020 }, .id = HEX_INS_S2_LSR_I_R_AND, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x5, 8 } }, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = 
HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x5, 8 } }, .isa_id = 'u', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -18826,9 +18633,9 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe020e0, .op = 0x8e000020 }, .id = HEX_INS_S2_LSR_I_R_NAC, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x5, 8 } }, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x5, 8 } }, .isa_id = 'u', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -18840,9 +18647,9 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe020e0, .op = 0x8e4000a0 }, .id = HEX_INS_S2_LSR_I_R_OR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x5, 8 } }, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', 
.syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x5, 8 } }, .isa_id = 'u', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -18854,9 +18661,9 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe020e0, .op = 0x8e800020 }, .id = HEX_INS_S2_LSR_I_R_XACC, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x5, 8 } }, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x5, 8 } }, .isa_id = 'u', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -18868,9 +18675,9 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe030e0, .op = 0x80800020 }, .id = HEX_INS_S2_LSR_I_VH, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 8 } }, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | 
HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 8 } }, .isa_id = 'u', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -18882,9 +18689,9 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe020e0, .op = 0x80400020 }, .id = HEX_INS_S2_LSR_I_VW, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x5, 8 } }, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x5, 8 } }, .isa_id = 'u', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -18896,9 +18703,9 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xff9f2000, .op = 0x8d002000 }, .id = HEX_INS_S2_MASK, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM, 
.masks = { { 0x5, 8 } }, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x3, 5 }, { 0x2, 21 } }, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x5, 8 } }, .isa_id = 'u', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x3, 5 }, { 0x2, 21 } }, .isa_id = 'U', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -18910,9 +18717,9 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe020e0, .op = 0x8cc00000 }, .id = HEX_INS_S2_SETBIT_I, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x5, 8 } }, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x5, 8 } }, .isa_id = 'u', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -18924,8 +18731,8 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe03fe0, .op = 0x8c800000 }, .id = HEX_INS_S2_SVSATHB, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = 
HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -18937,8 +18744,8 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe03fe0, .op = 0x8c800040 }, .id = HEX_INS_S2_SVSATHUB, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -18950,10 +18757,10 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffc00000, .op = 0x87000000 }, .id = HEX_INS_S2_TABLEIDXB, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x3, 5 }, { 0x1, 21 } }, .syntax = 14 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED, .masks = { { 0x6, 8 } }, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 
's', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x3, 5 }, { 0x1, 21 } }, .isa_id = 'u', .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED, .masks = { { 0x6, 8 } }, .isa_id = 'S', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -18965,10 +18772,10 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffc00000, .op = 0x87c00000 }, .id = HEX_INS_S2_TABLEIDXD, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x3, 5 }, { 0x1, 21 } }, .syntax = 14 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED, .masks = { { 0x6, 8 } }, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x3, 5 }, { 0x1, 21 } }, .isa_id = 'u', .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED, .masks = { { 0x6, 8 } }, .isa_id = 'S', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -18980,10 +18787,10 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffc00000, .op = 0x87400000 }, .id = HEX_INS_S2_TABLEIDXH, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 
0x3, 5 }, { 0x1, 21 } }, .syntax = 14 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED, .masks = { { 0x6, 8 } }, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x3, 5 }, { 0x1, 21 } }, .isa_id = 'u', .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED, .masks = { { 0x6, 8 } }, .isa_id = 'S', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -18995,10 +18802,10 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffc00000, .op = 0x87800000 }, .id = HEX_INS_S2_TABLEIDXW, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x3, 5 }, { 0x1, 21 } }, .syntax = 14 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED, .masks = { { 0x6, 8 } }, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x3, 5 }, { 0x1, 21 } }, .isa_id = 'u', .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED, .masks = { { 0x6, 8 } }, .isa_id = 'S', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -19010,9 +18817,9 @@ static const HexInsnTemplate 
templates_normal_0x8[] = { .encoding = { .mask = 0xffe020e0, .op = 0x8cc00040 }, .id = HEX_INS_S2_TOGGLEBIT_I, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x5, 8 } }, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x5, 8 } }, .isa_id = 'u', .syntax = 14 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -19024,9 +18831,9 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe020fc, .op = 0x85000000 }, .id = HEX_INS_S2_TSTBIT_I, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x5, 8 } }, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x5, 8 } }, .isa_id = 'u', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -19038,8 
+18845,8 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe03fe0, .op = 0x88800080 }, .id = HEX_INS_S2_VRNDPACKWH, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -19051,8 +18858,8 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe03fe0, .op = 0x888000c0 }, .id = HEX_INS_S2_VRNDPACKWHS, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -19064,8 +18871,8 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe03fe0, .op = 0x880000c0 }, .id = HEX_INS_S2_VSATHB, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = 
HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -19077,8 +18884,8 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe03fe0, .op = 0x800000e0 }, .id = HEX_INS_S2_VSATHB_NOPACK, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -19090,8 +18897,8 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe03fe0, .op = 0x88000000 }, .id = HEX_INS_S2_VSATHUB, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = 
HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -19103,8 +18910,8 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe03fe0, .op = 0x80000080 }, .id = HEX_INS_S2_VSATHUB_NOPACK, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -19116,8 +18923,8 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe03fe0, .op = 0x88000040 }, .id = HEX_INS_S2_VSATWH, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = 
RZ_TYPE_COND_AL, @@ -19129,8 +18936,8 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe03fe0, .op = 0x800000c0 }, .id = HEX_INS_S2_VSATWH_NOPACK, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -19142,8 +18949,8 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe03fe0, .op = 0x88000080 }, .id = HEX_INS_S2_VSATWUH, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -19155,8 +18962,8 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe03fe0, .op = 0x800000a0 }, .id = HEX_INS_S2_VSATWUH_NOPACK, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | 
HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -19168,8 +18975,8 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe03fe0, .op = 0x8c4000e0 }, .id = HEX_INS_S2_VSPLATRB, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -19181,8 +18988,8 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe03fe0, .op = 0x84400040 }, .id = HEX_INS_S2_VSPLATRH, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | 
HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -19194,8 +19001,8 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe03fe0, .op = 0x84000000 }, .id = HEX_INS_S2_VSXTBH, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -19207,8 +19014,8 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe03fe0, .op = 0x84000080 }, .id = HEX_INS_S2_VSXTHW, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -19220,8 +19027,8 @@ 
static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe03fe0, .op = 0x88800040 }, .id = HEX_INS_S2_VTRUNEHB, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -19233,8 +19040,8 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe03fe0, .op = 0x88800000 }, .id = HEX_INS_S2_VTRUNOHB, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -19246,8 +19053,8 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe03fe0, .op = 0x84000040 }, .id = HEX_INS_S2_VZXTBH, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { 
.info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -19259,8 +19066,8 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe03fe0, .op = 0x840000c0 }, .id = HEX_INS_S2_VZXTHW, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -19272,9 +19079,9 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe000e0, .op = 0x8c200000 }, .id = HEX_INS_S4_CLBADDI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x6, 8 } }, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls 
= HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x6, 8 } }, .isa_id = 's', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -19286,9 +19093,9 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe000e0, .op = 0x88600040 }, .id = HEX_INS_S4_CLBPADDI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x6, 8 } }, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x6, 8 } }, .isa_id = 's', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -19300,8 +19107,8 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe03fe0, .op = 0x88600000 }, .id = HEX_INS_S4_CLBPNORM, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = 
HEX_REG_CLASS_DOUBLE_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -19313,10 +19120,10 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xff802000, .op = 0x8d800000 }, .id = HEX_INS_S4_EXTRACT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x5, 8 } }, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x3, 5 }, { 0x2, 21 } }, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x5, 8 } }, .isa_id = 'u', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x3, 5 }, { 0x2, 21 } }, .isa_id = 'U', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -19328,10 +19135,10 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xff000000, .op = 0x8a000000 }, .id = HEX_INS_S4_EXTRACTP, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } 
}, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x6, 8 } }, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x3, 5 }, { 0x3, 21 } }, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x6, 8 } }, .isa_id = 'u', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x3, 5 }, { 0x3, 21 } }, .isa_id = 'U', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -19343,9 +19150,9 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe020fc, .op = 0x85200000 }, .id = HEX_INS_S4_NTSTBIT_I, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x5, 8 } }, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x5, 8 } }, .isa_id = 'u', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -19357,9 +19164,9 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe030e0, .op = 0x88600080 }, .id = 
HEX_INS_S5_ASRHUB_RND_SAT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 8 } }, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 8 } }, .isa_id = 'u', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -19371,9 +19178,9 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe030e0, .op = 0x886000a0 }, .id = HEX_INS_S5_ASRHUB_SAT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 8 } }, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 8 } }, .isa_id = 'u', .syntax = 12 }, }, .pred 
= HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -19385,8 +19192,8 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe03fe0, .op = 0x88600060 }, .id = HEX_INS_S5_POPCOUNTP, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -19398,9 +19205,9 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe030e0, .op = 0x80200000 }, .id = HEX_INS_S5_VASRHRND, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 8 } }, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 8 } }, .isa_id = 'u', .syntax = 10 }, }, .pred = 
HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -19412,9 +19219,9 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe000e0, .op = 0x80000060 }, .id = HEX_INS_S6_ROL_I_P, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x6, 8 } }, .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x6, 8 } }, .isa_id = 'u', .syntax = 8 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -19426,9 +19233,9 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe000e0, .op = 0x820000e0 }, .id = HEX_INS_S6_ROL_I_P_ACC, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x6, 8 } }, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + 
{ .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x6, 8 } }, .isa_id = 'u', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -19440,9 +19247,9 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe000e0, .op = 0x82400060 }, .id = HEX_INS_S6_ROL_I_P_AND, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x6, 8 } }, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x6, 8 } }, .isa_id = 'u', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -19454,9 +19261,9 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe000e0, .op = 0x82000060 }, .id = HEX_INS_S6_ROL_I_P_NAC, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 8 }, - { 
.info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x6, 8 } }, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x6, 8 } }, .isa_id = 'u', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -19468,9 +19275,9 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe000e0, .op = 0x824000e0 }, .id = HEX_INS_S6_ROL_I_P_OR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x6, 8 } }, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x6, 8 } }, .isa_id = 'u', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -19482,9 +19289,9 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe000e0, .op = 0x82800060 }, .id = HEX_INS_S6_ROL_I_P_XACC, .ops = { - { .info = 
HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x6, 8 } }, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x6, 8 } }, .isa_id = 'u', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -19496,9 +19303,9 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe020e0, .op = 0x8c000060 }, .id = HEX_INS_S6_ROL_I_R, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x5, 8 } }, .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x5, 8 } }, .isa_id = 'u', .syntax = 8 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -19510,9 +19317,9 
@@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe020e0, .op = 0x8e0000e0 }, .id = HEX_INS_S6_ROL_I_R_ACC, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x5, 8 } }, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x5, 8 } }, .isa_id = 'u', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -19524,9 +19331,9 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe020e0, .op = 0x8e400060 }, .id = HEX_INS_S6_ROL_I_R_AND, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x5, 8 } }, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x5, 8 } }, .isa_id = 'u', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = 
RZ_TYPE_COND_AL, @@ -19538,9 +19345,9 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe020e0, .op = 0x8e000060 }, .id = HEX_INS_S6_ROL_I_R_NAC, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x5, 8 } }, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x5, 8 } }, .isa_id = 'u', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -19552,9 +19359,9 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe020e0, .op = 0x8e4000e0 }, .id = HEX_INS_S6_ROL_I_R_OR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x5, 8 } }, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x5, 8 } }, .isa_id = 'u', .syntax = 9 }, }, 
.pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -19566,9 +19373,9 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe020e0, .op = 0x8e800060 }, .id = HEX_INS_S6_ROL_I_R_XACC, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x5, 8 } }, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x5, 8 } }, .isa_id = 'u', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -19580,8 +19387,8 @@ static const HexInsnTemplate templates_normal_0x8[] = { .encoding = { .mask = 0xffe03fe0, .op = 0x84400080 }, .id = HEX_INS_S6_VSPLATRBP, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -19597,8 +19404,8 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 
0xffe03fe0, .op = 0x90000000 }, .id = HEX_INS_L2_DEALLOCFRAME, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 16 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -19610,9 +19417,9 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xf9e00000, .op = 0x90800000 }, .id = HEX_INS_L2_LOADALIGNB_IO, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x9, 5 }, { 0x2, 25 } }, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'y', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x9, 5 }, { 0x2, 25 } }, .isa_id = 's', .syntax = 14 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, 
@@ -19624,9 +19431,9 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe01fe0, .op = 0x9e800000 }, .id = HEX_INS_L2_LOADALIGNB_PBR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'y', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .isa_id = 'u', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -19638,10 +19445,10 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe01e00, .op = 0x98800000 }, .id = HEX_INS_L2_LOADALIGNB_PCI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 5 } }, .syntax = 15 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 21 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | 
HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'y', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 5 } }, .isa_id = 's', .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .isa_id = 'u', .syntax = 21 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -19653,9 +19460,9 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe01fe0, .op = 0x98800200 }, .id = HEX_INS_L2_LOADALIGNB_PCR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 22 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'y', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .isa_id = 'u', .syntax = 22 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -19667,9 +19474,9 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe03e00, .op = 0x9a800000 }, .id = HEX_INS_L2_LOADALIGNB_PI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | 
HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 5 } }, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'y', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 5 } }, .isa_id = 's', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -19681,9 +19488,9 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe01fe0, .op = 0x9c800000 }, .id = HEX_INS_L2_LOADALIGNB_PR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'y', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 13 }, + { .info = 
HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .isa_id = 'u', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -19695,9 +19502,9 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xf9e00000, .op = 0x90400000 }, .id = HEX_INS_L2_LOADALIGNH_IO, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x9, 5 }, { 0x2, 25 } }, .imm_scale = 1, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'y', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x9, 5 }, { 0x2, 25 } }, .isa_id = 's', .imm_scale = 1, .syntax = 14 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -19709,9 +19516,9 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe01fe0, .op = 0x9e400000 }, .id = HEX_INS_L2_LOADALIGNH_PBR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, 
.masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'y', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .isa_id = 'u', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -19723,10 +19530,10 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe01e00, .op = 0x98400000 }, .id = HEX_INS_L2_LOADALIGNH_PCI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 5 } }, .imm_scale = 1, .syntax = 15 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 21 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'y', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 5 } }, .isa_id = 's', .imm_scale = 1, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, 
.reg_cls = HEX_REG_CLASS_MOD_REGS, .isa_id = 'u', .syntax = 21 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -19738,9 +19545,9 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe01fe0, .op = 0x98400200 }, .id = HEX_INS_L2_LOADALIGNH_PCR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 22 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'y', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .isa_id = 'u', .syntax = 22 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -19752,9 +19559,9 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe03e00, .op = 0x9a400000 }, .id = HEX_INS_L2_LOADALIGNH_PI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 5 } }, .imm_scale = 1, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | 
HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'y', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 5 } }, .isa_id = 's', .imm_scale = 1, .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -19766,9 +19573,9 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe01fe0, .op = 0x9c400000 }, .id = HEX_INS_L2_LOADALIGNH_PR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'y', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .isa_id = 'u', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -19780,9 +19587,9 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xf9e00000, .op = 0x90200000 }, .id = HEX_INS_L2_LOADBSW2_IO, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = 
HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x9, 5 }, { 0x2, 25 } }, .imm_scale = 1, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x9, 5 }, { 0x2, 25 } }, .isa_id = 's', .imm_scale = 1, .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -19794,9 +19601,9 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe01fe0, .op = 0x9e200000 }, .id = HEX_INS_L2_LOADBSW2_PBR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .isa_id = 'u', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -19808,10 +19615,10 @@ static 
const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe01e00, .op = 0x98200000 }, .id = HEX_INS_L2_LOADBSW2_PCI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 5 } }, .imm_scale = 1, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 5 } }, .isa_id = 's', .imm_scale = 1, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .isa_id = 'u', .syntax = 17 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -19823,9 +19630,9 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe01fe0, .op = 0x98200200 }, .id = HEX_INS_L2_LOADBSW2_PCR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 18 }, + { .info = 
HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .isa_id = 'u', .syntax = 18 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -19837,9 +19644,9 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe03e00, .op = 0x9a200000 }, .id = HEX_INS_L2_LOADBSW2_PI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 5 } }, .imm_scale = 1, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 5 } }, .isa_id = 's', .imm_scale = 1, .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -19851,9 +19658,9 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe01fe0, .op = 0x9c200000 }, .id = HEX_INS_L2_LOADBSW2_PR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = 
HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .isa_id = 'u', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -19865,9 +19672,9 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xf9e00000, .op = 0x90e00000 }, .id = HEX_INS_L2_LOADBSW4_IO, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x9, 5 }, { 0x2, 25 } }, .imm_scale = 2, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x9, 5 }, { 0x2, 25 } }, .isa_id = 's', .imm_scale = 2, .syntax = 10 }, }, .pred = 
HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -19879,9 +19686,9 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe01fe0, .op = 0x9ee00000 }, .id = HEX_INS_L2_LOADBSW4_PBR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .isa_id = 'u', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -19893,10 +19700,10 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe01e00, .op = 0x98e00000 }, .id = HEX_INS_L2_LOADBSW4_PCI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 5 } }, .imm_scale = 2, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 17 }, + { .info = 
HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 5 } }, .isa_id = 's', .imm_scale = 2, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .isa_id = 'u', .syntax = 17 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -19908,9 +19715,9 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe01fe0, .op = 0x98e00200 }, .id = HEX_INS_L2_LOADBSW4_PCR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .isa_id = 'u', .syntax = 18 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -19922,9 +19729,9 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe03e00, .op = 0x9ae00000 }, .id = 
HEX_INS_L2_LOADBSW4_PI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 5 } }, .imm_scale = 2, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 5 } }, .isa_id = 's', .imm_scale = 2, .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -19936,9 +19743,9 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe01fe0, .op = 0x9ce00000 }, .id = HEX_INS_L2_LOADBSW4_PR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 
0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .isa_id = 'u', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -19950,9 +19757,9 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xf9e00000, .op = 0x90600000 }, .id = HEX_INS_L2_LOADBZW2_IO, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x9, 5 }, { 0x2, 25 } }, .imm_scale = 1, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x9, 5 }, { 0x2, 25 } }, .isa_id = 's', .imm_scale = 1, .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -19964,9 +19771,9 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe01fe0, .op = 0x9e600000 }, .id = HEX_INS_L2_LOADBZW2_PBR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 
13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .isa_id = 'u', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -19978,10 +19785,10 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe01e00, .op = 0x98600000 }, .id = HEX_INS_L2_LOADBZW2_PCI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 5 } }, .imm_scale = 1, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 5 } }, .isa_id = 's', .imm_scale = 1, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .isa_id = 'u', .syntax = 18 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -19993,9 
+19800,9 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe01fe0, .op = 0x98600200 }, .id = HEX_INS_L2_LOADBZW2_PCR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .isa_id = 'u', .syntax = 19 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -20007,9 +19814,9 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe03e00, .op = 0x9a600000 }, .id = HEX_INS_L2_LOADBZW2_PI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 5 } }, .imm_scale = 1, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', 
.syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 5 } }, .isa_id = 's', .imm_scale = 1, .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -20021,9 +19828,9 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe01fe0, .op = 0x9c600000 }, .id = HEX_INS_L2_LOADBZW2_PR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .isa_id = 'u', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -20035,9 +19842,9 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xf9e00000, .op = 0x90a00000 }, .id = HEX_INS_L2_LOADBZW4_IO, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x9, 5 }, { 0x2, 25 } }, .imm_scale = 2, .syntax 
= 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x9, 5 }, { 0x2, 25 } }, .isa_id = 's', .imm_scale = 2, .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -20049,9 +19856,9 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe01fe0, .op = 0x9ea00000 }, .id = HEX_INS_L2_LOADBZW4_PBR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .isa_id = 'u', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -20063,10 +19870,10 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe01e00, .op = 0x98a00000 }, .id = HEX_INS_L2_LOADBZW4_PCI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | 
HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 5 } }, .imm_scale = 2, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 5 } }, .isa_id = 's', .imm_scale = 2, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .isa_id = 'u', .syntax = 18 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -20078,9 +19885,9 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe01fe0, .op = 0x98a00200 }, .id = HEX_INS_L2_LOADBZW4_PCR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, 
.masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .isa_id = 'u', .syntax = 19 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -20092,9 +19899,9 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe03e00, .op = 0x9aa00000 }, .id = HEX_INS_L2_LOADBZW4_PI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 5 } }, .imm_scale = 2, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 5 } }, .isa_id = 's', .imm_scale = 2, .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -20106,9 +19913,9 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe01fe0, .op = 0x9ca00000 }, .id = HEX_INS_L2_LOADBZW4_PR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = 
HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .isa_id = 'u', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -20120,9 +19927,9 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xf9e00000, .op = 0x91000000 }, .id = HEX_INS_L2_LOADRB_IO, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x9, 5 }, { 0x2, 25 } }, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x9, 5 }, { 0x2, 25 } }, .isa_id = 's', .syntax = 9 }, }, .pred = HEX_NOPRED, 
.cond = RZ_TYPE_COND_AL, @@ -20134,9 +19941,9 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe01fe0, .op = 0x9f000000 }, .id = HEX_INS_L2_LOADRB_PBR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .isa_id = 'u', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -20148,10 +19955,10 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe01e00, .op = 0x99000000 }, .id = HEX_INS_L2_LOADRB_PCI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 5 } }, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = 
HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 5 } }, .isa_id = 's', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .isa_id = 'u', .syntax = 16 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -20163,9 +19970,9 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe01fe0, .op = 0x99000200 }, .id = HEX_INS_L2_LOADRB_PCR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .isa_id = 'u', .syntax = 17 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -20177,9 +19984,9 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe03e00, .op = 0x9b000000 }, .id = HEX_INS_L2_LOADRB_PI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = 
HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 5 } }, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 5 } }, .isa_id = 's', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -20191,9 +19998,9 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe01fe0, .op = 0x9d000000 }, .id = HEX_INS_L2_LOADRB_PR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .isa_id = 'u', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -20205,9 +20012,9 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xf9e00000, .op = 0x91c00000 }, .id = HEX_INS_L2_LOADRD_IO, .ops = { - { .info = 
HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x9, 5 }, { 0x2, 25 } }, .imm_scale = 3, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x9, 5 }, { 0x2, 25 } }, .isa_id = 's', .imm_scale = 3, .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -20219,9 +20026,9 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe01fe0, .op = 0x9fc00000 }, .id = HEX_INS_L2_LOADRD_PBR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } 
}, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .isa_id = 'u', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -20233,10 +20040,10 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe01e00, .op = 0x99c00000 }, .id = HEX_INS_L2_LOADRD_PCI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 5 } }, .imm_scale = 3, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 5 } }, .isa_id = 's', .imm_scale = 3, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .isa_id = 'u', .syntax = 16 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -20248,9 +20055,9 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe01fe0, .op = 0x99c00200 }, .id = HEX_INS_L2_LOADRD_PCR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | 
HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .isa_id = 'u', .syntax = 17 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -20262,9 +20069,9 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe03e00, .op = 0x9bc00000 }, .id = HEX_INS_L2_LOADRD_PI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 5 } }, .imm_scale = 3, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | 
HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 5 } }, .isa_id = 's', .imm_scale = 3, .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -20276,9 +20083,9 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe01fe0, .op = 0x9dc00000 }, .id = HEX_INS_L2_LOADRD_PR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .isa_id = 'u', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -20290,9 +20097,9 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xf9e00000, .op = 0x91400000 }, .id = HEX_INS_L2_LOADRH_IO, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x9, 5 }, { 0x2, 25 } }, .imm_scale = 1, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | 
HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x9, 5 }, { 0x2, 25 } }, .isa_id = 's', .imm_scale = 1, .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -20304,9 +20111,9 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe01fe0, .op = 0x9f400000 }, .id = HEX_INS_L2_LOADRH_PBR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .isa_id = 'u', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -20318,10 +20125,10 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe01e00, .op = 0x99400000 }, .id = HEX_INS_L2_LOADRH_PCI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = 
{ { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 5 } }, .imm_scale = 1, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 5 } }, .isa_id = 's', .imm_scale = 1, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .isa_id = 'u', .syntax = 16 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -20333,9 +20140,9 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe01fe0, .op = 0x99400200 }, .id = HEX_INS_L2_LOADRH_PCR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = 
HEX_REG_CLASS_MOD_REGS, .isa_id = 'u', .syntax = 17 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -20347,9 +20154,9 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe03e00, .op = 0x9b400000 }, .id = HEX_INS_L2_LOADRH_PI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 5 } }, .imm_scale = 1, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 5 } }, .isa_id = 's', .imm_scale = 1, .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -20361,9 +20168,9 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe01fe0, .op = 0x9d400000 }, .id = HEX_INS_L2_LOADRH_PR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', 
.syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .isa_id = 'u', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -20375,9 +20182,9 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xf9e00000, .op = 0x91800000 }, .id = HEX_INS_L2_LOADRI_IO, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x9, 5 }, { 0x2, 25 } }, .imm_scale = 2, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x9, 5 }, { 0x2, 25 } }, .isa_id = 's', .imm_scale = 2, .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -20389,9 +20196,9 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe01fe0, .op = 0x9f800000 }, .id = HEX_INS_L2_LOADRI_PBR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = 
HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .isa_id = 'u', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -20403,10 +20210,10 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe01e00, .op = 0x99800000 }, .id = HEX_INS_L2_LOADRI_PCI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 5 } }, .imm_scale = 2, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 5 } }, .isa_id = 's', .imm_scale = 2, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, 
.isa_id = 'u', .syntax = 16 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -20418,9 +20225,9 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe01fe0, .op = 0x99800200 }, .id = HEX_INS_L2_LOADRI_PCR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .isa_id = 'u', .syntax = 17 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -20432,9 +20239,9 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe03e00, .op = 0x9b800000 }, .id = HEX_INS_L2_LOADRI_PI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 5 } }, .imm_scale = 2, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | 
HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 5 } }, .isa_id = 's', .imm_scale = 2, .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -20446,9 +20253,9 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe01fe0, .op = 0x9d800000 }, .id = HEX_INS_L2_LOADRI_PR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .isa_id = 'u', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -20460,9 +20267,9 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xf9e00000, .op = 0x91200000 }, .id = HEX_INS_L2_LOADRUB_IO, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | 
HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x9, 5 }, { 0x2, 25 } }, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x9, 5 }, { 0x2, 25 } }, .isa_id = 's', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -20474,9 +20281,9 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe01fe0, .op = 0x9f200000 }, .id = HEX_INS_L2_LOADRUB_PBR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .isa_id = 'u', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -20488,10 +20295,10 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe01e00, .op = 0x99200000 }, .id = HEX_INS_L2_LOADRUB_PCI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = 
HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 5 } }, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 5 } }, .isa_id = 's', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .isa_id = 'u', .syntax = 17 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -20503,9 +20310,9 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe01fe0, .op = 0x99200200 }, .id = HEX_INS_L2_LOADRUB_PCR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', 
.syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .isa_id = 'u', .syntax = 18 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -20517,9 +20324,9 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe03e00, .op = 0x9b200000 }, .id = HEX_INS_L2_LOADRUB_PI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 5 } }, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 5 } }, .isa_id = 's', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -20531,9 +20338,9 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe01fe0, .op = 0x9d200000 }, .id = HEX_INS_L2_LOADRUB_PR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 
0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .isa_id = 'u', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -20545,9 +20352,9 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xf9e00000, .op = 0x91600000 }, .id = HEX_INS_L2_LOADRUH_IO, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x9, 5 }, { 0x2, 25 } }, .imm_scale = 1, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x9, 5 }, { 0x2, 25 } }, .isa_id = 's', .imm_scale = 1, .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -20559,9 +20366,9 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe01fe0, .op = 0x9f600000 }, .id = HEX_INS_L2_LOADRUH_PBR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | 
HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .isa_id = 'u', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -20573,10 +20380,10 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe01e00, .op = 0x99600000 }, .id = HEX_INS_L2_LOADRUH_PCI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 5 } }, .imm_scale = 1, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 5 } }, .isa_id = 's', .imm_scale = 1, .syntax = 11 }, + { .info = 
HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .isa_id = 'u', .syntax = 17 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -20588,9 +20395,9 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe01fe0, .op = 0x99600200 }, .id = HEX_INS_L2_LOADRUH_PCR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .isa_id = 'u', .syntax = 18 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -20602,9 +20409,9 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe03e00, .op = 0x9b600000 }, .id = HEX_INS_L2_LOADRUH_PI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 5 } }, .imm_scale = 1, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, 
.isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 5 } }, .isa_id = 's', .imm_scale = 1, .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -20616,9 +20423,9 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe01fe0, .op = 0x9d600000 }, .id = HEX_INS_L2_LOADRUH_PR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .isa_id = 'u', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -20630,8 +20437,8 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe03fe0, .op = 0x92000800 }, .id = HEX_INS_L2_LOADW_AQ, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = 
{ { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -20643,8 +20450,8 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe03fe0, .op = 0x92000000 }, .id = HEX_INS_L2_LOADW_LOCKED, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -20656,10 +20463,10 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe03800, .op = 0x9b002800 }, .id = HEX_INS_L2_PLOADRBF_PI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 9 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 15 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 5 } }, .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 9 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 't', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = 
HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 5 } }, .isa_id = 's', .syntax = 17 }, }, .pred = HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -20672,10 +20479,10 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe03800, .op = 0x9b003800 }, .id = HEX_INS_L2_PLOADRBFNEW_PI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 9 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 19 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 5 } }, .syntax = 21 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 9 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 't', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 5 } }, .isa_id = 's', .syntax = 21 }, }, .pred = HEX_PRED_FALSE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -20688,10 +20495,10 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe03800, .op = 
0x9b002000 }, .id = HEX_INS_L2_PLOADRBT_PI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 9 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 14 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 5 } }, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 9 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 't', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 5 } }, .isa_id = 's', .syntax = 16 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -20704,10 +20511,10 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe03800, .op = 0x9b003000 }, .id = HEX_INS_L2_PLOADRBTNEW_PI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 9 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 18 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 5 } 
}, .syntax = 20 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 9 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 't', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 5 } }, .isa_id = 's', .syntax = 20 }, }, .pred = HEX_PRED_TRUE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -20720,10 +20527,10 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe03800, .op = 0x9bc02800 }, .id = HEX_INS_L2_PLOADRDF_PI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 9 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 15 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 5 } }, .imm_scale = 3, .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 9 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 't', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 15 }, + { .info = 
HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 5 } }, .isa_id = 's', .imm_scale = 3, .syntax = 17 }, }, .pred = HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -20736,10 +20543,10 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe03800, .op = 0x9bc03800 }, .id = HEX_INS_L2_PLOADRDFNEW_PI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 9 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 19 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 5 } }, .imm_scale = 3, .syntax = 21 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 9 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 't', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 5 } }, .isa_id = 's', .imm_scale = 3, .syntax = 21 }, }, .pred = HEX_PRED_FALSE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -20752,10 +20559,10 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe03800, .op = 0x9bc02000 }, .id = HEX_INS_L2_PLOADRDT_PI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 9 } 
}, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 14 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 5 } }, .imm_scale = 3, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 9 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 't', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 5 } }, .isa_id = 's', .imm_scale = 3, .syntax = 16 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -20768,10 +20575,10 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe03800, .op = 0x9bc03000 }, .id = HEX_INS_L2_PLOADRDTNEW_PI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 9 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 18 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | 
HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 5 } }, .imm_scale = 3, .syntax = 20 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 9 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 't', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 5 } }, .isa_id = 's', .imm_scale = 3, .syntax = 20 }, }, .pred = HEX_PRED_TRUE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -20784,10 +20591,10 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe03800, .op = 0x9b402800 }, .id = HEX_INS_L2_PLOADRHF_PI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 9 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 15 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 5 } }, .imm_scale = 1, .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 9 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 't', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = 
HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 5 } }, .isa_id = 's', .imm_scale = 1, .syntax = 17 }, }, .pred = HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -20800,10 +20607,10 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe03800, .op = 0x9b403800 }, .id = HEX_INS_L2_PLOADRHFNEW_PI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 9 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 19 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 5 } }, .imm_scale = 1, .syntax = 21 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 9 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 't', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 5 } }, .isa_id = 's', .imm_scale = 1, .syntax = 21 }, }, .pred = HEX_PRED_FALSE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -20816,10 +20623,10 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe03800, .op = 0x9b402000 }, .id = HEX_INS_L2_PLOADRHT_PI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 9 } }, 
.reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 14 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 5 } }, .imm_scale = 1, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 9 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 't', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 5 } }, .isa_id = 's', .imm_scale = 1, .syntax = 16 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -20832,10 +20639,10 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe03800, .op = 0x9b403000 }, .id = HEX_INS_L2_PLOADRHTNEW_PI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 9 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 18 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 5 } }, .imm_scale = 1, .syntax = 20 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { 
{ 0x2, 9 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 't', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 5 } }, .isa_id = 's', .imm_scale = 1, .syntax = 20 }, }, .pred = HEX_PRED_TRUE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -20848,10 +20655,10 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe03800, .op = 0x9b802800 }, .id = HEX_INS_L2_PLOADRIF_PI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 9 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 15 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 5 } }, .imm_scale = 2, .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 9 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 't', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 5 } }, .isa_id = 
's', .imm_scale = 2, .syntax = 17 }, }, .pred = HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -20864,10 +20671,10 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe03800, .op = 0x9b803800 }, .id = HEX_INS_L2_PLOADRIFNEW_PI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 9 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 19 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 5 } }, .imm_scale = 2, .syntax = 21 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 9 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 't', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 5 } }, .isa_id = 's', .imm_scale = 2, .syntax = 21 }, }, .pred = HEX_PRED_FALSE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -20880,10 +20687,10 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe03800, .op = 0x9b802000 }, .id = HEX_INS_L2_PLOADRIT_PI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 9 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 6 }, - { 
.info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 14 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 5 } }, .imm_scale = 2, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 9 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 't', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 5 } }, .isa_id = 's', .imm_scale = 2, .syntax = 16 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -20896,10 +20703,10 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe03800, .op = 0x9b803000 }, .id = HEX_INS_L2_PLOADRITNEW_PI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 9 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 18 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 5 } }, .imm_scale = 2, .syntax = 20 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 9 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 't', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = 
HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 5 } }, .isa_id = 's', .imm_scale = 2, .syntax = 20 }, }, .pred = HEX_PRED_TRUE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -20912,10 +20719,10 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe03800, .op = 0x9b202800 }, .id = HEX_INS_L2_PLOADRUBF_PI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 9 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 16 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 5 } }, .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 9 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 't', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 5 } }, .isa_id = 's', .syntax = 18 }, }, .pred = HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -20928,10 +20735,10 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe03800, 
.op = 0x9b203800 }, .id = HEX_INS_L2_PLOADRUBFNEW_PI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 9 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 20 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 5 } }, .syntax = 22 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 9 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 't', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 20 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 5 } }, .isa_id = 's', .syntax = 22 }, }, .pred = HEX_PRED_FALSE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -20944,10 +20751,10 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe03800, .op = 0x9b202000 }, .id = HEX_INS_L2_PLOADRUBT_PI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 9 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 15 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | 
HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 5 } }, .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 9 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 't', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 5 } }, .isa_id = 's', .syntax = 17 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -20960,10 +20767,10 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe03800, .op = 0x9b203000 }, .id = HEX_INS_L2_PLOADRUBTNEW_PI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 9 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 19 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 5 } }, .syntax = 21 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 9 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 't', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | 
HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 5 } }, .isa_id = 's', .syntax = 21 }, }, .pred = HEX_PRED_TRUE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -20976,10 +20783,10 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe03800, .op = 0x9b602800 }, .id = HEX_INS_L2_PLOADRUHF_PI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 9 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 16 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 5 } }, .imm_scale = 1, .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 9 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 't', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 5 } }, .isa_id = 's', .imm_scale = 1, .syntax = 18 }, }, .pred = HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -20992,10 +20799,10 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe03800, .op = 0x9b603800 }, .id = HEX_INS_L2_PLOADRUHFNEW_PI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 9 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | 
HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 20 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 5 } }, .imm_scale = 1, .syntax = 22 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 9 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 't', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 20 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 5 } }, .isa_id = 's', .imm_scale = 1, .syntax = 22 }, }, .pred = HEX_PRED_FALSE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -21008,10 +20815,10 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe03800, .op = 0x9b602000 }, .id = HEX_INS_L2_PLOADRUHT_PI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 9 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 15 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 5 } }, .imm_scale = 1, .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 9 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 't', .syntax = 
4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 5 } }, .isa_id = 's', .imm_scale = 1, .syntax = 17 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -21024,10 +20831,10 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe03800, .op = 0x9b603000 }, .id = HEX_INS_L2_PLOADRUHTNEW_PI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 9 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 19 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 5 } }, .imm_scale = 1, .syntax = 21 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 9 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 't', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 5 } }, .isa_id = 's', .imm_scale = 1, .syntax = 21 }, }, .pred = HEX_PRED_TRUE | HEX_PRED_NEW, .cond = 
RZ_TYPE_COND_HEX_SCL_TRUE, @@ -21040,9 +20847,9 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe03080, .op = 0x9a801000 }, .id = HEX_INS_L4_LOADALIGNB_AP, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x2, 5 }, { 0x4, 8 } }, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'y', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'e', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x2, 5 }, { 0x4, 8 } }, .isa_id = 'U', .syntax = 14 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -21054,10 +20861,10 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe01000, .op = 0x9c801000 }, .id = HEX_INS_L4_LOADALIGNB_UR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .syntax = 15 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x2, 5 }, { 0x4, 8 } }, 
.syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'y', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .isa_id = 'u', .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x2, 5 }, { 0x4, 8 } }, .isa_id = 'U', .syntax = 16 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -21069,9 +20876,9 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe03080, .op = 0x9a401000 }, .id = HEX_INS_L4_LOADALIGNH_AP, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x2, 5 }, { 0x4, 8 } }, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'y', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'e', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x2, 5 }, { 0x4, 8 } }, .isa_id = 'U', .syntax = 14 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -21083,10 +20890,10 @@ static const HexInsnTemplate 
templates_normal_0x9[] = { .encoding = { .mask = 0xffe01000, .op = 0x9c401000 }, .id = HEX_INS_L4_LOADALIGNH_UR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .syntax = 15 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x2, 5 }, { 0x4, 8 } }, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'y', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .isa_id = 'u', .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x2, 5 }, { 0x4, 8 } }, .isa_id = 'U', .syntax = 16 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -21098,9 +20905,9 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe03080, .op = 0x9a201000 }, .id = HEX_INS_L4_LOADBSW2_AP, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x2, 5 }, { 0x4, 8 } }, .syntax = 10 }, + { .info = 
HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'e', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x2, 5 }, { 0x4, 8 } }, .isa_id = 'U', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -21112,10 +20919,10 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe01000, .op = 0x9c201000 }, .id = HEX_INS_L4_LOADBSW2_UR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x2, 5 }, { 0x4, 8 } }, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x2, 5 }, { 0x4, 8 } }, .isa_id = 'U', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -21127,9 +20934,9 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe03080, .op = 0x9ae01000 }, .id = 
HEX_INS_L4_LOADBSW4_AP, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x2, 5 }, { 0x4, 8 } }, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'e', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x2, 5 }, { 0x4, 8 } }, .isa_id = 'U', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -21141,10 +20948,10 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe01000, .op = 0x9ce01000 }, .id = HEX_INS_L4_LOADBSW4_UR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x2, 5 }, { 0x4, 8 } }, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = 
HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x2, 5 }, { 0x4, 8 } }, .isa_id = 'U', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -21156,9 +20963,9 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe03080, .op = 0x9a601000 }, .id = HEX_INS_L4_LOADBZW2_AP, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x2, 5 }, { 0x4, 8 } }, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'e', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x2, 5 }, { 0x4, 8 } }, .isa_id = 'U', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -21170,10 +20977,10 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe01000, .op = 0x9c601000 }, .id = HEX_INS_L4_LOADBZW2_UR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = 
HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x2, 5 }, { 0x4, 8 } }, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .isa_id = 'u', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x2, 5 }, { 0x4, 8 } }, .isa_id = 'U', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -21185,9 +20992,9 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe03080, .op = 0x9aa01000 }, .id = HEX_INS_L4_LOADBZW4_AP, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x2, 5 }, { 0x4, 8 } }, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, 
.reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'e', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x2, 5 }, { 0x4, 8 } }, .isa_id = 'U', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -21199,10 +21006,10 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe01000, .op = 0x9ca01000 }, .id = HEX_INS_L4_LOADBZW4_UR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x2, 5 }, { 0x4, 8 } }, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .isa_id = 'u', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x2, 5 }, { 0x4, 8 } }, .isa_id = 'U', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -21214,8 +21021,8 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe03fe0, .op = 0x92001800 }, .id = HEX_INS_L4_LOADD_AQ, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = 
HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -21227,8 +21034,8 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe03fe0, .op = 0x92001000 }, .id = HEX_INS_L4_LOADD_LOCKED, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -21240,9 +21047,9 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe03080, .op = 0x9b001000 }, .id = HEX_INS_L4_LOADRB_AP, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x2, 5 }, { 0x4, 8 } }, .syntax = 9 }, + { 
.info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'e', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x2, 5 }, { 0x4, 8 } }, .isa_id = 'U', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -21254,10 +21061,10 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe01000, .op = 0x9d001000 }, .id = HEX_INS_L4_LOADRB_UR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x2, 5 }, { 0x4, 8 } }, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .isa_id = 'u', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x2, 5 }, { 0x4, 8 } }, .isa_id = 'U', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -21269,9 +21076,9 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe03080, .op = 0x9bc01000 }, .id 
= HEX_INS_L4_LOADRD_AP, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x2, 5 }, { 0x4, 8 } }, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'e', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x2, 5 }, { 0x4, 8 } }, .isa_id = 'U', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -21283,10 +21090,10 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe01000, .op = 0x9dc01000 }, .id = HEX_INS_L4_LOADRD_UR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x2, 5 }, { 0x4, 8 } }, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, 
.isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .isa_id = 'u', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x2, 5 }, { 0x4, 8 } }, .isa_id = 'U', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -21298,9 +21105,9 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe03080, .op = 0x9b401000 }, .id = HEX_INS_L4_LOADRH_AP, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x2, 5 }, { 0x4, 8 } }, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'e', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x2, 5 }, { 0x4, 8 } }, .isa_id = 'U', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -21312,10 +21119,10 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe01000, .op = 0x9d401000 }, .id = HEX_INS_L4_LOADRH_UR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info 
= HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x2, 5 }, { 0x4, 8 } }, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .isa_id = 'u', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x2, 5 }, { 0x4, 8 } }, .isa_id = 'U', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -21327,9 +21134,9 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe03080, .op = 0x9b801000 }, .id = HEX_INS_L4_LOADRI_AP, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x2, 5 }, { 0x4, 8 } }, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'e', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | 
HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x2, 5 }, { 0x4, 8 } }, .isa_id = 'U', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -21341,10 +21148,10 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe01000, .op = 0x9d801000 }, .id = HEX_INS_L4_LOADRI_UR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x2, 5 }, { 0x4, 8 } }, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .isa_id = 'u', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x2, 5 }, { 0x4, 8 } }, .isa_id = 'U', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -21356,9 +21163,9 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe03080, .op = 0x9b201000 }, .id = HEX_INS_L4_LOADRUB_AP, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, - { .info = 
HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x2, 5 }, { 0x4, 8 } }, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'e', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x2, 5 }, { 0x4, 8 } }, .isa_id = 'U', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -21370,10 +21177,10 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe01000, .op = 0x9d201000 }, .id = HEX_INS_L4_LOADRUB_UR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x2, 5 }, { 0x4, 8 } }, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x2, 5 }, { 0x4, 8 } }, .isa_id = 'U', .syntax = 12 }, }, .pred = HEX_NOPRED, 
.cond = RZ_TYPE_COND_AL, @@ -21385,9 +21192,9 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe03080, .op = 0x9b601000 }, .id = HEX_INS_L4_LOADRUH_AP, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x2, 5 }, { 0x4, 8 } }, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'e', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x2, 5 }, { 0x4, 8 } }, .isa_id = 'U', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -21399,10 +21206,10 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe01000, .op = 0x9d601000 }, .id = HEX_INS_L4_LOADRUH_UR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x2, 5 }, { 0x4, 8 } }, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls 
= HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x1, 7 }, { 0x1, 13 } }, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x2, 5 }, { 0x4, 8 } }, .isa_id = 'U', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -21414,9 +21221,9 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe038e0, .op = 0x9f002880 }, .id = HEX_INS_L4_PLOADRBF_ABS, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 9 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x1, 8 }, { 0x5, 16 } }, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 9 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 't', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x1, 8 }, { 0x5, 16 } }, .isa_id = 'u', .syntax = 15 }, }, .pred = HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -21429,9 +21236,9 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe038e0, .op = 0x9f003880 }, .id = HEX_INS_L4_PLOADRBFNEW_ABS, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 9 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | 
HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x1, 8 }, { 0x5, 16 } }, .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 9 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 't', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x1, 8 }, { 0x5, 16 } }, .isa_id = 'u', .syntax = 19 }, }, .pred = HEX_PRED_FALSE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -21444,9 +21251,9 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe038e0, .op = 0x9f002080 }, .id = HEX_INS_L4_PLOADRBT_ABS, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 9 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x1, 8 }, { 0x5, 16 } }, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 9 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 't', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x1, 8 }, { 0x5, 16 } }, .isa_id = 'u', .syntax = 14 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -21459,9 +21266,9 @@ static const 
HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe038e0, .op = 0x9f003080 }, .id = HEX_INS_L4_PLOADRBTNEW_ABS, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 9 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x1, 8 }, { 0x5, 16 } }, .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 9 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 't', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x1, 8 }, { 0x5, 16 } }, .isa_id = 'u', .syntax = 18 }, }, .pred = HEX_PRED_TRUE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -21474,9 +21281,9 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe038e0, .op = 0x9fc02880 }, .id = HEX_INS_L4_PLOADRDF_ABS, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 9 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x1, 8 }, { 0x5, 16 } }, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 9 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 't', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 
0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x1, 8 }, { 0x5, 16 } }, .isa_id = 'u', .syntax = 15 }, }, .pred = HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -21489,9 +21296,9 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe038e0, .op = 0x9fc03880 }, .id = HEX_INS_L4_PLOADRDFNEW_ABS, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 9 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x1, 8 }, { 0x5, 16 } }, .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 9 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 't', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x1, 8 }, { 0x5, 16 } }, .isa_id = 'u', .syntax = 19 }, }, .pred = HEX_PRED_FALSE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -21504,9 +21311,9 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe038e0, .op = 0x9fc02080 }, .id = HEX_INS_L4_PLOADRDT_ABS, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 9 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = 
HEX_REG_CLASS_DOUBLE_REGS, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x1, 8 }, { 0x5, 16 } }, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 9 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 't', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x1, 8 }, { 0x5, 16 } }, .isa_id = 'u', .syntax = 14 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -21519,9 +21326,9 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe038e0, .op = 0x9fc03080 }, .id = HEX_INS_L4_PLOADRDTNEW_ABS, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 9 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x1, 8 }, { 0x5, 16 } }, .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 9 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 't', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x1, 8 }, { 0x5, 16 } }, .isa_id = 'u', .syntax = 18 }, }, .pred = HEX_PRED_TRUE | HEX_PRED_NEW, .cond = 
RZ_TYPE_COND_HEX_SCL_TRUE, @@ -21534,9 +21341,9 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe038e0, .op = 0x9f402880 }, .id = HEX_INS_L4_PLOADRHF_ABS, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 9 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x1, 8 }, { 0x5, 16 } }, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 9 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 't', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x1, 8 }, { 0x5, 16 } }, .isa_id = 'u', .syntax = 15 }, }, .pred = HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -21549,9 +21356,9 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe038e0, .op = 0x9f403880 }, .id = HEX_INS_L4_PLOADRHFNEW_ABS, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 9 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x1, 8 }, { 0x5, 16 } }, .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 9 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 't', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls 
= HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x1, 8 }, { 0x5, 16 } }, .isa_id = 'u', .syntax = 19 }, }, .pred = HEX_PRED_FALSE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -21564,9 +21371,9 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe038e0, .op = 0x9f402080 }, .id = HEX_INS_L4_PLOADRHT_ABS, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 9 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x1, 8 }, { 0x5, 16 } }, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 9 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 't', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x1, 8 }, { 0x5, 16 } }, .isa_id = 'u', .syntax = 14 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -21579,9 +21386,9 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe038e0, .op = 0x9f403080 }, .id = HEX_INS_L4_PLOADRHTNEW_ABS, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 9 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | 
HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x1, 8 }, { 0x5, 16 } }, .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 9 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 't', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x1, 8 }, { 0x5, 16 } }, .isa_id = 'u', .syntax = 18 }, }, .pred = HEX_PRED_TRUE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -21594,9 +21401,9 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe038e0, .op = 0x9f802880 }, .id = HEX_INS_L4_PLOADRIF_ABS, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 9 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x1, 8 }, { 0x5, 16 } }, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 9 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 't', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x1, 8 }, { 0x5, 16 } }, .isa_id = 'u', .syntax = 15 }, }, .pred = HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -21609,9 +21416,9 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe038e0, .op = 0x9f803880 }, .id = HEX_INS_L4_PLOADRIFNEW_ABS, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks 
= { { 0x2, 9 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x1, 8 }, { 0x5, 16 } }, .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 9 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 't', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x1, 8 }, { 0x5, 16 } }, .isa_id = 'u', .syntax = 19 }, }, .pred = HEX_PRED_FALSE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -21624,9 +21431,9 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe038e0, .op = 0x9f802080 }, .id = HEX_INS_L4_PLOADRIT_ABS, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 9 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x1, 8 }, { 0x5, 16 } }, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 9 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 't', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x1, 8 }, { 0x5, 16 } }, .isa_id = 'u', .syntax 
= 14 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -21639,9 +21446,9 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe038e0, .op = 0x9f803080 }, .id = HEX_INS_L4_PLOADRITNEW_ABS, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 9 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x1, 8 }, { 0x5, 16 } }, .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 9 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 't', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x1, 8 }, { 0x5, 16 } }, .isa_id = 'u', .syntax = 18 }, }, .pred = HEX_PRED_TRUE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -21654,9 +21461,9 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe038e0, .op = 0x9f202880 }, .id = HEX_INS_L4_PLOADRUBF_ABS, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 9 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x1, 8 }, { 0x5, 16 } }, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 9 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 't', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | 
HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x1, 8 }, { 0x5, 16 } }, .isa_id = 'u', .syntax = 16 }, }, .pred = HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -21669,9 +21476,9 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe038e0, .op = 0x9f203880 }, .id = HEX_INS_L4_PLOADRUBFNEW_ABS, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 9 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x1, 8 }, { 0x5, 16 } }, .syntax = 20 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 9 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 't', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x1, 8 }, { 0x5, 16 } }, .isa_id = 'u', .syntax = 20 }, }, .pred = HEX_PRED_FALSE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -21684,9 +21491,9 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe038e0, .op = 0x9f202080 }, .id = HEX_INS_L4_PLOADRUBT_ABS, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 9 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | 
HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x1, 8 }, { 0x5, 16 } }, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 9 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 't', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x1, 8 }, { 0x5, 16 } }, .isa_id = 'u', .syntax = 15 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -21699,9 +21506,9 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe038e0, .op = 0x9f203080 }, .id = HEX_INS_L4_PLOADRUBTNEW_ABS, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 9 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x1, 8 }, { 0x5, 16 } }, .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 9 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 't', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x1, 8 }, { 0x5, 16 } }, .isa_id = 'u', .syntax = 19 }, }, .pred = HEX_PRED_TRUE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -21714,9 +21521,9 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe038e0, .op = 0x9f602880 }, .id = HEX_INS_L4_PLOADRUHF_ABS, .ops = { - { 
.info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 9 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x1, 8 }, { 0x5, 16 } }, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 9 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 't', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x1, 8 }, { 0x5, 16 } }, .isa_id = 'u', .syntax = 16 }, }, .pred = HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -21729,9 +21536,9 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe038e0, .op = 0x9f603880 }, .id = HEX_INS_L4_PLOADRUHFNEW_ABS, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 9 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x1, 8 }, { 0x5, 16 } }, .syntax = 20 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 9 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 't', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x1, 8 }, { 0x5, 
16 } }, .isa_id = 'u', .syntax = 20 }, }, .pred = HEX_PRED_FALSE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -21744,9 +21551,9 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe038e0, .op = 0x9f602080 }, .id = HEX_INS_L4_PLOADRUHT_ABS, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 9 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x1, 8 }, { 0x5, 16 } }, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 9 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 't', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x1, 8 }, { 0x5, 16 } }, .isa_id = 'u', .syntax = 15 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -21759,9 +21566,9 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe038e0, .op = 0x9f603080 }, .id = HEX_INS_L4_PLOADRUHTNEW_ABS, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 9 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x1, 8 }, { 0x5, 16 } }, .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 9 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 't', .syntax = 4 }, + { .info = 
HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x1, 8 }, { 0x5, 16 } }, .isa_id = 'u', .syntax = 19 }, }, .pred = HEX_PRED_TRUE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -21774,8 +21581,8 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe03fe0, .op = 0x96000000 }, .id = HEX_INS_L4_RETURN, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 18 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -21787,9 +21594,9 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe03ce0, .op = 0x96003000 }, .id = HEX_INS_L4_RETURN_F, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 25 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | 
HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 25 }, }, .pred = HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -21802,9 +21609,9 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe03ce0, .op = 0x96002800 }, .id = HEX_INS_L4_RETURN_FNEW_PNT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 29 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 29 }, }, .pred = HEX_PRED_FALSE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -21817,9 +21624,9 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe03ce0, .op = 0x96003800 }, .id = HEX_INS_L4_RETURN_FNEW_PT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, 
.reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 29 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 29 }, }, .pred = HEX_PRED_FALSE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -21832,9 +21639,9 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe03ce0, .op = 0x96001000 }, .id = HEX_INS_L4_RETURN_T, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 24 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 24 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -21847,9 +21654,9 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe03ce0, .op = 0x96000800 }, .id = HEX_INS_L4_RETURN_TNEW_PNT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = 
HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 28 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 28 }, }, .pred = HEX_PRED_TRUE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -21862,9 +21669,9 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe03ce0, .op = 0x96001800 }, .id = HEX_INS_L4_RETURN_TNEW_PT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 28 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 8 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 28 }, }, .pred = HEX_PRED_TRUE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -21877,9 +21684,9 @@ static const HexInsnTemplate 
templates_normal_0x9[] = { .encoding = { .mask = 0xffe000ff, .op = 0x92000040 }, .id = HEX_INS_L6_MEMCPY, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .isa_id = 'u', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -21891,9 +21698,9 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe020e0, .op = 0x92000020 }, .id = HEX_INS_V6_EXTRACTW, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_HVX_VR, .isa_id = 'u', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -21905,8 +21712,8 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 
0xffe03800, .op = 0x94000000 }, .id = HEX_INS_Y2_DCFETCHBO, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0xb, 0 } }, .imm_scale = 3, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0xb, 0 } }, .isa_id = 'u', .imm_scale = 3, .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -21918,9 +21725,9 @@ static const HexInsnTemplate templates_normal_0x9[] = { .encoding = { .mask = 0xffe020e0, .op = 0x92002000 }, .id = HEX_INS_IMPORTED_RD_MEMW_PHYS_RS_RT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 14 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -21936,7 +21743,7 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xffe020fc, .op = 0xa0e0000c }, .id = HEX_INS_R6_RELEASE_AT_VI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = 
{ { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -21948,7 +21755,7 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xffe020fc, .op = 0xa0e0002c }, .id = HEX_INS_R6_RELEASE_ST_VI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -21960,8 +21767,8 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xffe03800, .op = 0xa0800000 }, .id = HEX_INS_S2_ALLOCFRAME, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0xb, 0 } }, .imm_scale = 3, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0xb, 0 } }, .isa_id = 'u', .imm_scale = 3, .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -21973,10 +21780,10 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xffe02084, .op = 0xab002004 }, .id = HEX_INS_S2_PSTORERBF_PI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 3 } }, .syntax = 14 }, - { 
.info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 3 } }, .isa_id = 's', .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 18 }, }, .pred = HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -21989,10 +21796,10 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xffe02084, .op = 0xab002084 }, .id = HEX_INS_S2_PSTORERBFNEW_PI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 16 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 3 } }, .syntax = 18 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 22 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 3 } }, .isa_id = 's', .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = 
HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 22 }, }, .pred = HEX_PRED_FALSE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -22005,10 +21812,10 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xffe03884, .op = 0xaba02004 }, .id = HEX_INS_S2_PSTORERBNEWF_PI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 3 } }, .syntax = 14 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 3 } }, .isa_id = 's', .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 18 }, }, .pred = HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -22021,10 +21828,10 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xffe03884, .op = 0xaba02084 }, .id = HEX_INS_S2_PSTORERBNEWFNEW_PI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 16 }, - { 
.info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 3 } }, .syntax = 18 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 22 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 3 } }, .isa_id = 's', .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 22 }, }, .pred = HEX_PRED_FALSE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -22037,10 +21844,10 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xffe03884, .op = 0xaba02000 }, .id = HEX_INS_S2_PSTORERBNEWT_PI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 3 } }, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 
'x', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 3 } }, .isa_id = 's', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 17 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -22053,10 +21860,10 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xffe03884, .op = 0xaba02080 }, .id = HEX_INS_S2_PSTORERBNEWTNEW_PI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 15 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 3 } }, .syntax = 17 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 21 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 3 } }, .isa_id = 's', .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 21 }, }, .pred = HEX_PRED_TRUE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -22069,10 +21876,10 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xffe02084, .op = 0xab002000 }, .id = 
HEX_INS_S2_PSTORERBT_PI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 3 } }, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 3 } }, .isa_id = 's', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 17 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -22085,10 +21892,10 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xffe02084, .op = 0xab002080 }, .id = HEX_INS_S2_PSTORERBTNEW_PI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 15 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 3 } }, .syntax = 17 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 21 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = 
HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 3 } }, .isa_id = 's', .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 21 }, }, .pred = HEX_PRED_TRUE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -22101,10 +21908,10 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xffe02084, .op = 0xabc02004 }, .id = HEX_INS_S2_PSTORERDF_PI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 3 } }, .imm_scale = 3, .syntax = 14 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 3 } }, .isa_id = 's', .imm_scale = 3, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 18 }, }, .pred = 
HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -22117,10 +21924,10 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xffe02084, .op = 0xabc02084 }, .id = HEX_INS_S2_PSTORERDFNEW_PI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 16 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 3 } }, .imm_scale = 3, .syntax = 18 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 22 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 3 } }, .isa_id = 's', .imm_scale = 3, .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 22 }, }, .pred = HEX_PRED_FALSE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -22133,10 +21940,10 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xffe02084, .op = 0xabc02000 }, .id = HEX_INS_S2_PSTORERDT_PI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | 
HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 3 } }, .imm_scale = 3, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 3 } }, .isa_id = 's', .imm_scale = 3, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 17 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -22149,10 +21956,10 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xffe02084, .op = 0xabc02080 }, .id = HEX_INS_S2_PSTORERDTNEW_PI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 15 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 3 } }, .imm_scale = 3, .syntax = 17 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 21 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 
'x', .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 3 } }, .isa_id = 's', .imm_scale = 3, .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 21 }, }, .pred = HEX_PRED_TRUE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -22165,10 +21972,10 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xffe02084, .op = 0xab602004 }, .id = HEX_INS_S2_PSTORERFF_PI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 3 } }, .imm_scale = 1, .syntax = 14 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 3 } }, .isa_id = 's', .imm_scale = 1, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 18 }, }, .pred = HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -22181,10 +21988,10 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xffe02084, .op = 0xab602084 }, .id = HEX_INS_S2_PSTORERFFNEW_PI, .ops 
= { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 16 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 3 } }, .imm_scale = 1, .syntax = 18 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 22 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 3 } }, .isa_id = 's', .imm_scale = 1, .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 22 }, }, .pred = HEX_PRED_FALSE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -22197,10 +22004,10 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xffe02084, .op = 0xab602000 }, .id = HEX_INS_S2_PSTORERFT_PI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 3 } }, .imm_scale = 1, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 
} }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 3 } }, .isa_id = 's', .imm_scale = 1, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 17 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -22213,10 +22020,10 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xffe02084, .op = 0xab602080 }, .id = HEX_INS_S2_PSTORERFTNEW_PI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 15 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 3 } }, .imm_scale = 1, .syntax = 17 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 21 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 3 } }, .isa_id = 's', .imm_scale = 1, .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 21 }, }, .pred = HEX_PRED_TRUE | HEX_PRED_NEW, .cond = 
RZ_TYPE_COND_HEX_SCL_TRUE, @@ -22229,10 +22036,10 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xffe02084, .op = 0xab402004 }, .id = HEX_INS_S2_PSTORERHF_PI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 3 } }, .imm_scale = 1, .syntax = 14 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 3 } }, .isa_id = 's', .imm_scale = 1, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 18 }, }, .pred = HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -22245,10 +22052,10 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xffe02084, .op = 0xab402084 }, .id = HEX_INS_S2_PSTORERHFNEW_PI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 16 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 3 } }, .imm_scale = 1, 
.syntax = 18 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 22 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 3 } }, .isa_id = 's', .imm_scale = 1, .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 22 }, }, .pred = HEX_PRED_FALSE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -22261,10 +22068,10 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xffe03884, .op = 0xaba02804 }, .id = HEX_INS_S2_PSTORERHNEWF_PI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 3 } }, .imm_scale = 1, .syntax = 14 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 3 } }, .isa_id = 
's', .imm_scale = 1, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 18 }, }, .pred = HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -22277,10 +22084,10 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xffe03884, .op = 0xaba02884 }, .id = HEX_INS_S2_PSTORERHNEWFNEW_PI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 16 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 3 } }, .imm_scale = 1, .syntax = 18 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 22 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 3 } }, .isa_id = 's', .imm_scale = 1, .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 22 }, }, .pred = HEX_PRED_FALSE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -22293,10 +22100,10 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xffe03884, .op = 0xaba02800 }, .id = HEX_INS_S2_PSTORERHNEWT_PI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = 
HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 3 } }, .imm_scale = 1, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 3 } }, .isa_id = 's', .imm_scale = 1, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 17 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -22309,10 +22116,10 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xffe03884, .op = 0xaba02880 }, .id = HEX_INS_S2_PSTORERHNEWTNEW_PI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 15 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 3 } }, .imm_scale = 1, .syntax = 17 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 21 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { 
{ 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 3 } }, .isa_id = 's', .imm_scale = 1, .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 21 }, }, .pred = HEX_PRED_TRUE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -22325,10 +22132,10 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xffe02084, .op = 0xab402000 }, .id = HEX_INS_S2_PSTORERHT_PI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 3 } }, .imm_scale = 1, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 3 } }, .isa_id = 's', .imm_scale = 1, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 17 }, }, .pred = 
HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -22341,10 +22148,10 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xffe02084, .op = 0xab402080 }, .id = HEX_INS_S2_PSTORERHTNEW_PI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 15 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 3 } }, .imm_scale = 1, .syntax = 17 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 21 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 3 } }, .isa_id = 's', .imm_scale = 1, .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 21 }, }, .pred = HEX_PRED_TRUE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -22357,10 +22164,10 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xffe02084, .op = 0xab802004 }, .id = HEX_INS_S2_PSTORERIF_PI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks 
= { { 0x4, 3 } }, .imm_scale = 2, .syntax = 14 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 3 } }, .isa_id = 's', .imm_scale = 2, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 18 }, }, .pred = HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -22373,10 +22180,10 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xffe02084, .op = 0xab802084 }, .id = HEX_INS_S2_PSTORERIFNEW_PI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 16 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 3 } }, .imm_scale = 2, .syntax = 18 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 22 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 3 } }, .isa_id = 's', .imm_scale 
= 2, .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 22 }, }, .pred = HEX_PRED_FALSE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -22389,10 +22196,10 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xffe03884, .op = 0xaba03004 }, .id = HEX_INS_S2_PSTORERINEWF_PI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 3 } }, .imm_scale = 2, .syntax = 14 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 3 } }, .isa_id = 's', .imm_scale = 2, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 18 }, }, .pred = HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -22405,10 +22212,10 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xffe03884, .op = 0xaba03084 }, .id = HEX_INS_S2_PSTORERINEWFNEW_PI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = 
HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 16 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 3 } }, .imm_scale = 2, .syntax = 18 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 22 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 3 } }, .isa_id = 's', .imm_scale = 2, .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 22 }, }, .pred = HEX_PRED_FALSE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -22421,10 +22228,10 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xffe03884, .op = 0xaba03000 }, .id = HEX_INS_S2_PSTORERINEWT_PI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 3 } }, .imm_scale = 2, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = 
HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 3 } }, .isa_id = 's', .imm_scale = 2, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 17 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -22437,10 +22244,10 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xffe03884, .op = 0xaba03080 }, .id = HEX_INS_S2_PSTORERINEWTNEW_PI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 15 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 3 } }, .imm_scale = 2, .syntax = 17 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 21 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 3 } }, .isa_id = 's', .imm_scale = 2, .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', 
.syntax = 21 }, }, .pred = HEX_PRED_TRUE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -22453,10 +22260,10 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xffe02084, .op = 0xab802000 }, .id = HEX_INS_S2_PSTORERIT_PI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 3 } }, .imm_scale = 2, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 3 } }, .isa_id = 's', .imm_scale = 2, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 17 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -22469,10 +22276,10 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xffe02084, .op = 0xab802080 }, .id = HEX_INS_S2_PSTORERITNEW_PI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 15 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | 
HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 3 } }, .imm_scale = 2, .syntax = 17 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 21 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 3 } }, .isa_id = 's', .imm_scale = 2, .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 21 }, }, .pred = HEX_PRED_TRUE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -22485,9 +22292,9 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xf9e00000, .op = 0xa1000000 }, .id = HEX_INS_S2_STORERB_IO, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x8, 0 }, { 0x1, 13 }, { 0x2, 25 } }, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x8, 0 }, { 0x1, 13 }, { 0x2, 25 } }, .isa_id = 's', .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 10 }, }, .pred = 
HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -22499,9 +22306,9 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xffe000ff, .op = 0xaf000000 }, .id = HEX_INS_S2_STORERB_PBR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .isa_id = 'u', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 16 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -22513,10 +22320,10 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xffe00087, .op = 0xa9000000 }, .id = HEX_INS_S2_STORERB_PCI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 3 } }, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | 
HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 3 } }, .isa_id = 's', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .isa_id = 'u', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 18 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -22528,9 +22335,9 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xffe000ff, .op = 0xa9000002 }, .id = HEX_INS_S2_STORERB_PCR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 14 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .isa_id = 'u', .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 19 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -22542,9 +22349,9 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xffe02087, .op = 0xab000000 }, .id = HEX_INS_S2_STORERB_PI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 3 } }, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { 
.info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 3 } }, .isa_id = 's', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -22556,9 +22363,9 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xffe000ff, .op = 0xad000000 }, .id = HEX_INS_S2_STORERB_PR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .isa_id = 'u', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -22570,9 +22377,9 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xf9e01800, .op = 0xa1a00000 }, .id = HEX_INS_S2_STORERBNEW_IO, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x8, 0 }, { 0x1, 13 }, { 0x2, 25 } }, 
.syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x8, 0 }, { 0x1, 13 }, { 0x2, 25 } }, .isa_id = 's', .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -22584,9 +22391,9 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xffe018ff, .op = 0xafa00000 }, .id = HEX_INS_S2_STORERBNEW_PBR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .isa_id = 'u', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 16 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -22598,10 +22405,10 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xffe01887, .op = 0xa9a00000 }, .id = HEX_INS_S2_STORERBNEW_PCI, .ops = { - { .info 
= HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 3 } }, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 3 } }, .isa_id = 's', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .isa_id = 'u', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 18 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -22613,9 +22420,9 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xffe018ff, .op = 0xa9a00002 }, .id = HEX_INS_S2_STORERBNEW_PCR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 14 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks 
= { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .isa_id = 'u', .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 19 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -22627,9 +22434,9 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xffe03887, .op = 0xaba00000 }, .id = HEX_INS_S2_STORERBNEW_PI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 3 } }, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 3 } }, .isa_id = 's', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -22641,9 +22448,9 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xffe018ff, .op = 0xada00000 }, .id = HEX_INS_S2_STORERBNEW_PR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 8 } }, .reg_cls = 
HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .isa_id = 'u', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -22655,9 +22462,9 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xf9e00000, .op = 0xa1c00000 }, .id = HEX_INS_S2_STORERD_IO, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x8, 0 }, { 0x1, 13 }, { 0x2, 25 } }, .imm_scale = 3, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x8, 0 }, { 0x1, 13 }, { 0x2, 25 } }, .isa_id = 's', .imm_scale = 3, .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -22669,9 +22476,9 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xffe000ff, .op = 0xafc00000 }, .id = HEX_INS_S2_STORERD_PBR, .ops = { - { .info = 
HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .isa_id = 'u', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 16 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -22683,10 +22490,10 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xffe00087, .op = 0xa9c00000 }, .id = HEX_INS_S2_STORERD_PCI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 3 } }, .imm_scale = 3, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 3 } }, .isa_id = 's', .imm_scale = 3, .syntax = 7 }, + { .info = 
HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .isa_id = 'u', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 18 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -22698,9 +22505,9 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xffe000ff, .op = 0xa9c00002 }, .id = HEX_INS_S2_STORERD_PCR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 14 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .isa_id = 'u', .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 19 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -22712,9 +22519,9 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xffe02087, .op = 0xabc00000 }, .id = HEX_INS_S2_STORERD_PI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 3 } }, .imm_scale = 3, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, 
.reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 3 } }, .isa_id = 's', .imm_scale = 3, .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -22726,9 +22533,9 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xffe000ff, .op = 0xadc00000 }, .id = HEX_INS_S2_STORERD_PR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .isa_id = 'u', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -22740,9 +22547,9 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xf9e00000, .op = 0xa1600000 }, .id = HEX_INS_S2_STORERF_IO, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 5 }, - { .info = 
HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x8, 0 }, { 0x1, 13 }, { 0x2, 25 } }, .imm_scale = 1, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x8, 0 }, { 0x1, 13 }, { 0x2, 25 } }, .isa_id = 's', .imm_scale = 1, .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -22754,9 +22561,9 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xffe000ff, .op = 0xaf600000 }, .id = HEX_INS_S2_STORERF_PBR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .isa_id = 'u', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 16 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -22768,10 +22575,10 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask 
= 0xffe00087, .op = 0xa9600000 }, .id = HEX_INS_S2_STORERF_PCI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 3 } }, .imm_scale = 1, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 3 } }, .isa_id = 's', .imm_scale = 1, .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .isa_id = 'u', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 18 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -22783,9 +22590,9 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xffe000ff, .op = 0xa9600002 }, .id = HEX_INS_S2_STORERF_PCR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 14 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 5 }, + { .info = 
HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .isa_id = 'u', .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 19 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -22797,9 +22604,9 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xffe02087, .op = 0xab600000 }, .id = HEX_INS_S2_STORERF_PI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 3 } }, .imm_scale = 1, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 3 } }, .isa_id = 's', .imm_scale = 1, .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -22811,9 +22618,9 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xffe000ff, .op = 0xad600000 }, .id = HEX_INS_S2_STORERF_PR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | 
HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .isa_id = 'u', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -22825,9 +22632,9 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xf9e00000, .op = 0xa1400000 }, .id = HEX_INS_S2_STORERH_IO, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x8, 0 }, { 0x1, 13 }, { 0x2, 25 } }, .imm_scale = 1, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x8, 0 }, { 0x1, 13 }, { 0x2, 25 } }, .isa_id = 's', .imm_scale = 1, .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -22839,9 +22646,9 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xffe000ff, .op = 0xaf400000 }, .id = HEX_INS_S2_STORERH_PBR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, 
.reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .isa_id = 'u', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 16 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -22853,10 +22660,10 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xffe00087, .op = 0xa9400000 }, .id = HEX_INS_S2_STORERH_PCI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 3 } }, .imm_scale = 1, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 3 } }, .isa_id = 's', .imm_scale = 1, .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .isa_id = 'u', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 18 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ 
-22868,9 +22675,9 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xffe000ff, .op = 0xa9400002 }, .id = HEX_INS_S2_STORERH_PCR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 14 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .isa_id = 'u', .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 19 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -22882,9 +22689,9 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xffe02087, .op = 0xab400000 }, .id = HEX_INS_S2_STORERH_PI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 3 } }, .imm_scale = 1, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 3 } }, .isa_id = 's', .imm_scale = 1, .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 
0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -22896,9 +22703,9 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xffe000ff, .op = 0xad400000 }, .id = HEX_INS_S2_STORERH_PR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .isa_id = 'u', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -22910,9 +22717,9 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xf9e01800, .op = 0xa1a00800 }, .id = HEX_INS_S2_STORERHNEW_IO, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x8, 0 }, { 0x1, 13 }, { 0x2, 25 } }, .imm_scale = 1, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | 
HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x8, 0 }, { 0x1, 13 }, { 0x2, 25 } }, .isa_id = 's', .imm_scale = 1, .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -22924,9 +22731,9 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xffe018ff, .op = 0xafa00800 }, .id = HEX_INS_S2_STORERHNEW_PBR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .isa_id = 'u', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 16 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -22938,10 +22745,10 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xffe01887, .op = 0xa9a00800 }, .id = HEX_INS_S2_STORERHNEW_PCI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 3 } }, .imm_scale = 1, .syntax = 7 }, - { .info = 
HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 3 } }, .isa_id = 's', .imm_scale = 1, .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .isa_id = 'u', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 18 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -22953,9 +22760,9 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xffe018ff, .op = 0xa9a00802 }, .id = HEX_INS_S2_STORERHNEW_PCR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 14 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .isa_id = 'u', .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 19 }, }, .pred = HEX_NOPRED, .cond = 
RZ_TYPE_COND_AL, @@ -22967,9 +22774,9 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xffe03887, .op = 0xaba00800 }, .id = HEX_INS_S2_STORERHNEW_PI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 3 } }, .imm_scale = 1, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 3 } }, .isa_id = 's', .imm_scale = 1, .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -22981,9 +22788,9 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xffe018ff, .op = 0xada00800 }, .id = HEX_INS_S2_STORERHNEW_PR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 
13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .isa_id = 'u', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -22995,9 +22802,9 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xf9e00000, .op = 0xa1800000 }, .id = HEX_INS_S2_STORERI_IO, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x8, 0 }, { 0x1, 13 }, { 0x2, 25 } }, .imm_scale = 2, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x8, 0 }, { 0x1, 13 }, { 0x2, 25 } }, .isa_id = 's', .imm_scale = 2, .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -23009,9 +22816,9 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xffe000ff, .op = 0xaf800000 }, .id = HEX_INS_S2_STORERI_PBR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, 
.syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .isa_id = 'u', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 16 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -23023,10 +22830,10 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xffe00087, .op = 0xa9800000 }, .id = HEX_INS_S2_STORERI_PCI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 3 } }, .imm_scale = 2, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 3 } }, .isa_id = 's', .imm_scale = 2, .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .isa_id = 'u', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 18 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -23038,9 +22845,9 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xffe000ff, .op = 0xa9800002 }, .id = 
HEX_INS_S2_STORERI_PCR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 14 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .isa_id = 'u', .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 19 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -23052,9 +22859,9 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xffe02087, .op = 0xab800000 }, .id = HEX_INS_S2_STORERI_PI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 3 } }, .imm_scale = 2, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 3 } }, .isa_id = 's', .imm_scale = 2, .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ 
-23066,9 +22873,9 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xffe000ff, .op = 0xad800000 }, .id = HEX_INS_S2_STORERI_PR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .isa_id = 'u', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -23080,9 +22887,9 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xf9e01800, .op = 0xa1a01000 }, .id = HEX_INS_S2_STORERINEW_IO, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x8, 0 }, { 0x1, 13 }, { 0x2, 25 } }, .imm_scale = 2, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x8, 0 }, { 0x1, 13 }, { 0x2, 25 
} }, .isa_id = 's', .imm_scale = 2, .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -23094,9 +22901,9 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xffe018ff, .op = 0xafa01000 }, .id = HEX_INS_S2_STORERINEW_PBR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .isa_id = 'u', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 16 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -23108,10 +22915,10 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xffe01887, .op = 0xa9a01000 }, .id = HEX_INS_S2_STORERINEW_PCI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 3 } }, .imm_scale = 2, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | 
HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 3 } }, .isa_id = 's', .imm_scale = 2, .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .isa_id = 'u', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 18 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -23123,9 +22930,9 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xffe018ff, .op = 0xa9a01002 }, .id = HEX_INS_S2_STORERINEW_PCR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 14 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .isa_id = 'u', .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 19 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -23137,9 +22944,9 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xffe03887, .op = 
0xaba01000 }, .id = HEX_INS_S2_STORERINEW_PI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 3 } }, .imm_scale = 2, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x4, 3 } }, .isa_id = 's', .imm_scale = 2, .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -23151,9 +22958,9 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xffe018ff, .op = 0xada01000 }, .id = HEX_INS_S2_STORERINEW_PR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x1, 13 } }, .reg_cls = HEX_REG_CLASS_MOD_REGS, .isa_id = 'u', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | 
HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -23165,9 +22972,9 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xffe020fc, .op = 0xa0a00000 }, .id = HEX_INS_S2_STOREW_LOCKED, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'd', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 17 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -23179,8 +22986,8 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xffe020fc, .op = 0xa0a00008 }, .id = HEX_INS_S2_STOREW_RL_AT_VI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -23192,8 +22999,8 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask 
= 0xffe020fc, .op = 0xa0a00028 }, .id = HEX_INS_S2_STOREW_RL_ST_VI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -23205,9 +23012,9 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xfffc2084, .op = 0xaf000084 }, .id = HEX_INS_S4_PSTORERBF_ABS, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x4, 3 }, { 0x2, 16 } }, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x4, 3 }, { 0x2, 16 } }, .isa_id = 'u', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 16 }, }, .pred = HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -23220,9 +23027,9 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xfffc2084, .op = 0xaf002084 }, .id = HEX_INS_S4_PSTORERBFNEW_ABS, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = 
HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x4, 3 }, { 0x2, 16 } }, .syntax = 16 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 20 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x4, 3 }, { 0x2, 16 } }, .isa_id = 'u', .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 20 }, }, .pred = HEX_PRED_FALSE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -23235,9 +23042,9 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xfffc3884, .op = 0xafa00084 }, .id = HEX_INS_S4_PSTORERBNEWF_ABS, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x4, 3 }, { 0x2, 16 } }, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x4, 3 }, { 0x2, 16 } }, .isa_id = 'u', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 16 }, }, .pred = HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -23250,9 +23057,9 @@ static const HexInsnTemplate 
templates_normal_0xa[] = { .encoding = { .mask = 0xfffc3884, .op = 0xafa02084 }, .id = HEX_INS_S4_PSTORERBNEWFNEW_ABS, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x4, 3 }, { 0x2, 16 } }, .syntax = 16 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 20 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x4, 3 }, { 0x2, 16 } }, .isa_id = 'u', .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 20 }, }, .pred = HEX_PRED_FALSE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -23265,9 +23072,9 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xfffc3884, .op = 0xafa00080 }, .id = HEX_INS_S4_PSTORERBNEWT_ABS, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x4, 3 }, { 0x2, 16 } }, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x4, 3 }, { 0x2, 16 } 
}, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 15 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -23280,9 +23087,9 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xfffc3884, .op = 0xafa02080 }, .id = HEX_INS_S4_PSTORERBNEWTNEW_ABS, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x4, 3 }, { 0x2, 16 } }, .syntax = 15 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x4, 3 }, { 0x2, 16 } }, .isa_id = 'u', .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 19 }, }, .pred = HEX_PRED_TRUE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -23295,9 +23102,9 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xfffc2084, .op = 0xaf000080 }, .id = HEX_INS_S4_PSTORERBT_ABS, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x4, 3 }, { 0x2, 16 } }, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 15 }, + { 
.info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x4, 3 }, { 0x2, 16 } }, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 15 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -23310,9 +23117,9 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xfffc2084, .op = 0xaf002080 }, .id = HEX_INS_S4_PSTORERBTNEW_ABS, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x4, 3 }, { 0x2, 16 } }, .syntax = 15 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x4, 3 }, { 0x2, 16 } }, .isa_id = 'u', .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 19 }, }, .pred = HEX_PRED_TRUE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -23325,9 +23132,9 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xfffc2084, .op = 0xafc00084 }, .id = HEX_INS_S4_PSTORERDF_ABS, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, 
.masks = { { 0x4, 3 }, { 0x2, 16 } }, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x4, 3 }, { 0x2, 16 } }, .isa_id = 'u', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 16 }, }, .pred = HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -23340,9 +23147,9 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xfffc2084, .op = 0xafc02084 }, .id = HEX_INS_S4_PSTORERDFNEW_ABS, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x4, 3 }, { 0x2, 16 } }, .syntax = 16 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 20 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x4, 3 }, { 0x2, 16 } }, .isa_id = 'u', .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 20 }, }, .pred = HEX_PRED_FALSE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -23355,9 +23162,9 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = 
{ .mask = 0xfffc2084, .op = 0xafc00080 }, .id = HEX_INS_S4_PSTORERDT_ABS, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x4, 3 }, { 0x2, 16 } }, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x4, 3 }, { 0x2, 16 } }, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 15 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -23370,9 +23177,9 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xfffc2084, .op = 0xafc02080 }, .id = HEX_INS_S4_PSTORERDTNEW_ABS, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x4, 3 }, { 0x2, 16 } }, .syntax = 15 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x4, 3 }, { 0x2, 16 } }, .isa_id = 'u', .syntax = 15 }, + { .info = 
HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 19 }, }, .pred = HEX_PRED_TRUE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -23385,9 +23192,9 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xfffc2084, .op = 0xaf600084 }, .id = HEX_INS_S4_PSTORERFF_ABS, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x4, 3 }, { 0x2, 16 } }, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x4, 3 }, { 0x2, 16 } }, .isa_id = 'u', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 16 }, }, .pred = HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -23400,9 +23207,9 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xfffc2084, .op = 0xaf602084 }, .id = HEX_INS_S4_PSTORERFFNEW_ABS, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x4, 3 }, { 0x2, 16 } }, .syntax = 16 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 20 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', 
.syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x4, 3 }, { 0x2, 16 } }, .isa_id = 'u', .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 20 }, }, .pred = HEX_PRED_FALSE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -23415,9 +23222,9 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xfffc2084, .op = 0xaf600080 }, .id = HEX_INS_S4_PSTORERFT_ABS, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x4, 3 }, { 0x2, 16 } }, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x4, 3 }, { 0x2, 16 } }, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 15 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -23430,9 +23237,9 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xfffc2084, .op = 0xaf602080 }, .id = HEX_INS_S4_PSTORERFTNEW_ABS, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x4, 3 }, { 0x2, 16 } }, .syntax = 15 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 
} }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x4, 3 }, { 0x2, 16 } }, .isa_id = 'u', .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 19 }, }, .pred = HEX_PRED_TRUE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -23445,9 +23252,9 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xfffc2084, .op = 0xaf400084 }, .id = HEX_INS_S4_PSTORERHF_ABS, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x4, 3 }, { 0x2, 16 } }, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x4, 3 }, { 0x2, 16 } }, .isa_id = 'u', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 16 }, }, .pred = HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -23460,9 +23267,9 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xfffc2084, .op = 0xaf402084 }, .id = HEX_INS_S4_PSTORERHFNEW_ABS, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | 
HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x4, 3 }, { 0x2, 16 } }, .syntax = 16 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 20 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x4, 3 }, { 0x2, 16 } }, .isa_id = 'u', .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 20 }, }, .pred = HEX_PRED_FALSE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -23475,9 +23282,9 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xfffc3884, .op = 0xafa00884 }, .id = HEX_INS_S4_PSTORERHNEWF_ABS, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x4, 3 }, { 0x2, 16 } }, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x4, 3 }, { 0x2, 16 } }, .isa_id = 'u', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 16 }, }, .pred = HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -23490,9 +23297,9 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding 
= { .mask = 0xfffc3884, .op = 0xafa02884 }, .id = HEX_INS_S4_PSTORERHNEWFNEW_ABS, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x4, 3 }, { 0x2, 16 } }, .syntax = 16 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 20 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x4, 3 }, { 0x2, 16 } }, .isa_id = 'u', .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 20 }, }, .pred = HEX_PRED_FALSE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -23505,9 +23312,9 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xfffc3884, .op = 0xafa00880 }, .id = HEX_INS_S4_PSTORERHNEWT_ABS, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x4, 3 }, { 0x2, 16 } }, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x4, 3 }, { 0x2, 16 } }, .isa_id = 'u', .syntax = 11 }, + { 
.info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 15 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -23520,9 +23327,9 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xfffc3884, .op = 0xafa02880 }, .id = HEX_INS_S4_PSTORERHNEWTNEW_ABS, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x4, 3 }, { 0x2, 16 } }, .syntax = 15 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x4, 3 }, { 0x2, 16 } }, .isa_id = 'u', .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 19 }, }, .pred = HEX_PRED_TRUE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -23535,9 +23342,9 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xfffc2084, .op = 0xaf400080 }, .id = HEX_INS_S4_PSTORERHT_ABS, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x4, 3 }, { 0x2, 16 } }, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = 
{ { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x4, 3 }, { 0x2, 16 } }, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 15 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -23550,9 +23357,9 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xfffc2084, .op = 0xaf402080 }, .id = HEX_INS_S4_PSTORERHTNEW_ABS, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x4, 3 }, { 0x2, 16 } }, .syntax = 15 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x4, 3 }, { 0x2, 16 } }, .isa_id = 'u', .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 19 }, }, .pred = HEX_PRED_TRUE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -23565,9 +23372,9 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xfffc2084, .op = 0xaf800084 }, .id = HEX_INS_S4_PSTORERIF_ABS, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x4, 3 }, { 0x2, 16 } }, .syntax 
= 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x4, 3 }, { 0x2, 16 } }, .isa_id = 'u', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 16 }, }, .pred = HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -23580,9 +23387,9 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xfffc2084, .op = 0xaf802084 }, .id = HEX_INS_S4_PSTORERIFNEW_ABS, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x4, 3 }, { 0x2, 16 } }, .syntax = 16 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 20 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x4, 3 }, { 0x2, 16 } }, .isa_id = 'u', .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 20 }, }, .pred = HEX_PRED_FALSE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -23595,9 +23402,9 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xfffc3884, .op = 0xafa01084 }, .id = HEX_INS_S4_PSTORERINEWF_ABS, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, 
.syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x4, 3 }, { 0x2, 16 } }, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x4, 3 }, { 0x2, 16 } }, .isa_id = 'u', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 16 }, }, .pred = HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -23610,9 +23417,9 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xfffc3884, .op = 0xafa03084 }, .id = HEX_INS_S4_PSTORERINEWFNEW_ABS, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x4, 3 }, { 0x2, 16 } }, .syntax = 16 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 20 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x4, 3 }, { 0x2, 16 } }, .isa_id = 'u', .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 20 }, }, .pred = HEX_PRED_FALSE | HEX_PRED_NEW, 
.cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -23625,9 +23432,9 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xfffc3884, .op = 0xafa01080 }, .id = HEX_INS_S4_PSTORERINEWT_ABS, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x4, 3 }, { 0x2, 16 } }, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x4, 3 }, { 0x2, 16 } }, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 15 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -23640,9 +23447,9 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xfffc3884, .op = 0xafa03080 }, .id = HEX_INS_S4_PSTORERINEWTNEW_ABS, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x4, 3 }, { 0x2, 16 } }, .syntax = 15 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | 
HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x4, 3 }, { 0x2, 16 } }, .isa_id = 'u', .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 19 }, }, .pred = HEX_PRED_TRUE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -23655,9 +23462,9 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xfffc2084, .op = 0xaf800080 }, .id = HEX_INS_S4_PSTORERIT_ABS, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x4, 3 }, { 0x2, 16 } }, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x4, 3 }, { 0x2, 16 } }, .isa_id = 'u', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 15 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -23670,9 +23477,9 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xfffc2084, .op = 0xaf802080 }, .id = HEX_INS_S4_PSTORERITNEW_ABS, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x4, 3 }, { 0x2, 16 } }, .syntax = 15 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 19 }, + { 
.info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'v', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x4, 3 }, { 0x2, 16 } }, .isa_id = 'u', .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 19 }, }, .pred = HEX_PRED_TRUE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -23685,9 +23492,9 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xffe020fc, .op = 0xa0e00000 }, .id = HEX_INS_S4_STORED_LOCKED, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'd', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 17 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -23699,8 +23506,8 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xffe020fc, .op = 0xa0e00008 }, .id = HEX_INS_S4_STORED_RL_AT_VI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = 
HEX_REG_CLASS_DOUBLE_REGS, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -23712,8 +23519,8 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xffe020fc, .op = 0xa0e00028 }, .id = HEX_INS_S4_STORED_RL_ST_VI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -23725,9 +23532,9 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xffe020c0, .op = 0xab000080 }, .id = HEX_INS_S4_STORERB_AP, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x6, 0 } }, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'e', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | 
HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x6, 0 } }, .isa_id = 'U', .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -23739,10 +23546,10 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xffe00080, .op = 0xad000080 }, .id = HEX_INS_S4_STORERB_UR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x1, 6 }, { 0x1, 13 } }, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x6, 0 } }, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'u', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x1, 6 }, { 0x1, 13 } }, .isa_id = 'u', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x6, 0 } }, .isa_id = 'U', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -23754,9 +23561,9 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xffe038c0, .op = 0xaba00080 }, .id = HEX_INS_S4_STORERBNEW_AP, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x6, 0 } }, .syntax = 6 }, - { .info = 
HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'e', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x6, 0 } }, .isa_id = 'U', .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -23768,10 +23575,10 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xffe01880, .op = 0xada00080 }, .id = HEX_INS_S4_STORERBNEW_UR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x1, 6 }, { 0x1, 13 } }, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x6, 0 } }, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'u', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x1, 6 }, { 0x1, 13 } }, .isa_id = 'u', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x6, 0 } }, .isa_id = 'U', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -23783,9 +23590,9 @@ 
static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xffe020c0, .op = 0xabc00080 }, .id = HEX_INS_S4_STORERD_AP, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x6, 0 } }, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'e', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x6, 0 } }, .isa_id = 'U', .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -23797,10 +23604,10 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xffe00080, .op = 0xadc00080 }, .id = HEX_INS_S4_STORERD_UR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x1, 6 }, { 0x1, 13 } }, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x6, 0 } }, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'u', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { 
{ 0x1, 6 }, { 0x1, 13 } }, .isa_id = 'u', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x6, 0 } }, .isa_id = 'U', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -23812,9 +23619,9 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xffe020c0, .op = 0xab600080 }, .id = HEX_INS_S4_STORERF_AP, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x6, 0 } }, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'e', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x6, 0 } }, .isa_id = 'U', .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -23826,10 +23633,10 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xffe00080, .op = 0xad600080 }, .id = HEX_INS_S4_STORERF_UR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x1, 6 }, { 0x1, 13 } }, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | 
HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x6, 0 } }, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'u', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x1, 6 }, { 0x1, 13 } }, .isa_id = 'u', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x6, 0 } }, .isa_id = 'U', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -23841,9 +23648,9 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xffe020c0, .op = 0xab400080 }, .id = HEX_INS_S4_STORERH_AP, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x6, 0 } }, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'e', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x6, 0 } }, .isa_id = 'U', .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -23855,10 +23662,10 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xffe00080, .op = 0xad400080 
}, .id = HEX_INS_S4_STORERH_UR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x1, 6 }, { 0x1, 13 } }, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x6, 0 } }, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'u', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x1, 6 }, { 0x1, 13 } }, .isa_id = 'u', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x6, 0 } }, .isa_id = 'U', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -23870,9 +23677,9 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xffe038c0, .op = 0xaba00880 }, .id = HEX_INS_S4_STORERHNEW_AP, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x6, 0 } }, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'e', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, 
.masks = { { 0x6, 0 } }, .isa_id = 'U', .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -23884,10 +23691,10 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xffe01880, .op = 0xada00880 }, .id = HEX_INS_S4_STORERHNEW_UR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x1, 6 }, { 0x1, 13 } }, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x6, 0 } }, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'u', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x1, 6 }, { 0x1, 13 } }, .isa_id = 'u', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x6, 0 } }, .isa_id = 'U', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -23899,9 +23706,9 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xffe020c0, .op = 0xab800080 }, .id = HEX_INS_S4_STORERI_AP, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, 
.masks = { { 0x6, 0 } }, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'e', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x6, 0 } }, .isa_id = 'U', .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -23913,10 +23720,10 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xffe00080, .op = 0xad800080 }, .id = HEX_INS_S4_STORERI_UR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x1, 6 }, { 0x1, 13 } }, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x6, 0 } }, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'u', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x1, 6 }, { 0x1, 13 } }, .isa_id = 'u', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x6, 0 } }, .isa_id = 'U', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -23928,9 +23735,9 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 
0xffe038c0, .op = 0xaba01080 }, .id = HEX_INS_S4_STORERINEW_AP, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x6, 0 } }, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'e', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x6, 0 } }, .isa_id = 'U', .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -23942,10 +23749,10 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xffe01880, .op = 0xada01080 }, .id = HEX_INS_S4_STORERINEW_UR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x1, 6 }, { 0x1, 13 } }, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x6, 0 } }, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'u', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x1, 6 }, { 0x1, 13 } }, .isa_id = 'u', .syntax = 7 }, + { .info = 
HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x6, 0 } }, .isa_id = 'U', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_N_REG, .masks = { { 0x3, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -23966,7 +23773,7 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xffe03fff, .op = 0xa0000000 }, .id = HEX_INS_Y2_DCCLEANA, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -23978,7 +23785,7 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xffe03fff, .op = 0xa2200000 }, .id = HEX_INS_Y2_DCCLEANIDX, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -23990,7 +23797,7 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xffe03fff, .op = 0xa0400000 }, .id = HEX_INS_Y2_DCCLEANINVA, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -24002,7 +23809,7 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xffe03fff, .op = 0xa2600000 }, .id = HEX_INS_Y2_DCCLEANINVIDX, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, 
.reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 14 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -24014,7 +23821,7 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xffe03fff, .op = 0xa0200000 }, .id = HEX_INS_Y2_DCINVA, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 7 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -24026,7 +23833,7 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xffe03fff, .op = 0xa2400000 }, .id = HEX_INS_Y2_DCINVIDX, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -24047,8 +23854,8 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xffe03fe0, .op = 0xa4200000 }, .id = HEX_INS_Y2_DCTAGR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -24060,8 +23867,8 @@ static const HexInsnTemplate templates_normal_0xa[] = { 
.encoding = { .mask = 0xffe020ff, .op = 0xa4000000 }, .id = HEX_INS_Y2_DCTAGW, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 8 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -24073,7 +23880,7 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xffe03fff, .op = 0xa0c00000 }, .id = HEX_INS_Y2_DCZEROA, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -24085,7 +23892,7 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xffe03fff, .op = 0xa8600000 }, .id = HEX_INS_Y2_L2CLEANINVIDX, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 14 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -24115,8 +23922,8 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xffe020ff, .op = 0xa6000000 }, .id = HEX_INS_Y4_L2FETCH, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 
16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -24128,8 +23935,8 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xffe03fe0, .op = 0xa4600000 }, .id = HEX_INS_Y4_L2TAGR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -24141,8 +23948,8 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xffe020ff, .op = 0xa4400000 }, .id = HEX_INS_Y4_L2TAGW, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 8 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -24154,7 +23961,7 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xffe03fff, .op = 0xa6200000 }, .id = HEX_INS_Y5_L2CLEANIDX, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, 
+ { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -24166,8 +23973,8 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xffe020ff, .op = 0xa6800000 }, .id = HEX_INS_Y5_L2FETCH, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -24206,7 +24013,7 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xffe03fff, .op = 0xa6400000 }, .id = HEX_INS_Y5_L2INVIDX, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -24218,8 +24025,8 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xffe03ffc, .op = 0xa0e02000 }, .id = HEX_INS_Y5_L2LOCKA, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'd', .syntax = 0 }, 
+ { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -24231,7 +24038,7 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xffe03fff, .op = 0xa6600000 }, .id = HEX_INS_Y5_L2UNLOCKA, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -24243,8 +24050,8 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xffe020ff, .op = 0xa6000040 }, .id = HEX_INS_Y6_DMLINK, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 8 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -24256,7 +24063,7 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xffff3fe0, .op = 0xa8000060 }, .id = HEX_INS_Y6_DMPAUSE, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -24268,7 +24075,7 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xffff3fe0, .op = 0xa8000040 }, .id 
= HEX_INS_Y6_DMPOLL, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -24280,7 +24087,7 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xffe03fff, .op = 0xa6000080 }, .id = HEX_INS_Y6_DMRESUME, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -24292,7 +24099,7 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xffe03fff, .op = 0xa6000020 }, .id = HEX_INS_Y6_DMSTART, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -24304,7 +24111,7 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xffff3fe0, .op = 0xa8000020 }, .id = HEX_INS_Y6_DMWAIT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -24316,7 +24123,7 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xffff20ff, .op = 0xa6a00000 }, .id = HEX_INS_IMPORTED_L2GCLEAN_RTT, .ops = 
{ - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -24328,7 +24135,7 @@ static const HexInsnTemplate templates_normal_0xa[] = { .encoding = { .mask = 0xffff20ff, .op = 0xa6c00000 }, .id = HEX_INS_IMPORTED_L2GCLEANINV_RTT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -24344,9 +24151,9 @@ static const HexInsnTemplate templates_normal_0xb[] = { .encoding = { .mask = 0xf0000000, .op = 0xb0000000 }, .id = HEX_INS_A2_ADDI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x9, 5 }, { 0x7, 21 } }, .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x9, 5 }, { 0x7, 
21 } }, .isa_id = 's', .syntax = 8 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -24362,10 +24169,10 @@ static const HexInsnTemplate templates_normal_0xc[] = { .encoding = { .mask = 0xffe02080, .op = 0xc2c00000 }, .id = HEX_INS_A4_ADDP_C, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'x', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -24377,9 +24184,9 @@ static const HexInsnTemplate templates_normal_0xc[] = { .encoding = { .mask = 0xffe020fc, .op = 0xc7c000c0 }, .id = HEX_INS_A4_CMPBEQ, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = 
HEX_REG_CLASS_INT_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -24391,9 +24198,9 @@ static const HexInsnTemplate templates_normal_0xc[] = { .encoding = { .mask = 0xffe020fc, .op = 0xc7c00040 }, .id = HEX_INS_A4_CMPBGT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -24405,9 +24212,9 @@ static const HexInsnTemplate templates_normal_0xc[] = { .encoding = { .mask = 0xffe020fc, .op = 0xc7c000e0 }, .id = HEX_INS_A4_CMPBGTU, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 
16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -24419,9 +24226,9 @@ static const HexInsnTemplate templates_normal_0xc[] = { .encoding = { .mask = 0xffe020fc, .op = 0xc7c00060 }, .id = HEX_INS_A4_CMPHEQ, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -24433,9 +24240,9 @@ static const HexInsnTemplate templates_normal_0xc[] = { .encoding = { .mask = 0xffe020fc, .op = 0xc7c00080 }, .id = HEX_INS_A4_CMPHGT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, 
.masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -24447,9 +24254,9 @@ static const HexInsnTemplate templates_normal_0xc[] = { .encoding = { .mask = 0xffe020fc, .op = 0xc7c000a0 }, .id = HEX_INS_A4_CMPHGTU, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -24461,9 +24268,9 @@ static const HexInsnTemplate templates_normal_0xc[] = { .encoding = { .mask = 0xffe020e0, .op = 0xc6c00000 }, .id = HEX_INS_A4_CROUND_RR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = 
HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -24475,9 +24282,9 @@ static const HexInsnTemplate templates_normal_0xc[] = { .encoding = { .mask = 0xffe020e0, .op = 0xc6c00080 }, .id = HEX_INS_A4_ROUND_RR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -24489,9 +24296,9 @@ static const HexInsnTemplate templates_normal_0xc[] = { .encoding = { .mask = 0xffe020e0, .op = 0xc6c000c0 }, .id = HEX_INS_A4_ROUND_RR_SAT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, 
.syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -24503,10 +24310,10 @@ static const HexInsnTemplate templates_normal_0xc[] = { .encoding = { .mask = 0xffe02080, .op = 0xc2e00000 }, .id = HEX_INS_A4_SUBP_C, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 
't', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'x', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -24518,9 +24325,9 @@ static const HexInsnTemplate templates_normal_0xc[] = { .encoding = { .mask = 0xffe020e0, .op = 0xcb200020 }, .id = HEX_INS_A4_VRMAXH, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'u', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -24532,9 +24339,9 @@ static const HexInsnTemplate templates_normal_0xc[] = { .encoding = { .mask = 0xffe020e0, .op = 0xcb202020 }, .id = HEX_INS_A4_VRMAXUH, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, + { .info = 
HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'u', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -24546,9 +24353,9 @@ static const HexInsnTemplate templates_normal_0xc[] = { .encoding = { .mask = 0xffe020e0, .op = 0xcb202040 }, .id = HEX_INS_A4_VRMAXUW, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'u', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -24560,9 +24367,9 @@ static const HexInsnTemplate templates_normal_0xc[] = { .encoding = { .mask = 0xffe020e0, .op = 0xcb200040 }, .id = HEX_INS_A4_VRMAXW, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = 
HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'u', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -24574,9 +24381,9 @@ static const HexInsnTemplate templates_normal_0xc[] = { .encoding = { .mask = 0xffe020e0, .op = 0xcb2000a0 }, .id = HEX_INS_A4_VRMINH, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'u', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ 
-24588,9 +24395,9 @@ static const HexInsnTemplate templates_normal_0xc[] = { .encoding = { .mask = 0xffe020e0, .op = 0xcb2020a0 }, .id = HEX_INS_A4_VRMINUH, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'u', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -24602,9 +24409,9 @@ static const HexInsnTemplate templates_normal_0xc[] = { .encoding = { .mask = 0xffe020e0, .op = 0xcb2020c0 }, .id = HEX_INS_A4_VRMINUW, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | 
HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'u', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -24616,9 +24423,9 @@ static const HexInsnTemplate templates_normal_0xc[] = { .encoding = { .mask = 0xffe020e0, .op = 0xcb2000c0 }, .id = HEX_INS_A4_VRMINW, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'u', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -24630,9 +24437,9 @@ static const HexInsnTemplate templates_normal_0xc[] = { .encoding = { .mask = 0xffe020e0, .op = 0xc1400020 }, .id = HEX_INS_A5_VADDHUBS, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 
0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -24644,9 +24451,9 @@ static const HexInsnTemplate templates_normal_0xc[] = { .encoding = { .mask = 0xffe020e0, .op = 0xc6c00040 }, .id = HEX_INS_A7_CROUNDD_RR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -24658,9 +24465,9 @@ static const HexInsnTemplate templates_normal_0xc[] = { .encoding = { .mask = 0xffe020fc, .op = 0xc7800000 }, .id = HEX_INS_C2_BITSCLR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = 
{ { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -24672,9 +24479,9 @@ static const HexInsnTemplate templates_normal_0xc[] = { .encoding = { .mask = 0xffe020fc, .op = 0xc7400000 }, .id = HEX_INS_C2_BITSSET, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -24686,9 +24493,9 @@ static const HexInsnTemplate templates_normal_0xc[] = { .encoding = { .mask = 0xffe020fc, .op = 0xc7a00000 }, .id = HEX_INS_C4_NBITSCLR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | 
HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -24700,9 +24507,9 @@ static const HexInsnTemplate templates_normal_0xc[] = { .encoding = { .mask = 0xffe020fc, .op = 0xc7600000 }, .id = HEX_INS_C4_NBITSSET, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -24714,9 +24521,9 @@ static const HexInsnTemplate templates_normal_0xc[] = { .encoding = { .mask = 0xffe020fc, .op = 0xc7e00060 }, .id = HEX_INS_F2_SFCMPEQ, .ops = { - { .info = 
HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -24728,9 +24535,9 @@ static const HexInsnTemplate templates_normal_0xc[] = { .encoding = { .mask = 0xffe020fc, .op = 0xc7e00000 }, .id = HEX_INS_F2_SFCMPGE, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -24742,9 +24549,9 @@ static const HexInsnTemplate templates_normal_0xc[] = { .encoding = { .mask = 0xffe020fc, .op = 0xc7e00080 }, .id = HEX_INS_F2_SFCMPGT, 
.ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -24756,9 +24563,9 @@ static const HexInsnTemplate templates_normal_0xc[] = { .encoding = { .mask = 0xffe020fc, .op = 0xc7e00020 }, .id = HEX_INS_F2_SFCMPUO, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -24770,9 +24577,9 @@ static const HexInsnTemplate templates_normal_0xc[] = { .encoding = { .mask = 0xffe020e0, .op = 0xc5000080 }, .id = 
HEX_INS_M4_CMPYI_WH, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -24784,9 +24591,9 @@ static const HexInsnTemplate templates_normal_0xc[] = { .encoding = { .mask = 0xffe020e0, .op = 0xc50000a0 }, .id = HEX_INS_M4_CMPYI_WHC, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond 
= RZ_TYPE_COND_AL, @@ -24798,9 +24605,9 @@ static const HexInsnTemplate templates_normal_0xc[] = { .encoding = { .mask = 0xffe020e0, .op = 0xc50000c0 }, .id = HEX_INS_M4_CMPYR_WH, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -24812,9 +24619,9 @@ static const HexInsnTemplate templates_normal_0xc[] = { .encoding = { .mask = 0xffe020e0, .op = 0xc50000e0 }, .id = HEX_INS_M4_CMPYR_WHC, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 11 
}, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -24826,9 +24633,9 @@ static const HexInsnTemplate templates_normal_0xc[] = { .encoding = { .mask = 0xffe020e0, .op = 0xca800000 }, .id = HEX_INS_M4_XOR_XACC, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -24840,10 +24647,10 @@ static const HexInsnTemplate templates_normal_0xc[] = { .encoding = { .mask = 0xffe02000, .op = 0xc4000000 }, .id = HEX_INS_S2_ADDASL_RRRI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | 
HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 5 } }, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 5 } }, .isa_id = 'u', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -24855,9 +24662,9 @@ static const HexInsnTemplate templates_normal_0xc[] = { .encoding = { .mask = 0xffe020e0, .op = 0xc3800080 }, .id = HEX_INS_S2_ASL_R_P, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 8 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -24869,9 +24676,9 @@ static const HexInsnTemplate templates_normal_0xc[] = { .encoding = { .mask = 0xffe020e0, .op = 0xcbc00080 }, .id = HEX_INS_S2_ASL_R_P_ACC, .ops = { - { 
.info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -24883,9 +24690,9 @@ static const HexInsnTemplate templates_normal_0xc[] = { .encoding = { .mask = 0xffe020e0, .op = 0xcb400080 }, .id = HEX_INS_S2_ASL_R_P_AND, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 
8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -24897,9 +24704,9 @@ static const HexInsnTemplate templates_normal_0xc[] = { .encoding = { .mask = 0xffe020e0, .op = 0xcb800080 }, .id = HEX_INS_S2_ASL_R_P_NAC, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -24911,9 +24718,9 @@ static const HexInsnTemplate templates_normal_0xc[] = { .encoding = { .mask = 0xffe020e0, .op = 0xcb000080 }, .id = HEX_INS_S2_ASL_R_P_OR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 
0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -24925,9 +24732,9 @@ static const HexInsnTemplate templates_normal_0xc[] = { .encoding = { .mask = 0xffe020e0, .op = 0xcb600080 }, .id = HEX_INS_S2_ASL_R_P_XOR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -24939,9 +24746,9 @@ static const HexInsnTemplate templates_normal_0xc[] = { .encoding = { .mask = 0xffe020e0, .op = 0xc6400080 }, .id = HEX_INS_S2_ASL_R_R, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info 
= HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 8 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -24953,9 +24760,9 @@ static const HexInsnTemplate templates_normal_0xc[] = { .encoding = { .mask = 0xffe020e0, .op = 0xccc00080 }, .id = HEX_INS_S2_ASL_R_R_ACC, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -24967,9 +24774,9 @@ static const HexInsnTemplate templates_normal_0xc[] = { .encoding = { .mask = 0xffe020e0, .op = 0xcc400080 }, .id = HEX_INS_S2_ASL_R_R_AND, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax 
= 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -24981,9 +24788,9 @@ static const HexInsnTemplate templates_normal_0xc[] = { .encoding = { .mask = 0xffe020e0, .op = 0xcc800080 }, .id = HEX_INS_S2_ASL_R_R_NAC, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -24995,9 +24802,9 @@ static const HexInsnTemplate templates_normal_0xc[] = { .encoding = { .mask = 0xffe020e0, .op = 0xcc000080 }, .id = HEX_INS_S2_ASL_R_R_OR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = 
HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -25009,9 +24816,9 @@ static const HexInsnTemplate templates_normal_0xc[] = { .encoding = { .mask = 0xffe020e0, .op = 0xc6000080 }, .id = HEX_INS_S2_ASL_R_R_SAT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 8 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -25023,9 +24830,9 @@ static const HexInsnTemplate templates_normal_0xc[] = { .encoding = { .mask = 0xffe020e0, .op = 0xc3400080 }, .id = HEX_INS_S2_ASL_R_VH, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = 
HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -25037,9 +24844,9 @@ static const HexInsnTemplate templates_normal_0xc[] = { .encoding = { .mask = 0xffe020e0, .op = 0xc3000080 }, .id = HEX_INS_S2_ASL_R_VW, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -25051,9 +24858,9 @@ static const HexInsnTemplate 
templates_normal_0xc[] = { .encoding = { .mask = 0xffe020e0, .op = 0xc3800000 }, .id = HEX_INS_S2_ASR_R_P, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 8 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -25065,9 +24872,9 @@ static const HexInsnTemplate templates_normal_0xc[] = { .encoding = { .mask = 0xffe020e0, .op = 0xcbc00000 }, .id = HEX_INS_S2_ASR_R_P_ACC, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, 
.reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -25079,9 +24886,9 @@ static const HexInsnTemplate templates_normal_0xc[] = { .encoding = { .mask = 0xffe020e0, .op = 0xcb400000 }, .id = HEX_INS_S2_ASR_R_P_AND, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -25093,9 +24900,9 @@ static const HexInsnTemplate templates_normal_0xc[] = { .encoding = { .mask = 0xffe020e0, .op = 0xcb800000 }, .id = HEX_INS_S2_ASR_R_P_NAC, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax 
= 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -25107,9 +24914,9 @@ static const HexInsnTemplate templates_normal_0xc[] = { .encoding = { .mask = 0xffe020e0, .op = 0xcb000000 }, .id = HEX_INS_S2_ASR_R_P_OR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -25121,9 +24928,9 @@ static const HexInsnTemplate templates_normal_0xc[] = { .encoding = { .mask = 0xffe020e0, .op = 0xcb600000 }, .id = HEX_INS_S2_ASR_R_P_XOR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = 
HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -25135,9 +24942,9 @@ static const HexInsnTemplate templates_normal_0xc[] = { .encoding = { .mask = 0xffe020e0, .op = 0xc6400000 }, .id = HEX_INS_S2_ASR_R_R, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 8 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -25149,9 +24956,9 @@ static const HexInsnTemplate templates_normal_0xc[] = { .encoding = { .mask = 0xffe020e0, .op = 0xccc00000 }, .id = 
HEX_INS_S2_ASR_R_R_ACC, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -25163,9 +24970,9 @@ static const HexInsnTemplate templates_normal_0xc[] = { .encoding = { .mask = 0xffe020e0, .op = 0xcc400000 }, .id = HEX_INS_S2_ASR_R_R_AND, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -25177,9 +24984,9 @@ static const HexInsnTemplate templates_normal_0xc[] = { .encoding = { .mask = 0xffe020e0, .op = 
0xcc800000 }, .id = HEX_INS_S2_ASR_R_R_NAC, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -25191,9 +24998,9 @@ static const HexInsnTemplate templates_normal_0xc[] = { .encoding = { .mask = 0xffe020e0, .op = 0xcc000000 }, .id = HEX_INS_S2_ASR_R_R_OR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -25205,9 +25012,9 @@ static const HexInsnTemplate templates_normal_0xc[] = { .encoding = { .mask = 
0xffe020e0, .op = 0xc6000000 }, .id = HEX_INS_S2_ASR_R_R_SAT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 8 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -25219,9 +25026,9 @@ static const HexInsnTemplate templates_normal_0xc[] = { .encoding = { .mask = 0xffe020e0, .op = 0xc5000040 }, .id = HEX_INS_S2_ASR_R_SVW_TRUN, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ 
-25233,9 +25040,9 @@ static const HexInsnTemplate templates_normal_0xc[] = { .encoding = { .mask = 0xffe020e0, .op = 0xc3400000 }, .id = HEX_INS_S2_ASR_R_VH, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -25247,9 +25054,9 @@ static const HexInsnTemplate templates_normal_0xc[] = { .encoding = { .mask = 0xffe020e0, .op = 0xc3000000 }, .id = HEX_INS_S2_ASR_R_VW, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | 
HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -25261,9 +25068,9 @@ static const HexInsnTemplate templates_normal_0xc[] = { .encoding = { .mask = 0xffe020e0, .op = 0xc1c000c0 }, .id = HEX_INS_S2_CABACDECBIN, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -25275,9 +25082,9 @@ static const HexInsnTemplate templates_normal_0xc[] = { .encoding = { .mask = 0xffe020e0, .op = 0xc6800040 }, .id = HEX_INS_S2_CLRBIT_R, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = 
{ { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -25289,9 +25096,9 @@ static const HexInsnTemplate templates_normal_0xc[] = { .encoding = { .mask = 0xffe020e0, .op = 0xc9000000 }, .id = HEX_INS_S2_EXTRACTU_RP, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -25303,9 +25110,9 @@ static const HexInsnTemplate templates_normal_0xc[] = { .encoding = { .mask = 0xffe020e0, .op = 0xc1000000 }, .id = HEX_INS_S2_EXTRACTUP_RP, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = 
HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -25317,9 +25124,9 @@ static const HexInsnTemplate templates_normal_0xc[] = { .encoding = { .mask = 0xffe020e0, .op = 0xc8000000 }, .id = HEX_INS_S2_INSERT_RP, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -25331,9 +25138,9 @@ static const HexInsnTemplate 
templates_normal_0xc[] = { .encoding = { .mask = 0xffe020e0, .op = 0xca000000 }, .id = HEX_INS_S2_INSERTP_RP, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -25345,9 +25152,9 @@ static const HexInsnTemplate templates_normal_0xc[] = { .encoding = { .mask = 0xffe020e0, .op = 0xc18000c0 }, .id = HEX_INS_S2_LFSP, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 
0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 8 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -25359,9 +25166,9 @@ static const HexInsnTemplate templates_normal_0xc[] = { .encoding = { .mask = 0xffe020e0, .op = 0xc38000c0 }, .id = HEX_INS_S2_LSL_R_P, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 8 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -25373,9 +25180,9 @@ static const HexInsnTemplate templates_normal_0xc[] = { .encoding = { .mask = 0xffe020e0, .op = 0xcbc000c0 }, .id = HEX_INS_S2_LSL_R_P_ACC, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = 
HEX_REG_CLASS_DOUBLE_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -25387,9 +25194,9 @@ static const HexInsnTemplate templates_normal_0xc[] = { .encoding = { .mask = 0xffe020e0, .op = 0xcb4000c0 }, .id = HEX_INS_S2_LSL_R_P_AND, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -25401,9 +25208,9 @@ static const HexInsnTemplate templates_normal_0xc[] = { .encoding = { .mask = 0xffe020e0, .op = 0xcb8000c0 }, .id = 
HEX_INS_S2_LSL_R_P_NAC, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -25415,9 +25222,9 @@ static const HexInsnTemplate templates_normal_0xc[] = { .encoding = { .mask = 0xffe020e0, .op = 0xcb0000c0 }, .id = HEX_INS_S2_LSL_R_P_OR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 8 }, + { .info = 
HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -25429,9 +25236,9 @@ static const HexInsnTemplate templates_normal_0xc[] = { .encoding = { .mask = 0xffe020e0, .op = 0xcb6000c0 }, .id = HEX_INS_S2_LSL_R_P_XOR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -25443,9 +25250,9 @@ static const HexInsnTemplate templates_normal_0xc[] = { .encoding = { .mask = 0xffe020e0, .op = 0xc64000c0 }, .id = HEX_INS_S2_LSL_R_R, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', 
.syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 8 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -25457,9 +25264,9 @@ static const HexInsnTemplate templates_normal_0xc[] = { .encoding = { .mask = 0xffe020e0, .op = 0xccc000c0 }, .id = HEX_INS_S2_LSL_R_R_ACC, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -25471,9 +25278,9 @@ static const HexInsnTemplate templates_normal_0xc[] = { .encoding = { .mask = 0xffe020e0, .op = 0xcc4000c0 }, .id = HEX_INS_S2_LSL_R_R_AND, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = 
HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -25485,9 +25292,9 @@ static const HexInsnTemplate templates_normal_0xc[] = { .encoding = { .mask = 0xffe020e0, .op = 0xcc8000c0 }, .id = HEX_INS_S2_LSL_R_R_NAC, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -25499,9 +25306,9 @@ static const HexInsnTemplate templates_normal_0xc[] = { .encoding = { .mask = 0xffe020e0, .op = 0xcc0000c0 }, .id = HEX_INS_S2_LSL_R_R_OR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 
} }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -25513,9 +25320,9 @@ static const HexInsnTemplate templates_normal_0xc[] = { .encoding = { .mask = 0xffe020e0, .op = 0xc34000c0 }, .id = HEX_INS_S2_LSL_R_VH, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -25527,9 +25334,9 @@ static const HexInsnTemplate templates_normal_0xc[] = { .encoding = { .mask = 0xffe020e0, .op = 0xc30000c0 }, .id = HEX_INS_S2_LSL_R_VW, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = 
HEX_REG_CLASS_DOUBLE_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -25541,9 +25348,9 @@ static const HexInsnTemplate templates_normal_0xc[] = { .encoding = { .mask = 0xffe020e0, .op = 0xc3800040 }, .id = HEX_INS_S2_LSR_R_P, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 8 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -25555,9 +25362,9 @@ static const HexInsnTemplate templates_normal_0xc[] = { .encoding = { .mask = 0xffe020e0, .op = 0xcbc00040 }, .id = HEX_INS_S2_LSR_R_P_ACC, 
.ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -25569,9 +25376,9 @@ static const HexInsnTemplate templates_normal_0xc[] = { .encoding = { .mask = 0xffe020e0, .op = 0xcb400040 }, .id = HEX_INS_S2_LSR_R_P_AND, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, 
.masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -25583,9 +25390,9 @@ static const HexInsnTemplate templates_normal_0xc[] = { .encoding = { .mask = 0xffe020e0, .op = 0xcb800040 }, .id = HEX_INS_S2_LSR_R_P_NAC, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -25597,9 +25404,9 @@ static const HexInsnTemplate templates_normal_0xc[] = { .encoding = { .mask = 0xffe020e0, .op = 0xcb000040 }, .id = HEX_INS_S2_LSR_R_P_OR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, 
.masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -25611,9 +25418,9 @@ static const HexInsnTemplate templates_normal_0xc[] = { .encoding = { .mask = 0xffe020e0, .op = 0xcb600040 }, .id = HEX_INS_S2_LSR_R_P_XOR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -25625,9 +25432,9 @@ static const HexInsnTemplate templates_normal_0xc[] = { .encoding = { .mask = 0xffe020e0, .op = 0xc6400040 }, .id = HEX_INS_S2_LSR_R_R, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, 
.syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 8 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -25639,9 +25446,9 @@ static const HexInsnTemplate templates_normal_0xc[] = { .encoding = { .mask = 0xffe020e0, .op = 0xccc00040 }, .id = HEX_INS_S2_LSR_R_R_ACC, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -25653,9 +25460,9 @@ static const HexInsnTemplate templates_normal_0xc[] = { .encoding = { .mask = 0xffe020e0, .op = 0xcc400040 }, .id = HEX_INS_S2_LSR_R_R_AND, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = 
HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -25667,9 +25474,9 @@ static const HexInsnTemplate templates_normal_0xc[] = { .encoding = { .mask = 0xffe020e0, .op = 0xcc800040 }, .id = HEX_INS_S2_LSR_R_R_NAC, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -25681,9 +25488,9 @@ static const HexInsnTemplate templates_normal_0xc[] = { .encoding = { .mask = 0xffe020e0, .op = 0xcc000040 }, .id = HEX_INS_S2_LSR_R_R_OR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 
} }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -25695,9 +25502,9 @@ static const HexInsnTemplate templates_normal_0xc[] = { .encoding = { .mask = 0xffe020e0, .op = 0xc3400040 }, .id = HEX_INS_S2_LSR_R_VH, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -25709,9 +25516,9 @@ static const HexInsnTemplate templates_normal_0xc[] = { .encoding = { .mask = 0xffe020e0, .op = 0xc3000040 }, .id = HEX_INS_S2_LSR_R_VW, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | 
HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -25723,9 +25530,9 @@ static const HexInsnTemplate templates_normal_0xc[] = { .encoding = { .mask = 0xffe020e0, .op = 0xc6800000 }, .id = HEX_INS_S2_SETBIT_R, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -25737,9 +25544,9 @@ static const HexInsnTemplate 
templates_normal_0xc[] = { .encoding = { .mask = 0xffe020e0, .op = 0xc1000040 }, .id = HEX_INS_S2_SHUFFEB, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -25751,9 +25558,9 @@ static const HexInsnTemplate templates_normal_0xc[] = { .encoding = { .mask = 0xffe020e0, .op = 0xc10000c0 }, .id = HEX_INS_S2_SHUFFEH, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax 
= 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -25765,9 +25572,9 @@ static const HexInsnTemplate templates_normal_0xc[] = { .encoding = { .mask = 0xffe020e0, .op = 0xc1000080 }, .id = HEX_INS_S2_SHUFFOB, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -25779,9 +25586,9 @@ static const HexInsnTemplate templates_normal_0xc[] = { .encoding = { .mask = 0xffe020e0, .op = 0xc1800000 }, .id = HEX_INS_S2_SHUFFOH, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | 
HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -25793,9 +25600,9 @@ static const HexInsnTemplate templates_normal_0xc[] = { .encoding = { .mask = 0xffe020e0, .op = 0xc6800080 }, .id = HEX_INS_S2_TOGGLEBIT_R, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 14 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -25807,9 +25614,9 @@ static const HexInsnTemplate templates_normal_0xc[] = { .encoding = { .mask = 0xffe020fc, .op = 0xc7000000 }, .id = 
HEX_INS_S2_TSTBIT_R, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -25821,10 +25628,10 @@ static const HexInsnTemplate templates_normal_0xc[] = { .encoding = { .mask = 0xffe02000, .op = 0xc0000000 }, .id = HEX_INS_S2_VALIGNIB, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 5 } }, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 11 }, + { 
.info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 5 } }, .isa_id = 'u', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -25836,10 +25643,10 @@ static const HexInsnTemplate templates_normal_0xc[] = { .encoding = { .mask = 0xffe02080, .op = 0xc2000000 }, .id = HEX_INS_S2_VALIGNRB, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'u', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -25851,9 +25658,9 @@ static const HexInsnTemplate templates_normal_0xc[] = { .encoding = { .mask = 0xffe020e0, .op = 0xc3c00040 }, .id = HEX_INS_S2_VCNEGH, .ops = { - { .info = 
HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -25865,9 +25672,9 @@ static const HexInsnTemplate templates_normal_0xc[] = { .encoding = { .mask = 0xffe020e0, .op = 0xc3c00000 }, .id = HEX_INS_S2_VCROTATE, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } 
}, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -25879,9 +25686,9 @@ static const HexInsnTemplate templates_normal_0xc[] = { .encoding = { .mask = 0xffe020e0, .op = 0xcb2020e0 }, .id = HEX_INS_S2_VRCNEGH, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -25893,10 +25700,10 @@ static const HexInsnTemplate templates_normal_0xc[] = { .encoding = { .mask = 0xffe02000, .op = 0xc0800000 }, .id = HEX_INS_S2_VSPLICEIB, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks 
= { { 0x3, 5 } }, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 5 } }, .isa_id = 'u', .syntax = 14 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -25908,10 +25715,10 @@ static const HexInsnTemplate templates_normal_0xc[] = { .encoding = { .mask = 0xffe02080, .op = 0xc2800000 }, .id = HEX_INS_S2_VSPLICERB, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, 
.isa_id = 't', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'u', .syntax = 14 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -25923,9 +25730,9 @@ static const HexInsnTemplate templates_normal_0xc[] = { .encoding = { .mask = 0xffe020e0, .op = 0xc1800040 }, .id = HEX_INS_S2_VTRUNEWH, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -25937,9 +25744,9 @@ static const HexInsnTemplate templates_normal_0xc[] = { .encoding = { .mask = 0xffe020e0, .op = 0xc1800080 }, .id = HEX_INS_S2_VTRUNOWH, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, 
.masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -25951,9 +25758,9 @@ static const HexInsnTemplate templates_normal_0xc[] = { .encoding = { .mask = 0xffe020e0, .op = 0xc9000040 }, .id = HEX_INS_S4_EXTRACT_RP, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -25965,9 +25772,9 @@ static const HexInsnTemplate templates_normal_0xc[] = { .encoding = { .mask = 0xffe020e0, .op = 0xc1c00080 }, .id = HEX_INS_S4_EXTRACTP_RP, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | 
HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -25979,9 +25786,9 @@ static const HexInsnTemplate templates_normal_0xc[] = { .encoding = { .mask = 0xffe020c0, .op = 0xc68000c0 }, .id = HEX_INS_S4_LSLI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 5 }, { 0x5, 16 } }, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 5 }, { 0x5, 16 } }, .isa_id = 's', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, 
.isa_id = 't', .syntax = 8 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -25993,9 +25800,9 @@ static const HexInsnTemplate templates_normal_0xc[] = { .encoding = { .mask = 0xffe020fc, .op = 0xc7200000 }, .id = HEX_INS_S4_NTSTBIT_R, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -26007,10 +25814,10 @@ static const HexInsnTemplate templates_normal_0xc[] = { .encoding = { .mask = 0xffe000c0, .op = 0xc3c000c0 }, .id = HEX_INS_S4_VRCROTATE, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 14 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 5 }, { 0x1, 13 } }, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, 
.isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 5 }, { 0x1, 13 } }, .isa_id = 'u', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -26022,10 +25829,10 @@ static const HexInsnTemplate templates_normal_0xc[] = { .encoding = { .mask = 0xffe000c0, .op = 0xcba00000 }, .id = HEX_INS_S4_VRCROTATE_ACC, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 14 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 15 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 5 }, { 0x1, 13 } }, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x1, 5 }, { 0x1, 13 } }, .isa_id = 'u', .syntax = 16 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -26037,9 +25844,9 @@ static const 
HexInsnTemplate templates_normal_0xc[] = { .encoding = { .mask = 0xffe020e0, .op = 0xc1400080 }, .id = HEX_INS_S4_VXADDSUBH, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 14 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -26051,9 +25858,9 @@ static const HexInsnTemplate templates_normal_0xc[] = { .encoding = { .mask = 0xffe020e0, .op = 0xc1c00000 }, .id = HEX_INS_S4_VXADDSUBHR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, 
.isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 14 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -26065,9 +25872,9 @@ static const HexInsnTemplate templates_normal_0xc[] = { .encoding = { .mask = 0xffe020e0, .op = 0xc1400000 }, .id = HEX_INS_S4_VXADDSUBW, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 14 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -26079,9 +25886,9 @@ static const HexInsnTemplate templates_normal_0xc[] = { .encoding = { .mask = 0xffe020e0, .op = 0xc14000c0 }, .id = HEX_INS_S4_VXSUBADDH, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = 
HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 14 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -26093,9 +25900,9 @@ static const HexInsnTemplate templates_normal_0xc[] = { .encoding = { .mask = 0xffe020e0, .op = 0xc1c00040 }, .id = HEX_INS_S4_VXSUBADDHR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, 
.isa_id = 't', .syntax = 14 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -26107,9 +25914,9 @@ static const HexInsnTemplate templates_normal_0xc[] = { .encoding = { .mask = 0xffe020e0, .op = 0xc1400040 }, .id = HEX_INS_S4_VXSUBADDW, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 14 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -26121,9 +25928,9 @@ static const HexInsnTemplate templates_normal_0xc[] = { .encoding = { .mask = 0xffe020e0, .op = 0xc1800060 }, .id = HEX_INS_S6_VTRUNEHB_PPP, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | 
HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -26135,9 +25942,9 @@ static const HexInsnTemplate templates_normal_0xc[] = { .encoding = { .mask = 0xffe020e0, .op = 0xc18000a0 }, .id = HEX_INS_S6_VTRUNOHB_PPP, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -26153,9 +25960,9 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xffe020e0, .op = 0xd5400060 }, .id = HEX_INS_A2_ADDH_H16_HH, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, 
.masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -26167,9 +25974,9 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xffe020e0, .op = 0xd5400040 }, .id = HEX_INS_A2_ADDH_H16_HL, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -26181,9 +25988,9 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xffe020e0, .op = 0xd5400020 }, .id = HEX_INS_A2_ADDH_H16_LH, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | 
HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -26195,9 +26002,9 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xffe020e0, .op = 0xd5400000 }, .id = HEX_INS_A2_ADDH_H16_LL, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -26209,9 +26016,9 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xffe020e0, .op = 0xd54000e0 }, .id = HEX_INS_A2_ADDH_H16_SAT_HH, .ops = { - { .info = 
HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -26223,9 +26030,9 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xffe020e0, .op = 0xd54000c0 }, .id = HEX_INS_A2_ADDH_H16_SAT_HL, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -26237,9 +26044,9 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xffe020e0, .op = 0xd54000a0 }, .id = 
HEX_INS_A2_ADDH_H16_SAT_LH, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -26251,9 +26058,9 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xffe020e0, .op = 0xd5400080 }, .id = HEX_INS_A2_ADDH_H16_SAT_LL, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -26265,9 +26072,9 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 
0xffe020e0, .op = 0xd5000040 }, .id = HEX_INS_A2_ADDH_L16_HL, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -26279,9 +26086,9 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xffe020e0, .op = 0xd5000000 }, .id = HEX_INS_A2_ADDH_L16_LL, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -26293,9 +26100,9 @@ static const HexInsnTemplate templates_normal_0xd[] = { 
.encoding = { .mask = 0xffe020e0, .op = 0xd50000c0 }, .id = HEX_INS_A2_ADDH_L16_SAT_HL, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -26307,9 +26114,9 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xffe020e0, .op = 0xd5000080 }, .id = HEX_INS_A2_ADDH_L16_SAT_LL, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -26321,9 +26128,9 @@ static const 
HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xffe020e0, .op = 0xd30000e0 }, .id = HEX_INS_A2_ADDP, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 8 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -26335,9 +26142,9 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xffe020e0, .op = 0xd36000a0 }, .id = HEX_INS_A2_ADDPSAT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', 
.syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 8 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -26349,9 +26156,9 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xffe020e0, .op = 0xd36000e0 }, .id = HEX_INS_A2_ADDSPH, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 8 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -26363,9 +26170,9 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xffe020e0, .op = 0xd36000c0 }, .id = HEX_INS_A2_ADDSPL, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | 
HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 8 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -26377,9 +26184,9 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xffe020e0, .op = 0xd3e00000 }, .id = HEX_INS_A2_ANDP, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 8 }, }, .pred 
= HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -26391,9 +26198,9 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xffe020e0, .op = 0xd5c00000 }, .id = HEX_INS_A2_MAX, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 8 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -26405,9 +26212,9 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xffe020e0, .op = 0xd3c00080 }, .id = HEX_INS_A2_MAXP, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = 
HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 8 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -26419,9 +26226,9 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xffe020e0, .op = 0xd5c00080 }, .id = HEX_INS_A2_MAXU, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -26433,9 +26240,9 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xffe020e0, .op = 0xd3c000a0 }, .id = HEX_INS_A2_MAXUP, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | 
HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -26447,9 +26254,9 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xffe020e0, .op = 0xd5a00000 }, .id = HEX_INS_A2_MIN, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -26461,9 +26268,9 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xffe020e0, .op = 0xd3a000c0 }, .id = HEX_INS_A2_MINP, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 7 }, - { 
.info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 8 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -26475,9 +26282,9 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xffe020e0, .op = 0xd5a00080 }, .id = HEX_INS_A2_MINU, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -26489,9 +26296,9 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xffe020e0, .op = 0xd3a000e0 }, .id = HEX_INS_A2_MINUP, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 
0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -26503,9 +26310,9 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xffe020e0, .op = 0xd3e00040 }, .id = HEX_INS_A2_ORP, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, 
.masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 7 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -26517,9 +26324,9 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xffe020e0, .op = 0xd5600060 }, .id = HEX_INS_A2_SUBH_H16_HH, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -26531,9 +26338,9 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xffe020e0, .op = 0xd5600040 }, .id = HEX_INS_A2_SUBH_H16_HL, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 7 }, + { .info = 
HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -26545,9 +26352,9 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xffe020e0, .op = 0xd5600020 }, .id = HEX_INS_A2_SUBH_H16_LH, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -26559,9 +26366,9 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xffe020e0, .op = 0xd5600000 }, .id = HEX_INS_A2_SUBH_H16_LL, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', 
.syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -26573,9 +26380,9 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xffe020e0, .op = 0xd56000e0 }, .id = HEX_INS_A2_SUBH_H16_SAT_HH, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -26587,9 +26394,9 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xffe020e0, .op = 0xd56000c0 }, .id = HEX_INS_A2_SUBH_H16_SAT_HL, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = 
HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -26601,9 +26408,9 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xffe020e0, .op = 0xd56000a0 }, .id = HEX_INS_A2_SUBH_H16_SAT_LH, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -26615,9 +26422,9 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xffe020e0, .op = 0xd5600080 }, .id = HEX_INS_A2_SUBH_H16_SAT_LL, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks 
= { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -26629,9 +26436,9 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xffe020e0, .op = 0xd5200040 }, .id = HEX_INS_A2_SUBH_L16_HL, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -26643,9 +26450,9 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xffe020e0, .op = 0xd5200000 }, .id = HEX_INS_A2_SUBH_L16_LL, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = 
HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -26657,9 +26464,9 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xffe020e0, .op = 0xd52000c0 }, .id = HEX_INS_A2_SUBH_L16_SAT_HL, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -26671,9 +26478,9 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xffe020e0, .op = 0xd5200080 }, .id = HEX_INS_A2_SUBH_L16_SAT_LL, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 
'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -26685,9 +26492,9 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xffe020e0, .op = 0xd32000e0 }, .id = HEX_INS_A2_SUBP, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 8 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -26699,9 +26506,9 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xffe020e0, .op = 0xd3000040 }, .id = HEX_INS_A2_VADDH, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = 
HEX_REG_CLASS_DOUBLE_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -26713,9 +26520,9 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xffe020e0, .op = 0xd3000060 }, .id = HEX_INS_A2_VADDHS, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -26727,9 +26534,9 @@ 
static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xffe020e0, .op = 0xd3000000 }, .id = HEX_INS_A2_VADDUB, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -26741,9 +26548,9 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xffe020e0, .op = 0xd3000020 }, .id = HEX_INS_A2_VADDUBS, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = 
HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -26755,9 +26562,9 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xffe020e0, .op = 0xd3000080 }, .id = HEX_INS_A2_VADDUHS, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -26769,9 +26576,9 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xffe020e0, .op = 0xd30000a0 }, .id = HEX_INS_A2_VADDW, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - 
{ .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -26783,9 +26590,9 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xffe020e0, .op = 0xd30000c0 }, .id = HEX_INS_A2_VADDWS, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = 
HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -26797,9 +26604,9 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xffe020e0, .op = 0xd3400040 }, .id = HEX_INS_A2_VAVGH, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -26811,9 +26618,9 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xffe020e0, .op = 0xd3400080 }, .id = HEX_INS_A2_VAVGHCR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 10 }, + { .info = 
HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -26825,9 +26632,9 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xffe020e0, .op = 0xd3400060 }, .id = HEX_INS_A2_VAVGHR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -26839,9 +26646,9 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xffe020e0, .op = 0xd3400000 }, .id = HEX_INS_A2_VAVGUB, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | 
HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -26853,9 +26660,9 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xffe020e0, .op = 0xd3400020 }, .id = HEX_INS_A2_VAVGUBR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', 
.syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -26867,9 +26674,9 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xffe020e0, .op = 0xd34000a0 }, .id = HEX_INS_A2_VAVGUH, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -26881,9 +26688,9 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xffe020e0, .op = 0xd34000c0 }, .id = HEX_INS_A2_VAVGUHR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | 
HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -26895,9 +26702,9 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xffe020e0, .op = 0xd3600060 }, .id = HEX_INS_A2_VAVGUW, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -26909,9 +26716,9 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 
0xffe020e0, .op = 0xd3600080 }, .id = HEX_INS_A2_VAVGUWR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -26923,9 +26730,9 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xffe020e0, .op = 0xd3600000 }, .id = HEX_INS_A2_VAVGW, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | 
HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -26937,9 +26744,9 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xffe020e0, .op = 0xd3600040 }, .id = HEX_INS_A2_VAVGWCR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -26951,9 +26758,9 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xffe020e0, .op = 0xd3600020 }, .id = HEX_INS_A2_VAVGWR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, 
.reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -26965,9 +26772,9 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xffe020fc, .op = 0xd20000c0 }, .id = HEX_INS_A2_VCMPBEQ, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -26979,9 +26786,9 @@ static const HexInsnTemplate templates_normal_0xd[] = 
{ .encoding = { .mask = 0xffe020fc, .op = 0xd20000e0 }, .id = HEX_INS_A2_VCMPBGTU, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 14 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -26993,9 +26800,9 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xffe020fc, .op = 0xd2000060 }, .id = HEX_INS_A2_VCMPHEQ, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', 
.syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -27007,9 +26814,9 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xffe020fc, .op = 0xd2000080 }, .id = HEX_INS_A2_VCMPHGT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -27021,9 +26828,9 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xffe020fc, .op = 0xd20000a0 }, .id = HEX_INS_A2_VCMPHGTU, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 14 }, + { 
.info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 14 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -27035,9 +26842,9 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xffe020fc, .op = 0xd2000000 }, .id = HEX_INS_A2_VCMPWEQ, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -27049,9 +26856,9 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xffe020fc, .op = 0xd2000020 }, .id = HEX_INS_A2_VCMPWGT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 0 }, - 
{ .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -27063,9 +26870,9 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xffe020fc, .op = 0xd2000040 }, .id = HEX_INS_A2_VCMPWGTU, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 14 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -27077,9 
+26884,9 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xffe020e0, .op = 0xd3c000c0 }, .id = HEX_INS_A2_VMAXB, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -27091,9 +26898,9 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xffe020e0, .op = 0xd3c00020 }, .id = HEX_INS_A2_VMAXH, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = 
HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -27105,9 +26912,9 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xffe020e0, .op = 0xd3c00000 }, .id = HEX_INS_A2_VMAXUB, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -27119,9 +26926,9 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xffe020e0, .op = 0xd3c00040 }, .id = HEX_INS_A2_VMAXUH, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - 
{ .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -27133,9 +26940,9 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xffe020e0, .op = 0xd3a000a0 }, .id = HEX_INS_A2_VMAXUW, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = 
HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -27147,9 +26954,9 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xffe020e0, .op = 0xd3c00060 }, .id = HEX_INS_A2_VMAXW, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -27161,9 +26968,9 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xffe020e0, .op = 0xd3c000e0 }, .id = HEX_INS_A2_VMINB, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 10 }, + { .info = 
HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -27175,9 +26982,9 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xffe020e0, .op = 0xd3a00020 }, .id = HEX_INS_A2_VMINH, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -27189,9 +26996,9 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xffe020e0, .op = 0xd3a00000 }, .id = HEX_INS_A2_VMINUB, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | 
HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -27203,9 +27010,9 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xffe020e0, .op = 0xd3a00040 }, .id = HEX_INS_A2_VMINUH, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', 
.syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -27217,9 +27024,9 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xffe020e0, .op = 0xd3a00080 }, .id = HEX_INS_A2_VMINUW, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -27231,9 +27038,9 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xffe020e0, .op = 0xd3a00060 }, .id = HEX_INS_A2_VMINW, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | 
HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -27245,9 +27052,9 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xffe020e0, .op = 0xd3800000 }, .id = HEX_INS_A2_VNAVGH, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -27259,9 +27066,9 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 
0xffe020e0, .op = 0xd3800040 }, .id = HEX_INS_A2_VNAVGHCR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -27273,9 +27080,9 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xffe020e0, .op = 0xd3800020 }, .id = HEX_INS_A2_VNAVGHR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | 
HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -27287,9 +27094,9 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xffe020e0, .op = 0xd3800060 }, .id = HEX_INS_A2_VNAVGW, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -27301,9 +27108,9 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xffe020e0, .op = 0xd38000c0 }, .id = HEX_INS_A2_VNAVGWCR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, 
.reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -27315,9 +27122,9 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xffe020e0, .op = 0xd3800080 }, .id = HEX_INS_A2_VNAVGWR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ 
-27329,9 +27136,9 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xffe020e0, .op = 0xd3200040 }, .id = HEX_INS_A2_VSUBH, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -27343,9 +27150,9 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xffe020e0, .op = 0xd3200060 }, .id = HEX_INS_A2_VSUBHS, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = 
HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -27357,9 +27164,9 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xffe020e0, .op = 0xd3200000 }, .id = HEX_INS_A2_VSUBUB, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -27371,9 +27178,9 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xffe020e0, .op = 0xd3200020 }, .id = HEX_INS_A2_VSUBUBS, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - 
{ .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -27385,9 +27192,9 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xffe020e0, .op = 0xd3200080 }, .id = HEX_INS_A2_VSUBUHS, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = 
HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -27399,9 +27206,9 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xffe020e0, .op = 0xd32000a0 }, .id = HEX_INS_A2_VSUBW, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -27413,9 +27220,9 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xffe020e0, .op = 0xd32000c0 }, .id = HEX_INS_A2_VSUBWS, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 10 }, + { .info = 
HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -27427,9 +27234,9 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xffe020e0, .op = 0xd3e00080 }, .id = HEX_INS_A2_XORP, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 8 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -27441,9 +27248,9 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xffe020e0, .op = 0xd3e00020 }, .id = HEX_INS_A4_ANDNP, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | 
HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -27455,9 +27262,9 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xffe020e0, .op = 0xd4200000 }, .id = HEX_INS_A4_BITSPLIT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = 
HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -27469,9 +27276,9 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xffe020fc, .op = 0xd20020a0 }, .id = HEX_INS_A4_BOUNDSCHECK_HI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 15 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 16 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -27483,9 +27290,9 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xffe020fc, .op = 0xd2002080 }, .id = HEX_INS_A4_BOUNDSCHECK_LO, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 15 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = 
HEX_REG_CLASS_PRED_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 16 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -27497,9 +27304,9 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xffe0201c, .op = 0xdd000000 }, .id = HEX_INS_A4_CMPBEQI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x8, 5 } }, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x8, 5 } }, .isa_id = 'u', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -27511,9 +27318,9 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xffe0201c, .op = 0xdd200000 }, .id = HEX_INS_A4_CMPBGTI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 
0x8, 5 } }, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x8, 5 } }, .isa_id = 's', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -27525,9 +27332,9 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xffe0301c, .op = 0xdd400000 }, .id = HEX_INS_A4_CMPBGTUI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 5 } }, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 5 } }, .isa_id = 'u', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -27539,9 +27346,9 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xffe0201c, .op = 0xdd000008 }, .id = HEX_INS_A4_CMPHEQI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 
} }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x8, 5 } }, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x8, 5 } }, .isa_id = 's', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -27553,9 +27360,9 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xffe0201c, .op = 0xdd200008 }, .id = HEX_INS_A4_CMPHGTI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x8, 5 } }, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x8, 5 } }, .isa_id = 's', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -27567,9 +27374,9 @@ static const 
HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xffe0301c, .op = 0xdd400008 }, .id = HEX_INS_A4_CMPHGTUI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 5 } }, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x7, 5 } }, .isa_id = 'u', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -27581,9 +27388,9 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xffe020e0, .op = 0xd3e000e0 }, .id = HEX_INS_A4_MODWRAPU, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 
't', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -27595,9 +27402,9 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xffe020e0, .op = 0xd3e00060 }, .id = HEX_INS_A4_ORNP, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 8 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -27609,9 +27416,9 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xffe020fc, .op = 0xd2002060 }, .id = HEX_INS_A4_TLBMATCH, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 
'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -27623,9 +27430,9 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xffe020fc, .op = 0xd2002000 }, .id = HEX_INS_A4_VCMPBEQ_ANY, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 17 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 18 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -27637,9 +27444,9 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xffe0201c, .op = 0xdc000000 }, .id = HEX_INS_A4_VCMPBEQI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 12 }, - { .info = 
HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x8, 5 } }, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x8, 5 } }, .isa_id = 'u', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -27651,9 +27458,9 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xffe020fc, .op = 0xd2002040 }, .id = HEX_INS_A4_VCMPBGT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -27665,9 +27472,9 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xffe0201c, .op = 0xdc200000 }, .id = HEX_INS_A4_VCMPBGTI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, 
.masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x8, 5 } }, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x8, 5 } }, .isa_id = 's', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -27679,9 +27486,9 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xffe0301c, .op = 0xdc400000 }, .id = HEX_INS_A4_VCMPBGTUI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x7, 5 } }, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x7, 5 } }, .isa_id = 'u', .syntax = 14 }, }, .pred = HEX_NOPRED, .cond = 
RZ_TYPE_COND_AL, @@ -27693,9 +27500,9 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xffe0201c, .op = 0xdc000008 }, .id = HEX_INS_A4_VCMPHEQI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x8, 5 } }, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x8, 5 } }, .isa_id = 's', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -27707,9 +27514,9 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xffe0201c, .op = 0xdc200008 }, .id = HEX_INS_A4_VCMPHGTI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x8, 5 } }, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | 
HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x8, 5 } }, .isa_id = 's', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -27721,9 +27528,9 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xffe0301c, .op = 0xdc400008 }, .id = HEX_INS_A4_VCMPHGTUI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x7, 5 } }, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x7, 5 } }, .isa_id = 'u', .syntax = 14 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -27735,9 +27542,9 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xffe0201c, .op = 0xdc000010 }, .id = HEX_INS_A4_VCMPWEQI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | 
HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x8, 5 } }, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x8, 5 } }, .isa_id = 's', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -27749,9 +27556,9 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xffe0201c, .op = 0xdc200010 }, .id = HEX_INS_A4_VCMPWGTI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x8, 5 } }, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x8, 5 } }, .isa_id = 's', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -27763,9 +27570,9 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xffe0301c, .op = 0xdc400010 }, .id = HEX_INS_A4_VCMPWGTUI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | 
HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x7, 5 } }, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x7, 5 } }, .isa_id = 'u', .syntax = 14 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -27777,9 +27584,9 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xffe020fc, .op = 0xd2002020 }, .id = HEX_INS_A6_VCMPBEQ_NOTANY, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 18 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 19 }, }, .pred = 
HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -27791,9 +27598,9 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xffe020fc, .op = 0xd2800000 }, .id = HEX_INS_C2_CMPEQP, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -27805,9 +27612,9 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xffe020fc, .op = 0xd2800040 }, .id = HEX_INS_C2_CMPGTP, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | 
HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -27819,9 +27626,9 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xffe020fc, .op = 0xd2800080 }, .id = HEX_INS_C2_CMPGTUP, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -27833,10 +27640,10 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xffe02080, .op = 0xd1000000 }, .id = HEX_INS_C2_VMUX, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | 
HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'u', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -27848,9 +27655,9 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xffe03c1c, .op = 0xdc800010 }, .id = HEX_INS_F2_DFCLASS, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x5, 5 } }, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x5, 5 } }, .isa_id = 'u', .syntax = 12 }, }, 
.pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -27862,9 +27669,9 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xffe020fc, .op = 0xd2e00000 }, .id = HEX_INS_F2_DFCMPEQ, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -27876,9 +27683,9 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xffe020fc, .op = 0xd2e00040 }, .id = HEX_INS_F2_DFCMPGE, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = 
HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -27890,9 +27697,9 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xffe020fc, .op = 0xd2e00020 }, .id = HEX_INS_F2_DFCMPGT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -27904,9 +27711,9 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xffe020fc, .op = 0xd2e00060 }, .id = HEX_INS_F2_DFCMPUO, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 12 }, - { .info = 
HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -27918,8 +27725,8 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xffdf0000, .op = 0xd9400000 }, .id = HEX_INS_F2_DFIMM_N, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x9, 5 }, { 0x1, 21 } }, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x9, 5 }, { 0x1, 21 } }, .isa_id = 'u', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -27931,8 +27738,8 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xffdf0000, .op = 0xd9000000 }, .id = HEX_INS_F2_DFIMM_P, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x9, 5 }, { 0x1, 21 } 
}, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x9, 5 }, { 0x1, 21 } }, .isa_id = 'u', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -27944,8 +27751,8 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xffdf0000, .op = 0xd6400000 }, .id = HEX_INS_F2_SFIMM_N, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x9, 5 }, { 0x1, 21 } }, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x9, 5 }, { 0x1, 21 } }, .isa_id = 'u', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -27957,8 +27764,8 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xffdf0000, .op = 0xd6000000 }, .id = HEX_INS_F2_SFIMM_P, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x9, 5 }, { 0x1, 21 } }, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x9, 5 }, { 0x1, 21 } }, .isa_id = 'u', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ 
-27970,10 +27777,10 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xff000000, .op = 0xd8000000 }, .id = HEX_INS_M4_MPYRI_ADDI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x3, 5 }, { 0x1, 13 }, { 0x2, 21 } }, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x5, 0 }, { 0x1, 23 } }, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x3, 5 }, { 0x1, 13 }, { 0x2, 21 } }, .isa_id = 'u', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x5, 0 }, { 0x1, 23 } }, .isa_id = 'U', .syntax = 14 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -27985,10 +27792,10 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xff800000, .op = 0xdf800000 }, .id = HEX_INS_M4_MPYRI_ADDR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | 
HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x3, 5 }, { 0x1, 13 }, { 0x2, 21 } }, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'u', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x3, 5 }, { 0x1, 13 }, { 0x2, 21 } }, .isa_id = 'u', .syntax = 14 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -28000,10 +27807,10 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xff800000, .op = 0xdf000000 }, .id = HEX_INS_M4_MPYRI_ADDR_U2, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 5 }, { 0x1, 13 }, { 0x2, 21 } }, .imm_scale = 2, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'u', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x3, 5 }, { 0x1, 13 }, { 0x2, 21 } }, .isa_id = 'u', .imm_scale = 2, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, 
.reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 14 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -28015,10 +27822,10 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xff800000, .op = 0xd7000000 }, .id = HEX_INS_M4_MPYRR_ADDI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x3, 5 }, { 0x1, 13 }, { 0x2, 21 } }, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x3, 5 }, { 0x1, 13 }, { 0x2, 21 } }, .isa_id = 'u', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 14 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -28030,9 +27837,9 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xffe020e0, .op = 0xd0000000 }, .id = HEX_INS_S2_PARITYP, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 10 }, - { .info = 
HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -28044,10 +27851,10 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xff800000, .op = 0xdb000000 }, .id = HEX_INS_S4_ADDADDI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x3, 5 }, { 0x1, 13 }, { 0x2, 21 } }, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'u', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, 
.masks = { { 0x3, 5 }, { 0x1, 13 }, { 0x2, 21 } }, .isa_id = 's', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -28059,10 +27866,10 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xff000017, .op = 0xde000004 }, .id = HEX_INS_S4_ADDI_ASL_RI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x1, 3 }, { 0x3, 5 }, { 0x1, 13 }, { 0x3, 21 } }, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x5, 8 } }, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x1, 3 }, { 0x3, 5 }, { 0x1, 13 }, { 0x3, 21 } }, .isa_id = 'u', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x5, 8 } }, .isa_id = 'U', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -28074,10 +27881,10 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xff000017, .op = 0xde000014 }, .id = HEX_INS_S4_ADDI_LSR_RI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x1, 3 }, { 
0x3, 5 }, { 0x1, 13 }, { 0x3, 21 } }, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x5, 8 } }, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x1, 3 }, { 0x3, 5 }, { 0x1, 13 }, { 0x3, 21 } }, .isa_id = 'u', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x5, 8 } }, .isa_id = 'U', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -28089,10 +27896,10 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xff000017, .op = 0xde000000 }, .id = HEX_INS_S4_ANDI_ASL_RI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x1, 3 }, { 0x3, 5 }, { 0x1, 13 }, { 0x3, 21 } }, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x5, 8 } }, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x1, 3 }, { 0x3, 5 }, { 
0x1, 13 }, { 0x3, 21 } }, .isa_id = 'u', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x5, 8 } }, .isa_id = 'U', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -28104,10 +27911,10 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xff000017, .op = 0xde000010 }, .id = HEX_INS_S4_ANDI_LSR_RI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x1, 3 }, { 0x3, 5 }, { 0x1, 13 }, { 0x3, 21 } }, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x5, 8 } }, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x1, 3 }, { 0x3, 5 }, { 0x1, 13 }, { 0x3, 21 } }, .isa_id = 'u', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x5, 8 } }, .isa_id = 'U', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -28119,9 +27926,9 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xffc00000, .op = 0xda000000 }, .id = HEX_INS_S4_OR_ANDI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | 
HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x9, 5 }, { 0x1, 21 } }, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x9, 5 }, { 0x1, 21 } }, .isa_id = 's', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -28133,10 +27940,10 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xffc00000, .op = 0xda400000 }, .id = HEX_INS_S4_OR_ANDIX, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x9, 5 }, { 0x1, 21 } }, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = 
HEX_REG_CLASS_INT_REGS, .isa_id = 'u', .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x9, 5 }, { 0x1, 21 } }, .isa_id = 's', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -28148,9 +27955,9 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xffc00000, .op = 0xda800000 }, .id = HEX_INS_S4_OR_ORI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x9, 5 }, { 0x1, 21 } }, .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x9, 5 }, { 0x1, 21 } }, .isa_id = 's', .syntax = 8 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -28162,10 +27969,10 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xff000017, .op = 0xde000002 }, .id = HEX_INS_S4_ORI_ASL_RI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = 
HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x1, 3 }, { 0x3, 5 }, { 0x1, 13 }, { 0x3, 21 } }, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x5, 8 } }, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x1, 3 }, { 0x3, 5 }, { 0x1, 13 }, { 0x3, 21 } }, .isa_id = 'u', .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x5, 8 } }, .isa_id = 'U', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -28177,10 +27984,10 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xff000017, .op = 0xde000012 }, .id = HEX_INS_S4_ORI_LSR_RI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x1, 3 }, { 0x3, 5 }, { 0x1, 13 }, { 0x3, 21 } }, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x5, 8 } }, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = 
HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x1, 3 }, { 0x3, 5 }, { 0x1, 13 }, { 0x3, 21 } }, .isa_id = 'u', .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x5, 8 } }, .isa_id = 'U', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -28192,9 +27999,9 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xffe020e0, .op = 0xd5e00000 }, .id = HEX_INS_S4_PARITY, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -28206,10 +28013,10 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xff800000, .op = 0xdb800000 }, .id = HEX_INS_S4_SUBADDI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | 
HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x3, 5 }, { 0x1, 13 }, { 0x2, 21 } }, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x3, 5 }, { 0x1, 13 }, { 0x2, 21 } }, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'u', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -28221,10 +28028,10 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xff000017, .op = 0xde000006 }, .id = HEX_INS_S4_SUBI_ASL_RI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x1, 3 }, { 0x3, 5 }, { 0x1, 13 }, { 0x3, 21 } }, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x5, 8 } }, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, 
.masks = { { 0x1, 3 }, { 0x3, 5 }, { 0x1, 13 }, { 0x3, 21 } }, .isa_id = 'u', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x5, 8 } }, .isa_id = 'U', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -28236,10 +28043,10 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xff000017, .op = 0xde000016 }, .id = HEX_INS_S4_SUBI_LSR_RI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x1, 3 }, { 0x3, 5 }, { 0x1, 13 }, { 0x3, 21 } }, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x5, 8 } }, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x1, 3 }, { 0x3, 5 }, { 0x1, 13 }, { 0x3, 21 } }, .isa_id = 'u', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM, .masks = { { 0x5, 8 } }, .isa_id = 'U', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -28251,9 +28058,9 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xffe020e0, .op = 0xd5800000 }, .id = HEX_INS_DEP_A2_ADDSAT, .ops = { - { .info = 
HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 8 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -28265,9 +28072,9 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xffe020e0, .op = 0xd5800080 }, .id = HEX_INS_DEP_A2_SUBSAT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -28279,9 +28086,9 @@ static const HexInsnTemplate templates_normal_0xd[] = { .encoding = { .mask = 0xffe020e0, .op = 0xd4000000 }, .id = HEX_INS_DEP_S2_PACKHL, .ops = 
{ - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -28297,9 +28104,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe8400020 }, .id = HEX_INS_A2_VRADDUB, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 
0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -28311,9 +28118,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xea400020 }, .id = HEX_INS_A2_VRADDUB_ACC, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -28325,9 +28132,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe8400040 }, .id = HEX_INS_A2_VRSADUB, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 12 }, + 
{ .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -28339,9 +28146,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xea400040 }, .id = HEX_INS_A2_VRSADUB_ACC, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -28353,10 +28160,10 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe02080, .op = 0xeaa00000 }, .id = HEX_INS_A5_ACS, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | 
HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 1 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'e', .syntax = 1 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -28368,10 +28175,10 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe02080, .op = 0xeae00000 }, .id = HEX_INS_A6_VMINUB_RDP, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 1 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG 
| HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'e', .syntax = 1 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -28383,9 +28190,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe8000060 }, .id = HEX_INS_F2_DFADD, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', 
.syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -28397,9 +28204,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe8200060 }, .id = HEX_INS_F2_DFMAX, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -28411,9 +28218,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe8c00060 }, .id = HEX_INS_F2_DFMIN, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | 
HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -28425,9 +28232,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe8400060 }, .id = HEX_INS_F2_DFMPYFIX, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -28439,9 +28246,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xea800060 }, .id = HEX_INS_F2_DFMPYHH, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { 
{ 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -28453,9 +28260,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xea000060 }, .id = HEX_INS_F2_DFMPYLH, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | 
HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -28467,9 +28274,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe8a00060 }, .id = HEX_INS_F2_DFMPYLL, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -28481,9 +28288,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe8800060 }, .id = HEX_INS_F2_DFSUB, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = 
HEX_REG_CLASS_DOUBLE_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -28495,9 +28302,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xeb000000 }, .id = HEX_INS_F2_SFADD, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -28509,9 +28316,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xebc00020 }, .id = HEX_INS_F2_SFFIXUPD, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, 
.reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -28523,9 +28330,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xebc00000 }, .id = HEX_INS_F2_SFFIXUPN, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -28537,9 +28344,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xef000080 }, .id = HEX_INS_F2_SFFMA, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { 
{ 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -28551,9 +28358,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xef0000c0 }, .id = HEX_INS_F2_SFFMA_LIB, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -28565,10 +28372,10 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe02080, .op = 0xef600080 }, .id = HEX_INS_F2_SFFMA_SC, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = 
HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'u', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -28580,9 +28387,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xef0000a0 }, .id = HEX_INS_F2_SFFMS, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -28594,9 +28401,9 @@ static const HexInsnTemplate 
templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xef0000e0 }, .id = HEX_INS_F2_SFFMS_LIB, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -28608,9 +28415,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xeb800000 }, .id = HEX_INS_F2_SFMAX, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -28622,9 +28429,9 @@ static const 
HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xeb800020 }, .id = HEX_INS_F2_SFMIN, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -28636,9 +28443,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xeb400000 }, .id = HEX_INS_F2_SFMPY, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -28650,10 +28457,10 @@ 
static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe02080, .op = 0xebe00080 }, .id = HEX_INS_F2_SFRECIPA, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 1 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'e', .syntax = 1 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 14 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -28665,9 +28472,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xeb000020 }, .id = HEX_INS_F2_SFSUB, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = 
HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -28679,9 +28486,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xef000020 }, .id = HEX_INS_M2_ACCI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -28693,9 +28500,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe02000, .op = 0xe2000000 }, .id = HEX_INS_M2_ACCII, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x8, 5 } }, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } 
}, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x8, 5 } }, .isa_id = 's', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -28707,9 +28514,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe7000020 }, .id = HEX_INS_M2_CMACI_S0, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -28721,9 +28528,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe7000040 }, .id = HEX_INS_M2_CMACR_S0, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, - { 
.info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -28735,9 +28542,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe70000c0 }, .id = HEX_INS_M2_CMACS_S0, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -28749,9 +28556,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe78000c0 }, .id = HEX_INS_M2_CMACS_S1, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = 
HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -28763,9 +28570,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe74000c0 }, .id = HEX_INS_M2_CMACSC_S0, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -28777,9 +28584,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe7c000c0 }, .id = 
HEX_INS_M2_CMACSC_S1, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -28791,9 +28598,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe5000020 }, .id = HEX_INS_M2_CMPYI_S0, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = 
RZ_TYPE_COND_AL, @@ -28805,9 +28612,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe5000040 }, .id = HEX_INS_M2_CMPYR_S0, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -28819,9 +28626,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xed2000c0 }, .id = HEX_INS_M2_CMPYRS_S0, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = 
HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -28833,9 +28640,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xeda000c0 }, .id = HEX_INS_M2_CMPYRS_S1, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -28847,9 +28654,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xed6000c0 }, .id = HEX_INS_M2_CMPYRSC_S0, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } 
}, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -28861,9 +28668,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xede000c0 }, .id = HEX_INS_M2_CMPYRSC_S1, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -28875,9 +28682,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe50000c0 }, .id = HEX_INS_M2_CMPYS_S0, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id 
= 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -28889,9 +28696,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe58000c0 }, .id = HEX_INS_M2_CMPYS_S1, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -28903,9 +28710,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe54000c0 }, .id = HEX_INS_M2_CMPYSC_S0, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = 
HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -28917,9 +28724,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe5c000c0 }, .id = HEX_INS_M2_CMPYSC_S1, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -28931,9 +28738,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe70000e0 }, .id = HEX_INS_M2_CNACS_S0, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, 
.syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -28945,9 +28752,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe78000e0 }, .id = HEX_INS_M2_CNACS_S1, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -28959,9 +28766,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe74000e0 }, .id = HEX_INS_M2_CNACSC_S0, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 
16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -28973,9 +28780,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe7c000e0 }, .id = HEX_INS_M2_CNACSC_S1, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -28987,9 +28794,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe7000000 }, .id = HEX_INS_M2_DPMPYSS_ACC_S0, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | 
HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -29001,9 +28808,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe7200000 }, .id = HEX_INS_M2_DPMPYSS_NAC_S0, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -29015,9 +28822,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { 
.encoding = { .mask = 0xffe020e0, .op = 0xed200020 }, .id = HEX_INS_M2_DPMPYSS_RND_S0, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 8 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -29029,9 +28836,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe5000000 }, .id = HEX_INS_M2_DPMPYSS_S0, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 8 }, }, .pred = HEX_NOPRED, .cond = 
RZ_TYPE_COND_AL, @@ -29043,9 +28850,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe7400000 }, .id = HEX_INS_M2_DPMPYUU_ACC_S0, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -29057,9 +28864,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe7600000 }, .id = HEX_INS_M2_DPMPYUU_NAC_S0, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax 
= 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -29071,9 +28878,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe5400000 }, .id = HEX_INS_M2_DPMPYUU_S0, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -29085,9 +28892,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xeda00080 }, .id = HEX_INS_M2_HMMPYH_RS1, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = 
HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 8 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -29099,9 +28906,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xeda00000 }, .id = HEX_INS_M2_HMMPYH_S1, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 8 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -29113,9 +28920,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xede00080 }, .id = HEX_INS_M2_HMMPYL_RS1, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 
}, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 8 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -29127,9 +28934,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xeda00020 }, .id = HEX_INS_M2_HMMPYL_S1, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 8 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -29141,9 +28948,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xef000000 }, .id = HEX_INS_M2_MACI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', 
.syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -29155,9 +28962,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe02000, .op = 0xe1800000 }, .id = HEX_INS_M2_MACSIN, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x8, 5 } }, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x8, 5 } }, .isa_id = 'u', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -29169,9 +28976,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe02000, .op = 0xe1000000 }, .id = HEX_INS_M2_MACSIP, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x8, 5 } }, .syntax = 10 }, + { 
.info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x8, 5 } }, .isa_id = 'u', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -29183,9 +28990,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xea2000e0 }, .id = HEX_INS_M2_MMACHS_RS0, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -29197,9 +29004,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xeaa000e0 }, .id = HEX_INS_M2_MMACHS_RS1, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, 
.masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -29211,9 +29018,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xea0000e0 }, .id = HEX_INS_M2_MMACHS_S0, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | 
HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -29225,9 +29032,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xea8000e0 }, .id = HEX_INS_M2_MMACHS_S1, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -29239,9 +29046,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xea2000a0 }, .id = HEX_INS_M2_MMACLS_RS0, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = 
HEX_REG_CLASS_DOUBLE_REGS, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -29253,9 +29060,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xeaa000a0 }, .id = HEX_INS_M2_MMACLS_RS1, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -29267,9 +29074,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xea0000a0 }, .id = HEX_INS_M2_MMACLS_S0, 
.ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -29281,9 +29088,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xea8000a0 }, .id = HEX_INS_M2_MMACLS_S1, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, 
.reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -29295,9 +29102,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xea6000e0 }, .id = HEX_INS_M2_MMACUHS_RS0, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 14 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -29309,9 +29116,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xeae000e0 }, .id = HEX_INS_M2_MMACUHS_RS1, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 
13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 14 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -29323,9 +29130,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xea4000e0 }, .id = HEX_INS_M2_MMACUHS_S0, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 14 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -29337,9 +29144,9 @@ static const HexInsnTemplate 
templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xeac000e0 }, .id = HEX_INS_M2_MMACUHS_S1, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 14 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -29351,9 +29158,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xea6000a0 }, .id = HEX_INS_M2_MMACULS_RS0, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', 
.syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 14 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -29365,9 +29172,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xeae000a0 }, .id = HEX_INS_M2_MMACULS_RS1, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 14 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -29379,9 +29186,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xea4000a0 }, .id = HEX_INS_M2_MMACULS_S0, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = 
HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 14 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -29393,9 +29200,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xeac000a0 }, .id = HEX_INS_M2_MMACULS_S1, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, 
.isa_id = 't', .syntax = 14 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -29407,9 +29214,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe82000e0 }, .id = HEX_INS_M2_MMPYH_RS0, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -29421,9 +29228,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe8a000e0 }, .id = HEX_INS_M2_MMPYH_RS1, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | 
HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -29435,9 +29242,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe80000e0 }, .id = HEX_INS_M2_MMPYH_S0, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -29449,9 +29256,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe88000e0 }, .id = HEX_INS_M2_MMPYH_S1, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | 
HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -29463,9 +29270,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe82000a0 }, .id = HEX_INS_M2_MMPYL_RS0, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 11 }, + { .info = 
HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -29477,9 +29284,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe8a000a0 }, .id = HEX_INS_M2_MMPYL_RS1, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -29491,9 +29298,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe80000a0 }, .id = HEX_INS_M2_MMPYL_S0, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { 
{ 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -29505,9 +29312,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe88000a0 }, .id = HEX_INS_M2_MMPYL_S1, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -29519,9 +29326,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe86000e0 }, .id = 
HEX_INS_M2_MMPYUH_RS0, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -29533,9 +29340,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe8e000e0 }, .id = HEX_INS_M2_MMPYUH_RS1, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, 
.masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -29547,9 +29354,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe84000e0 }, .id = HEX_INS_M2_MMPYUH_S0, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -29561,9 +29368,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe8c000e0 }, .id = HEX_INS_M2_MMPYUH_S1, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = 
HEX_REG_CLASS_DOUBLE_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -29575,9 +29382,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe86000a0 }, .id = HEX_INS_M2_MMPYUL_RS0, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -29589,9 
+29396,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe8e000a0 }, .id = HEX_INS_M2_MMPYUL_RS1, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -29603,9 +29410,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe84000a0 }, .id = HEX_INS_M2_MMPYUL_S0, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = 
HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -29617,9 +29424,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe8c000a0 }, .id = HEX_INS_M2_MMPYUL_S1, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -29631,9 +29438,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xef800000 }, .id = HEX_INS_M2_MNACI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = 
HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -29645,9 +29452,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xee000060 }, .id = HEX_INS_M2_MPY_ACC_HH_S0, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -29659,9 +29466,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xee800060 }, .id = HEX_INS_M2_MPY_ACC_HH_S1, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, 
.syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -29673,9 +29480,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xee000040 }, .id = HEX_INS_M2_MPY_ACC_HL_S0, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -29687,9 +29494,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xee800040 }, .id = HEX_INS_M2_MPY_ACC_HL_S1, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = 
HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -29701,9 +29508,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xee000020 }, .id = HEX_INS_M2_MPY_ACC_LH_S0, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -29715,9 +29522,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xee800020 }, .id = HEX_INS_M2_MPY_ACC_LH_S1, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { 
{ 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -29729,9 +29536,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xee000000 }, .id = HEX_INS_M2_MPY_ACC_LL_S0, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -29743,9 +29550,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xee800000 }, .id = HEX_INS_M2_MPY_ACC_LL_S1, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | 
HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -29757,9 +29564,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xee0000e0 }, .id = HEX_INS_M2_MPY_ACC_SAT_HH_S0, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -29771,9 +29578,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xee8000e0 }, .id = HEX_INS_M2_MPY_ACC_SAT_HH_S1, .ops = { - { 
.info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -29785,9 +29592,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xee0000c0 }, .id = HEX_INS_M2_MPY_ACC_SAT_HL_S0, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -29799,9 +29606,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xee8000c0 }, .id = 
HEX_INS_M2_MPY_ACC_SAT_HL_S1, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -29813,9 +29620,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xee0000a0 }, .id = HEX_INS_M2_MPY_ACC_SAT_LH_S0, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -29827,9 +29634,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 
0xffe020e0, .op = 0xee8000a0 }, .id = HEX_INS_M2_MPY_ACC_SAT_LH_S1, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -29841,9 +29648,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xee000080 }, .id = HEX_INS_M2_MPY_ACC_SAT_LL_S0, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -29855,9 +29662,9 @@ static const HexInsnTemplate 
templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xee800080 }, .id = HEX_INS_M2_MPY_ACC_SAT_LL_S1, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -29869,9 +29676,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xec000060 }, .id = HEX_INS_M2_MPY_HH_S0, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -29883,9 +29690,9 @@ 
static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xec800060 }, .id = HEX_INS_M2_MPY_HH_S1, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -29897,9 +29704,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xec000040 }, .id = HEX_INS_M2_MPY_HL_S0, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ 
-29911,9 +29718,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xec800040 }, .id = HEX_INS_M2_MPY_HL_S1, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -29925,9 +29732,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xec000020 }, .id = HEX_INS_M2_MPY_LH_S0, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = 
RZ_TYPE_COND_AL, @@ -29939,9 +29746,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xec800020 }, .id = HEX_INS_M2_MPY_LH_S1, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -29953,9 +29760,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xec000000 }, .id = HEX_INS_M2_MPY_LL_S0, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 10 }, }, .pred = 
HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -29967,9 +29774,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xec800000 }, .id = HEX_INS_M2_MPY_LL_S1, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -29981,9 +29788,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xee200060 }, .id = HEX_INS_M2_MPY_NAC_HH_S0, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', 
.syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -29995,9 +29802,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xeea00060 }, .id = HEX_INS_M2_MPY_NAC_HH_S1, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -30009,9 +29816,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xee200040 }, .id = HEX_INS_M2_MPY_NAC_HL_S0, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = 
HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -30023,9 +29830,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xeea00040 }, .id = HEX_INS_M2_MPY_NAC_HL_S1, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -30037,9 +29844,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xee200020 }, .id = HEX_INS_M2_MPY_NAC_LH_S0, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { 
{ 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -30051,9 +29858,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xeea00020 }, .id = HEX_INS_M2_MPY_NAC_LH_S1, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -30065,9 +29872,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xee200000 }, .id = HEX_INS_M2_MPY_NAC_LL_S0, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = 
HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -30079,9 +29886,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xeea00000 }, .id = HEX_INS_M2_MPY_NAC_LL_S1, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -30093,9 +29900,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xee2000e0 }, .id = HEX_INS_M2_MPY_NAC_SAT_HH_S0, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 
's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -30107,9 +29914,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xeea000e0 }, .id = HEX_INS_M2_MPY_NAC_SAT_HH_S1, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -30121,9 +29928,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xee2000c0 }, .id = HEX_INS_M2_MPY_NAC_SAT_HL_S0, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = 
HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -30135,9 +29942,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xeea000c0 }, .id = HEX_INS_M2_MPY_NAC_SAT_HL_S1, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -30149,9 +29956,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xee2000a0 }, .id = HEX_INS_M2_MPY_NAC_SAT_LH_S0, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, 
.masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -30163,9 +29970,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xeea000a0 }, .id = HEX_INS_M2_MPY_NAC_SAT_LH_S1, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -30177,9 +29984,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xee200080 }, .id = HEX_INS_M2_MPY_NAC_SAT_LL_S0, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 0 }, + { 
.info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -30191,9 +29998,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xeea00080 }, .id = HEX_INS_M2_MPY_NAC_SAT_LL_S1, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -30205,9 +30012,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xec200060 }, .id = HEX_INS_M2_MPY_RND_HH_S0, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, 
.isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -30219,9 +30026,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xeca00060 }, .id = HEX_INS_M2_MPY_RND_HH_S1, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -30233,9 +30040,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xec200040 }, .id = HEX_INS_M2_MPY_RND_HL_S0, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = 
HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -30247,9 +30054,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xeca00040 }, .id = HEX_INS_M2_MPY_RND_HL_S1, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -30261,9 +30068,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xec200020 }, .id = HEX_INS_M2_MPY_RND_LH_S0, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { 
{ 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -30275,9 +30082,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xeca00020 }, .id = HEX_INS_M2_MPY_RND_LH_S1, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -30289,9 +30096,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xec200000 }, .id = HEX_INS_M2_MPY_RND_LL_S0, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | 
HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -30303,9 +30110,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xeca00000 }, .id = HEX_INS_M2_MPY_RND_LL_S1, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -30317,9 +30124,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xec0000e0 }, .id = HEX_INS_M2_MPY_SAT_HH_S0, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = 
HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -30331,9 +30138,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xec8000e0 }, .id = HEX_INS_M2_MPY_SAT_HH_S1, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -30345,9 +30152,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xec0000c0 }, .id = HEX_INS_M2_MPY_SAT_HL_S0, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, 
.syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -30359,9 +30166,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xec8000c0 }, .id = HEX_INS_M2_MPY_SAT_HL_S1, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -30373,9 +30180,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xec0000a0 }, .id = HEX_INS_M2_MPY_SAT_LH_S0, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = 
HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -30387,9 +30194,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xec8000a0 }, .id = HEX_INS_M2_MPY_SAT_LH_S1, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -30401,9 +30208,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xec000080 }, .id = HEX_INS_M2_MPY_SAT_LL_S0, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { 
{ 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -30415,9 +30222,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xec800080 }, .id = HEX_INS_M2_MPY_SAT_LL_S1, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -30429,9 +30236,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xec2000e0 }, .id = HEX_INS_M2_MPY_SAT_RND_HH_S0, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = 
HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -30443,9 +30250,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xeca000e0 }, .id = HEX_INS_M2_MPY_SAT_RND_HH_S1, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -30457,9 +30264,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xec2000c0 }, .id = HEX_INS_M2_MPY_SAT_RND_HL_S0, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = 
HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -30471,9 +30278,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xeca000c0 }, .id = HEX_INS_M2_MPY_SAT_RND_HL_S1, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -30485,9 +30292,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xec2000a0 }, .id = HEX_INS_M2_MPY_SAT_RND_LH_S0, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, 
.masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -30499,9 +30306,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xeca000a0 }, .id = HEX_INS_M2_MPY_SAT_RND_LH_S1, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -30513,9 +30320,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xec200080 }, .id = HEX_INS_M2_MPY_SAT_RND_LL_S0, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { 
.info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -30527,9 +30334,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xeca00080 }, .id = HEX_INS_M2_MPY_SAT_RND_LL_S1, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -30541,9 +30348,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xed000020 }, .id = HEX_INS_M2_MPY_UP, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = 
HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 8 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -30555,9 +30362,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xeda00040 }, .id = HEX_INS_M2_MPY_UP_S1, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 8 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -30569,9 +30376,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xede00000 }, .id = HEX_INS_M2_MPY_UP_S1_SAT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 
} }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 8 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -30583,9 +30390,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe6000060 }, .id = HEX_INS_M2_MPYD_ACC_HH_S0, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -30597,9 +30404,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe6800060 }, .id = HEX_INS_M2_MPYD_ACC_HH_S1, 
.ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -30611,9 +30418,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe6000040 }, .id = HEX_INS_M2_MPYD_ACC_HL_S0, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, 
@@ -30625,9 +30432,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe6800040 }, .id = HEX_INS_M2_MPYD_ACC_HL_S1, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -30639,9 +30446,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe6000020 }, .id = HEX_INS_M2_MPYD_ACC_LH_S0, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info 
= HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -30653,9 +30460,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe6800020 }, .id = HEX_INS_M2_MPYD_ACC_LH_S1, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -30667,9 +30474,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe6000000 }, .id = HEX_INS_M2_MPYD_ACC_LL_S0, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, 
.isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -30681,9 +30488,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe6800000 }, .id = HEX_INS_M2_MPYD_ACC_LL_S1, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -30695,9 +30502,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe4000060 }, .id = HEX_INS_M2_MPYD_HH_S0, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + 
{ .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -30709,9 +30516,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe4800060 }, .id = HEX_INS_M2_MPYD_HH_S1, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -30723,9 +30530,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe4000040 }, .id = HEX_INS_M2_MPYD_HL_S0, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, 
.reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -30737,9 +30544,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe4800040 }, .id = HEX_INS_M2_MPYD_HL_S1, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -30751,9 +30558,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe4000020 }, .id = HEX_INS_M2_MPYD_LH_S0, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | 
HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -30765,9 +30572,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe4800020 }, .id = HEX_INS_M2_MPYD_LH_S1, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -30779,9 +30586,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { 
.encoding = { .mask = 0xffe020e0, .op = 0xe4000000 }, .id = HEX_INS_M2_MPYD_LL_S0, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -30793,9 +30600,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe4800000 }, .id = HEX_INS_M2_MPYD_LL_S1, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, 
.isa_id = 't', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -30807,9 +30614,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe6200060 }, .id = HEX_INS_M2_MPYD_NAC_HH_S0, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -30821,9 +30628,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe6a00060 }, .id = HEX_INS_M2_MPYD_NAC_HH_S1, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } 
}, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -30835,9 +30642,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe6200040 }, .id = HEX_INS_M2_MPYD_NAC_HL_S0, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -30849,9 +30656,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe6a00040 }, .id = HEX_INS_M2_MPYD_NAC_HL_S1, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | 
HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -30863,9 +30670,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe6200020 }, .id = HEX_INS_M2_MPYD_NAC_LH_S0, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -30877,9 +30684,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe6a00020 }, .id = HEX_INS_M2_MPYD_NAC_LH_S1, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = 
HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -30891,9 +30698,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe6200000 }, .id = HEX_INS_M2_MPYD_NAC_LL_S0, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -30905,9 +30712,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe6a00000 }, .id = HEX_INS_M2_MPYD_NAC_LL_S1, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = 
HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -30919,9 +30726,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe4200060 }, .id = HEX_INS_M2_MPYD_RND_HH_S0, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -30933,9 +30740,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe4a00060 }, .id = 
HEX_INS_M2_MPYD_RND_HH_S1, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -30947,9 +30754,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe4200040 }, .id = HEX_INS_M2_MPYD_RND_HL_S0, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 10 }, }, .pred = 
HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -30961,9 +30768,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe4a00040 }, .id = HEX_INS_M2_MPYD_RND_HL_S1, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -30975,9 +30782,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe4200020 }, .id = HEX_INS_M2_MPYD_RND_LH_S0, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, 
.isa_id = 's', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -30989,9 +30796,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe4a00020 }, .id = HEX_INS_M2_MPYD_RND_LH_S1, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -31003,9 +30810,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe4200000 }, .id = HEX_INS_M2_MPYD_RND_LL_S0, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, 
.reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -31017,9 +30824,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe4a00000 }, .id = HEX_INS_M2_MPYD_RND_LL_S1, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -31031,9 +30838,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xed000000 }, .id = HEX_INS_M2_MPYI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, + { 
.info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -31045,9 +30852,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe02000, .op = 0xe0800000 }, .id = HEX_INS_M2_MPYSIN, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x8, 5 } }, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE, .masks = { { 0x8, 5 } }, .isa_id = 'u', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -31059,9 +30866,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe02000, .op = 0xe0000000 }, .id = HEX_INS_M2_MPYSIP, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | 
HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x8, 5 } }, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x8, 5 } }, .isa_id = 'u', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -31073,9 +30880,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xed600020 }, .id = HEX_INS_M2_MPYSU_UP, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -31087,9 +30894,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xee400060 }, .id = HEX_INS_M2_MPYU_ACC_HH_S0, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = 
HEX_REG_CLASS_INT_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -31101,9 +30908,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xeec00060 }, .id = HEX_INS_M2_MPYU_ACC_HH_S1, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -31115,9 +30922,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xee400040 }, .id = HEX_INS_M2_MPYU_ACC_HL_S0, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { 
{ 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -31129,9 +30936,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xeec00040 }, .id = HEX_INS_M2_MPYU_ACC_HL_S1, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -31143,9 +30950,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xee400020 }, .id = HEX_INS_M2_MPYU_ACC_LH_S0, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = 
HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -31157,9 +30964,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xeec00020 }, .id = HEX_INS_M2_MPYU_ACC_LH_S1, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -31171,9 +30978,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xee400000 }, .id = HEX_INS_M2_MPYU_ACC_LL_S0, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, 
.syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -31185,9 +30992,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xeec00000 }, .id = HEX_INS_M2_MPYU_ACC_LL_S1, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -31199,9 +31006,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xec400060 }, .id = HEX_INS_M2_MPYU_HH_S0, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = 
HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -31213,9 +31020,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xecc00060 }, .id = HEX_INS_M2_MPYU_HH_S1, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -31227,9 +31034,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xec400040 }, .id = HEX_INS_M2_MPYU_HL_S0, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 
0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -31241,9 +31048,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xecc00040 }, .id = HEX_INS_M2_MPYU_HL_S1, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -31255,9 +31062,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xec400020 }, .id = HEX_INS_M2_MPYU_LH_S0, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, 
.masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -31269,9 +31076,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xecc00020 }, .id = HEX_INS_M2_MPYU_LH_S1, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -31283,9 +31090,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xec400000 }, .id = HEX_INS_M2_MPYU_LL_S0, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | 
HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -31297,9 +31104,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xecc00000 }, .id = HEX_INS_M2_MPYU_LL_S1, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -31311,9 +31118,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xee600060 }, .id = HEX_INS_M2_MPYU_NAC_HH_S0, .ops = { - { .info = 
HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -31325,9 +31132,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xeee00060 }, .id = HEX_INS_M2_MPYU_NAC_HH_S1, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -31339,9 +31146,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xee600040 }, .id = 
HEX_INS_M2_MPYU_NAC_HL_S0, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -31353,9 +31160,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xeee00040 }, .id = HEX_INS_M2_MPYU_NAC_HL_S1, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -31367,9 +31174,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, 
.op = 0xee600020 }, .id = HEX_INS_M2_MPYU_NAC_LH_S0, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -31381,9 +31188,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xeee00020 }, .id = HEX_INS_M2_MPYU_NAC_LH_S1, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -31395,9 +31202,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { 
.encoding = { .mask = 0xffe020e0, .op = 0xee600000 }, .id = HEX_INS_M2_MPYU_NAC_LL_S0, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -31409,9 +31216,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xeee00000 }, .id = HEX_INS_M2_MPYU_NAC_LL_S1, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -31423,9 +31230,9 @@ static const HexInsnTemplate 
templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xed400020 }, .id = HEX_INS_M2_MPYU_UP, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -31437,9 +31244,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe6400060 }, .id = HEX_INS_M2_MPYUD_ACC_HH_S0, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 12 }, }, .pred = 
HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -31451,9 +31258,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe6c00060 }, .id = HEX_INS_M2_MPYUD_ACC_HH_S1, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -31465,9 +31272,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe6400040 }, .id = HEX_INS_M2_MPYUD_ACC_HL_S0, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, 
.isa_id = 's', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -31479,9 +31286,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe6c00040 }, .id = HEX_INS_M2_MPYUD_ACC_HL_S1, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -31493,9 +31300,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe6400020 }, .id = HEX_INS_M2_MPYUD_ACC_LH_S0, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } 
}, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -31507,9 +31314,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe6c00020 }, .id = HEX_INS_M2_MPYUD_ACC_LH_S1, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -31521,9 +31328,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe6400000 }, .id = HEX_INS_M2_MPYUD_ACC_LL_S0, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, 
.reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -31535,9 +31342,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe6c00000 }, .id = HEX_INS_M2_MPYUD_ACC_LL_S1, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -31549,9 +31356,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe4400060 }, .id = HEX_INS_M2_MPYUD_HH_S0, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = 
HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -31563,9 +31370,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe4c00060 }, .id = HEX_INS_M2_MPYUD_HH_S1, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -31577,9 +31384,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe4400040 }, .id = HEX_INS_M2_MPYUD_HL_S0, .ops = { - { .info = 
HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -31591,9 +31398,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe4c00040 }, .id = HEX_INS_M2_MPYUD_HL_S1, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -31605,9 +31412,9 @@ 
static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe4400020 }, .id = HEX_INS_M2_MPYUD_LH_S0, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -31619,9 +31426,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe4c00020 }, .id = HEX_INS_M2_MPYUD_LH_S1, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, 
.masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -31633,9 +31440,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe4400000 }, .id = HEX_INS_M2_MPYUD_LL_S0, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -31647,9 +31454,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe4c00000 }, .id = HEX_INS_M2_MPYUD_LL_S1, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { 
.info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -31661,9 +31468,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe6600060 }, .id = HEX_INS_M2_MPYUD_NAC_HH_S0, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -31675,9 +31482,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe6e00060 }, .id = HEX_INS_M2_MPYUD_NAC_HH_S1, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, + { .info = 
HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -31689,9 +31496,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe6600040 }, .id = HEX_INS_M2_MPYUD_NAC_HL_S0, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -31703,9 +31510,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe6e00040 }, .id = HEX_INS_M2_MPYUD_NAC_HL_S1, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, 
.reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -31717,9 +31524,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe6600020 }, .id = HEX_INS_M2_MPYUD_NAC_LH_S0, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -31731,9 +31538,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe6e00020 }, .id = HEX_INS_M2_MPYUD_NAC_LH_S1, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | 
HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -31745,9 +31552,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe6600000 }, .id = HEX_INS_M2_MPYUD_NAC_LL_S0, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -31759,9 +31566,9 @@ static const HexInsnTemplate templates_normal_0xe[] = 
{ .encoding = { .mask = 0xffe020e0, .op = 0xe6e00000 }, .id = HEX_INS_M2_MPYUD_NAC_LL_S1, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -31773,9 +31580,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xef800020 }, .id = HEX_INS_M2_NACCI, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = 
RZ_TYPE_COND_AL, @@ -31787,9 +31594,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe02000, .op = 0xe2800000 }, .id = HEX_INS_M2_NACCII, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x8, 5 } }, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_IMM | HEX_OP_TEMPLATE_FLAG_IMM_SIGNED | HEX_OP_TEMPLATE_FLAG_IMM_EXTENDABLE | HEX_OP_TEMPLATE_FLAG_IMM_DOUBLE_HASH, .masks = { { 0x8, 5 } }, .isa_id = 's', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -31801,9 +31608,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xef000060 }, .id = HEX_INS_M2_SUBACC, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 8 }, + 
{ .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -31815,9 +31622,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe8600000 }, .id = HEX_INS_M2_VABSDIFFH, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 14 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -31829,9 +31636,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe8200000 }, .id = HEX_INS_M2_VABSDIFFW, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls 
= HEX_REG_CLASS_DOUBLE_REGS, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 14 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -31843,9 +31650,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xea400080 }, .id = HEX_INS_M2_VCMAC_S0_SAT_I, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -31857,9 +31664,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xea200080 }, .id = 
HEX_INS_M2_VCMAC_S0_SAT_R, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -31871,9 +31678,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe84000c0 }, .id = HEX_INS_M2_VCMPY_S0_SAT_I, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | 
HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -31885,9 +31692,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe82000c0 }, .id = HEX_INS_M2_VCMPY_S0_SAT_R, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -31899,9 +31706,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe8c000c0 }, .id = HEX_INS_M2_VCMPY_S1_SAT_I, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { 
{ 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -31913,9 +31720,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe8a000c0 }, .id = HEX_INS_M2_VCMPY_S1_SAT_R, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = 
RZ_TYPE_COND_AL, @@ -31927,9 +31734,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xea000080 }, .id = HEX_INS_M2_VDMACS_S0, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -31941,9 +31748,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xea800080 }, .id = HEX_INS_M2_VDMACS_S1, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 
0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -31955,9 +31762,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe9000000 }, .id = HEX_INS_M2_VDMPYRS_S0, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -31969,9 +31776,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe9800000 }, .id = HEX_INS_M2_VDMPYRS_S1, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks 
= { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -31983,9 +31790,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe8000080 }, .id = HEX_INS_M2_VDMPYS_S0, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -31997,9 +31804,9 @@ 
static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe8800080 }, .id = HEX_INS_M2_VDMPYS_S1, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -32011,9 +31818,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe7200020 }, .id = HEX_INS_M2_VMAC2, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = 
HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -32025,9 +31832,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xea200040 }, .id = HEX_INS_M2_VMAC2ES, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -32039,9 +31846,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xea0000c0 }, .id = HEX_INS_M2_VMAC2ES_S0, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, 
.syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -32053,9 +31860,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xea8000c0 }, .id = HEX_INS_M2_VMAC2ES_S1, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -32067,9 +31874,9 @@ static const 
HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe70000a0 }, .id = HEX_INS_M2_VMAC2S_S0, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -32081,9 +31888,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe78000a0 }, .id = HEX_INS_M2_VMAC2S_S1, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 
0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -32095,9 +31902,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe76000a0 }, .id = HEX_INS_M2_VMAC2SU_S0, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -32109,9 +31916,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe7e000a0 }, .id = HEX_INS_M2_VMAC2SU_S1, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = 
HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -32123,9 +31930,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe80000c0 }, .id = HEX_INS_M2_VMPY2ES_S0, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -32137,9 +31944,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe88000c0 }, .id = HEX_INS_M2_VMPY2ES_S1, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = 
HEX_REG_CLASS_DOUBLE_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -32151,9 +31958,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe50000a0 }, .id = HEX_INS_M2_VMPY2S_S0, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -32165,9 +31972,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xed2000e0 }, .id = 
HEX_INS_M2_VMPY2S_S0PACK, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -32179,9 +31986,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe58000a0 }, .id = HEX_INS_M2_VMPY2S_S1, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -32193,9 +32000,9 @@ static const 
HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xeda000e0 }, .id = HEX_INS_M2_VMPY2S_S1PACK, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -32207,9 +32014,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe50000e0 }, .id = HEX_INS_M2_VMPY2SU_S0, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax 
= 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -32221,9 +32028,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe58000e0 }, .id = HEX_INS_M2_VMPY2SU_S1, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -32235,9 +32042,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe92000e0 }, .id = HEX_INS_M2_VRADDH, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } 
}, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -32249,9 +32056,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe9000020 }, .id = HEX_INS_M2_VRADDUH, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -32263,9 +32070,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xea000000 }, .id = HEX_INS_M2_VRCMACI_S0, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | 
HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -32277,9 +32084,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xea400000 }, .id = HEX_INS_M2_VRCMACI_S0C, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -32291,9 +32098,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { 
.mask = 0xffe020e0, .op = 0xea000020 }, .id = HEX_INS_M2_VRCMACR_S0, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -32305,9 +32112,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xea600020 }, .id = HEX_INS_M2_VRCMACR_S0C, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = 
HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -32319,9 +32126,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe8000000 }, .id = HEX_INS_M2_VRCMPYI_S0, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -32333,9 +32140,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe8400000 }, .id = HEX_INS_M2_VRCMPYI_S0C, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | 
HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -32347,9 +32154,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe8000020 }, .id = HEX_INS_M2_VRCMPYR_S0, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 
12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -32361,9 +32168,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe8600020 }, .id = HEX_INS_M2_VRCMPYR_S0C, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -32375,9 +32182,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xeaa00080 }, .id = HEX_INS_M2_VRCMPYS_ACC_S1_H, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT 
| HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -32389,9 +32196,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xeae00080 }, .id = HEX_INS_M2_VRCMPYS_ACC_S1_L, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -32403,9 +32210,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe8a00080 }, .id = HEX_INS_M2_VRCMPYS_S1_H, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | 
HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -32417,9 +32224,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe8e00080 }, .id = HEX_INS_M2_VRCMPYS_S1_L, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 11 }, + { .info 
= HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -32431,9 +32238,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe9a000c0 }, .id = HEX_INS_M2_VRCMPYS_S1RP_H, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -32445,9 +32252,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe9a000e0 }, .id = HEX_INS_M2_VRCMPYS_S1RP_L, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 12 }, + { .info = 
HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -32459,9 +32266,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xea000040 }, .id = HEX_INS_M2_VRMAC_S0, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -32473,9 +32280,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe8000040 }, .id = HEX_INS_M2_VRMPY_S0, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | 
HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -32487,9 +32294,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xef800060 }, .id = HEX_INS_M2_XOR_XACC, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -32501,9 
+32308,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xef400000 }, .id = HEX_INS_M4_AND_AND, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -32515,9 +32322,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xef200020 }, .id = HEX_INS_M4_AND_ANDN, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, 
@@ -32529,9 +32336,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xef400020 }, .id = HEX_INS_M4_AND_OR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 8 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -32543,9 +32350,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xef400040 }, .id = HEX_INS_M4_AND_XOR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = 
RZ_TYPE_COND_AL, @@ -32557,9 +32364,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xef600000 }, .id = HEX_INS_M4_MAC_UP_S1_SAT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -32571,10 +32378,10 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe3000000 }, .id = HEX_INS_M4_MPYRR_ADDR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'y', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'u', .syntax = 
7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'y', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 14 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -32586,9 +32393,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xef600020 }, .id = HEX_INS_M4_NAC_UP_S1_SAT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -32600,9 +32407,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xef400060 }, .id = HEX_INS_M4_OR_AND, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = 
HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -32614,9 +32421,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xef200000 }, .id = HEX_INS_M4_OR_ANDN, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -32628,9 +32435,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xefc00000 }, .id = HEX_INS_M4_OR_OR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, 
.reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 8 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -32642,9 +32449,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xefc00020 }, .id = HEX_INS_M4_OR_XOR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -32656,9 +32463,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe54000e0 }, .id = HEX_INS_M4_PMPYW, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | 
HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -32670,9 +32477,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe72000e0 }, .id = HEX_INS_M4_PMPYW_ACC, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -32684,9 +32491,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe5c000e0 }, .id = HEX_INS_M4_VPMPYH, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 
}, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -32698,9 +32505,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe7a000e0 }, .id = HEX_INS_M4_VPMPYH_ACC, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -32712,9 +32519,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xea2000c0 }, .id = HEX_INS_M4_VRMPYEH_ACC_S0, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } 
}, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 14 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -32726,9 +32533,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xeaa000c0 }, .id = HEX_INS_M4_VRMPYEH_ACC_S1, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | 
HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 14 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -32740,9 +32547,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe8400080 }, .id = HEX_INS_M4_VRMPYEH_S0, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -32754,9 +32561,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe8c00080 }, .id = HEX_INS_M4_VRMPYEH_S1, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls 
= HEX_REG_CLASS_DOUBLE_REGS, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -32768,9 +32575,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xea6000c0 }, .id = HEX_INS_M4_VRMPYOH_ACC_S0, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 14 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -32782,9 +32589,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xeae000c0 }, .id = 
HEX_INS_M4_VRMPYOH_ACC_S1, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 14 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -32796,9 +32603,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe8200040 }, .id = HEX_INS_M4_VRMPYOH_S0, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, 
.masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -32810,9 +32617,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe8a00040 }, .id = HEX_INS_M4_VRMPYOH_S1, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -32824,9 +32631,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xefc00040 }, .id = HEX_INS_M4_XOR_AND, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks 
= { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -32838,9 +32645,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xef200040 }, .id = HEX_INS_M4_XOR_ANDN, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -32852,9 +32659,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xefc00060 }, .id = HEX_INS_M4_XOR_OR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = 
HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 8 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -32866,9 +32673,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xea200020 }, .id = HEX_INS_M5_VDMACBSU, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 14 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -32880,9 +32687,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe8a00020 }, .id = HEX_INS_M5_VDMPYBSU, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | 
HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -32894,9 +32701,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe7c00020 }, .id = HEX_INS_M5_VMACBSU, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = 
HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -32908,9 +32715,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe7800020 }, .id = HEX_INS_M5_VMACBUU, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -32922,9 +32729,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe5400020 }, .id = HEX_INS_M5_VMPYBSU, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = 
{ { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -32936,9 +32743,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe5800020 }, .id = HEX_INS_M5_VMPYBUU, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -32950,9 +32757,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xeac00020 }, .id = HEX_INS_M5_VRMACBSU, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 14 }, + { .info = 
HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 14 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -32964,9 +32771,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xea800020 }, .id = HEX_INS_M5_VRMACBUU, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -32978,9 +32785,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe8c00020 }, .id = HEX_INS_M5_VRMPYBSU, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | 
HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -32992,9 +32799,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe8800020 }, .id = HEX_INS_M5_VRMPYBUU, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 
's', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -33006,9 +32813,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe8e00000 }, .id = HEX_INS_M6_VABSDIFFB, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 14 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -33020,9 +32827,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe8a00000 }, .id = HEX_INS_M6_VABSDIFFUB, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 14 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | 
HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 15 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -33034,9 +32841,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe8600040 }, .id = HEX_INS_M7_DCMPYIW, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -33048,9 +32855,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask 
= 0xffe020e0, .op = 0xea600040 }, .id = HEX_INS_M7_DCMPYIW_ACC, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -33062,9 +32869,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe8e00040 }, .id = HEX_INS_M7_DCMPYIWC, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = 
HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -33076,9 +32883,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xea4000c0 }, .id = HEX_INS_M7_DCMPYIWC_ACC, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -33090,9 +32897,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe8800040 }, .id = HEX_INS_M7_DCMPYRW, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | 
HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -33104,9 +32911,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xea800040 }, .id = HEX_INS_M7_DCMPYRW_ACC, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 
12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -33118,9 +32925,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe8c00040 }, .id = HEX_INS_M7_DCMPYRWC, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -33132,9 +32939,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xeac00040 }, .id = HEX_INS_M7_DCMPYRWC_ACC, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | 
HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'x', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -33146,9 +32953,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe9200000 }, .id = HEX_INS_M7_WCMPYIW, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -33160,9 +32967,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe9a00000 }, .id = HEX_INS_M7_WCMPYIW_RND, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = 
HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -33174,9 +32981,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe9000080 }, .id = HEX_INS_M7_WCMPYIWC, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -33188,9 +32995,9 @@ 
static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe9800080 }, .id = HEX_INS_M7_WCMPYIWC_RND, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -33202,9 +33009,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe9400000 }, .id = HEX_INS_M7_WCMPYRW, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, 
.reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -33216,9 +33023,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe9c00000 }, .id = HEX_INS_M7_WCMPYRW_RND, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -33230,9 +33037,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe9600000 }, .id = HEX_INS_M7_WCMPYRWC, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, 
.reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -33244,9 +33051,9 @@ static const HexInsnTemplate templates_normal_0xe[] = { .encoding = { .mask = 0xffe020e0, .op = 0xe9e00000 }, .id = HEX_INS_M7_WCMPYRWC_RND, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 's', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -33262,9 +33069,9 @@ static const HexInsnTemplate templates_normal_0xf[] = { .encoding = { .mask = 0xffe020e0, .op = 0xf3000000 }, .id = HEX_INS_A2_ADD, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } 
}, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 8 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -33276,9 +33083,9 @@ static const HexInsnTemplate templates_normal_0xf[] = { .encoding = { .mask = 0xffe020e0, .op = 0xf6400000 }, .id = HEX_INS_A2_ADDSAT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 8 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -33290,9 +33097,9 @@ static const HexInsnTemplate templates_normal_0xf[] = { .encoding = { .mask = 0xffe020e0, .op = 0xf1000000 }, .id = HEX_INS_A2_AND, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 
0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 8 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -33304,9 +33111,9 @@ static const HexInsnTemplate templates_normal_0xf[] = { .encoding = { .mask = 0xffe020e0, .op = 0xf3800000 }, .id = HEX_INS_A2_COMBINE_HH, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 14 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -33318,9 +33125,9 @@ static const HexInsnTemplate templates_normal_0xf[] = { .encoding = { .mask = 0xffe020e0, .op = 0xf3a00000 }, .id = HEX_INS_A2_COMBINE_HL, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, 
.masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 14 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -33332,9 +33139,9 @@ static const HexInsnTemplate templates_normal_0xf[] = { .encoding = { .mask = 0xffe020e0, .op = 0xf3c00000 }, .id = HEX_INS_A2_COMBINE_LH, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 14 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -33346,9 +33153,9 @@ static const HexInsnTemplate templates_normal_0xf[] = { .encoding = { .mask = 0xffe020e0, .op = 0xf3e00000 }, .id = HEX_INS_A2_COMBINE_LL, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | 
HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 14 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -33360,9 +33167,9 @@ static const HexInsnTemplate templates_normal_0xf[] = { .encoding = { .mask = 0xffe020e0, .op = 0xf5000000 }, .id = HEX_INS_A2_COMBINEW, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -33374,9 +33181,9 @@ static const HexInsnTemplate templates_normal_0xf[] = { .encoding = { .mask = 0xffe020e0, .op = 
0xf1200000 }, .id = HEX_INS_A2_OR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 7 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -33388,10 +33195,10 @@ static const HexInsnTemplate templates_normal_0xf[] = { .encoding = { .mask = 0xffe02080, .op = 0xfb000080 }, .id = HEX_INS_A2_PADDF, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 14 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'u', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = 
{ { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 15 }, }, .pred = HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -33404,10 +33211,10 @@ static const HexInsnTemplate templates_normal_0xf[] = { .encoding = { .mask = 0xffe02080, .op = 0xfb002080 }, .id = HEX_INS_A2_PADDFNEW, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 18 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'u', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 19 }, }, .pred = HEX_PRED_FALSE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -33420,10 +33227,10 @@ static const HexInsnTemplate templates_normal_0xf[] = { .encoding = { .mask = 0xffe02080, .op = 0xfb000000 }, .id = HEX_INS_A2_PADDT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 13 }, - { .info = 
HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'u', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 14 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -33436,10 +33243,10 @@ static const HexInsnTemplate templates_normal_0xf[] = { .encoding = { .mask = 0xffe02080, .op = 0xfb002000 }, .id = HEX_INS_A2_PADDTNEW, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 17 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'u', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 18 }, }, .pred = HEX_PRED_TRUE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ 
-33452,10 +33259,10 @@ static const HexInsnTemplate templates_normal_0xf[] = { .encoding = { .mask = 0xffe02080, .op = 0xf9000080 }, .id = HEX_INS_A2_PANDF, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 14 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'u', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 15 }, }, .pred = HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -33468,10 +33275,10 @@ static const HexInsnTemplate templates_normal_0xf[] = { .encoding = { .mask = 0xffe02080, .op = 0xf9002080 }, .id = HEX_INS_A2_PANDFNEW, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 18 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = 
HEX_REG_CLASS_PRED_REGS, .isa_id = 'u', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 19 }, }, .pred = HEX_PRED_FALSE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -33484,10 +33291,10 @@ static const HexInsnTemplate templates_normal_0xf[] = { .encoding = { .mask = 0xffe02080, .op = 0xf9000000 }, .id = HEX_INS_A2_PANDT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'u', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 14 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -33500,10 +33307,10 @@ static const HexInsnTemplate templates_normal_0xf[] = { .encoding = { .mask = 0xffe02080, .op = 0xf9002000 }, .id = HEX_INS_A2_PANDTNEW, .ops = { - { .info = 
HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 17 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'u', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 18 }, }, .pred = HEX_PRED_TRUE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -33516,10 +33323,10 @@ static const HexInsnTemplate templates_normal_0xf[] = { .encoding = { .mask = 0xffe02080, .op = 0xf9200080 }, .id = HEX_INS_A2_PORF, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'u', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = 
HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 14 }, }, .pred = HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -33532,10 +33339,10 @@ static const HexInsnTemplate templates_normal_0xf[] = { .encoding = { .mask = 0xffe02080, .op = 0xf9202080 }, .id = HEX_INS_A2_PORFNEW, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 17 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'u', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 18 }, }, .pred = HEX_PRED_FALSE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -33548,10 +33355,10 @@ static const HexInsnTemplate templates_normal_0xf[] = { .encoding = { .mask = 0xffe02080, .op = 0xf9200000 }, .id = HEX_INS_A2_PORT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | 
HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'u', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 13 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -33564,10 +33371,10 @@ static const HexInsnTemplate templates_normal_0xf[] = { .encoding = { .mask = 0xffe02080, .op = 0xf9202000 }, .id = HEX_INS_A2_PORTNEW, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 16 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'u', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 
's', .syntax = 16 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 17 }, }, .pred = HEX_PRED_TRUE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -33580,10 +33387,10 @@ static const HexInsnTemplate templates_normal_0xf[] = { .encoding = { .mask = 0xffe02080, .op = 0xfb200080 }, .id = HEX_INS_A2_PSUBF, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 14 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 15 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'u', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 15 }, }, .pred = HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -33596,10 +33403,10 @@ static const HexInsnTemplate templates_normal_0xf[] = { .encoding = { .mask = 0xffe02080, .op = 0xfb202080 }, .id = HEX_INS_A2_PSUBFNEW, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = 
HEX_REG_CLASS_INT_REGS, .syntax = 18 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'u', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 19 }, }, .pred = HEX_PRED_FALSE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -33612,10 +33419,10 @@ static const HexInsnTemplate templates_normal_0xf[] = { .encoding = { .mask = 0xffe02080, .op = 0xfb200000 }, .id = HEX_INS_A2_PSUBT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'u', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 14 }, }, .pred = 
HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -33628,10 +33435,10 @@ static const HexInsnTemplate templates_normal_0xf[] = { .encoding = { .mask = 0xffe02080, .op = 0xfb202000 }, .id = HEX_INS_A2_PSUBTNEW, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 17 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'u', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 18 }, }, .pred = HEX_PRED_TRUE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -33644,10 +33451,10 @@ static const HexInsnTemplate templates_normal_0xf[] = { .encoding = { .mask = 0xffe02080, .op = 0xf9600080 }, .id = HEX_INS_A2_PXORF, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 14 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 15 }, + { .info 
= HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'u', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 15 }, }, .pred = HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -33660,10 +33467,10 @@ static const HexInsnTemplate templates_normal_0xf[] = { .encoding = { .mask = 0xffe02080, .op = 0xf9602080 }, .id = HEX_INS_A2_PXORFNEW, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 18 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'u', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 19 }, }, .pred = HEX_PRED_FALSE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -33676,10 +33483,10 @@ static const HexInsnTemplate templates_normal_0xf[] = { .encoding = { .mask = 
0xffe02080, .op = 0xf9600000 }, .id = HEX_INS_A2_PXORT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 13 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 14 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'u', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 14 }, }, .pred = HEX_PRED_TRUE, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -33692,10 +33499,10 @@ static const HexInsnTemplate templates_normal_0xf[] = { .encoding = { .mask = 0xffe02080, .op = 0xf9602000 }, .id = HEX_INS_A2_PXORTNEW, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 17 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'u', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | 
HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 18 }, }, .pred = HEX_PRED_TRUE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -33708,9 +33515,9 @@ static const HexInsnTemplate templates_normal_0xf[] = { .encoding = { .mask = 0xffe020e0, .op = 0xf3200000 }, .id = HEX_INS_A2_SUB, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -33722,9 +33529,9 @@ static const HexInsnTemplate templates_normal_0xf[] = { .encoding = { .mask = 0xffe020e0, .op = 0xf6c00000 }, .id = HEX_INS_A2_SUBSAT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, + { 
.info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -33736,9 +33543,9 @@ static const HexInsnTemplate templates_normal_0xf[] = { .encoding = { .mask = 0xffe020e0, .op = 0xf6000000 }, .id = HEX_INS_A2_SVADDH, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -33750,9 +33557,9 @@ static const HexInsnTemplate templates_normal_0xf[] = { .encoding = { .mask = 0xffe020e0, .op = 0xf6200000 }, .id = HEX_INS_A2_SVADDHS, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 
10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -33764,9 +33571,9 @@ static const HexInsnTemplate templates_normal_0xf[] = { .encoding = { .mask = 0xffe020e0, .op = 0xf6600000 }, .id = HEX_INS_A2_SVADDUHS, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -33778,9 +33585,9 @@ static const HexInsnTemplate templates_normal_0xf[] = { .encoding = { .mask = 0xffe020e0, .op = 0xf7000000 }, .id = HEX_INS_A2_SVAVGH, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = 
HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -33792,9 +33599,9 @@ static const HexInsnTemplate templates_normal_0xf[] = { .encoding = { .mask = 0xffe020e0, .op = 0xf7200000 }, .id = HEX_INS_A2_SVAVGHS, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -33806,9 +33613,9 @@ static const HexInsnTemplate templates_normal_0xf[] = { .encoding = { .mask = 0xffe020e0, .op = 0xf7600000 }, .id = HEX_INS_A2_SVNAVGH, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } 
}, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -33820,9 +33627,9 @@ static const HexInsnTemplate templates_normal_0xf[] = { .encoding = { .mask = 0xffe020e0, .op = 0xf6800000 }, .id = HEX_INS_A2_SVSUBH, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -33834,9 +33641,9 @@ static const HexInsnTemplate templates_normal_0xf[] = { .encoding = { .mask = 0xffe020e0, .op = 0xf6a00000 }, .id = HEX_INS_A2_SVSUBHS, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { 
{ 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 10 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -33848,9 +33655,9 @@ static const HexInsnTemplate templates_normal_0xf[] = { .encoding = { .mask = 0xffe020e0, .op = 0xf6e00000 }, .id = HEX_INS_A2_SVSUBUHS, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -33862,9 +33669,9 @@ static const HexInsnTemplate templates_normal_0xf[] = { .encoding = { .mask = 0xffe020e0, .op = 0xf1600000 }, .id = HEX_INS_A2_XOR, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = 
HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 8 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -33876,9 +33683,9 @@ static const HexInsnTemplate templates_normal_0xf[] = { .encoding = { .mask = 0xffe020e0, .op = 0xf1800000 }, .id = HEX_INS_A4_ANDN, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -33890,9 +33697,9 @@ static const HexInsnTemplate templates_normal_0xf[] = { .encoding = { .mask = 0xffe020e0, .op = 0xf1a00000 }, .id = HEX_INS_A4_ORN, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 6 }, - { .info = 
HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -33904,9 +33711,9 @@ static const HexInsnTemplate templates_normal_0xf[] = { .encoding = { .mask = 0xffe020e0, .op = 0xf3400000 }, .id = HEX_INS_A4_RCMPEQ, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -33918,9 +33725,9 @@ static const HexInsnTemplate templates_normal_0xf[] = { .encoding = { .mask = 0xffe020e0, .op = 0xf3600000 }, .id = HEX_INS_A4_RCMPNEQ, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 
}, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -33932,10 +33739,10 @@ static const HexInsnTemplate templates_normal_0xf[] = { .encoding = { .mask = 0xffe02080, .op = 0xfd000080 }, .id = HEX_INS_C2_CCOMBINEWF, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 18 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 19 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'u', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 7 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 19 }, }, .pred = HEX_PRED_FALSE, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -33948,10 +33755,10 @@ static const HexInsnTemplate 
templates_normal_0xf[] = { .encoding = { .mask = 0xffe02080, .op = 0xfd002080 }, .id = HEX_INS_C2_CCOMBINEWNEWF, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 5 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 22 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 23 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'u', .syntax = 5 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 22 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 23 }, }, .pred = HEX_PRED_FALSE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_FALSE, @@ -33964,10 +33771,10 @@ static const HexInsnTemplate templates_normal_0xf[] = { .encoding = { .mask = 0xffe02080, .op = 0xfd002000 }, .id = HEX_INS_C2_CCOMBINEWNEWT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 21 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 
22 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'u', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 21 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 22 }, }, .pred = HEX_PRED_TRUE | HEX_PRED_NEW, .cond = RZ_TYPE_COND_HEX_SCL_TRUE, @@ -33980,10 +33787,10 @@ static const HexInsnTemplate templates_normal_0xf[] = { .encoding = { .mask = 0xffe02080, .op = 0xfd000000 }, .id = HEX_INS_C2_CCOMBINEWT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 4 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 6 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 17 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 18 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'u', .syntax = 4 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 6 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 17 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 18 }, }, .pred = HEX_PRED_TRUE, .cond = 
RZ_TYPE_COND_HEX_SCL_TRUE, @@ -33996,9 +33803,9 @@ static const HexInsnTemplate templates_normal_0xf[] = { .encoding = { .mask = 0xffe020fc, .op = 0xf2000000 }, .id = HEX_INS_C2_CMPEQ, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -34010,9 +33817,9 @@ static const HexInsnTemplate templates_normal_0xf[] = { .encoding = { .mask = 0xffe020fc, .op = 0xf2400000 }, .id = HEX_INS_C2_CMPGT, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 11 }, }, 
.pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -34024,9 +33831,9 @@ static const HexInsnTemplate templates_normal_0xf[] = { .encoding = { .mask = 0xffe020fc, .op = 0xf2600000 }, .id = HEX_INS_C2_CMPGTU, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -34038,10 +33845,10 @@ static const HexInsnTemplate templates_normal_0xf[] = { .encoding = { .mask = 0xffe02080, .op = 0xf4000000 }, .id = HEX_INS_C2_MUX, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 7 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 8 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 9 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x2, 5 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'u', .syntax = 7 }, + { 
.info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 8 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 9 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -34053,9 +33860,9 @@ static const HexInsnTemplate templates_normal_0xf[] = { .encoding = { .mask = 0xffe020fc, .op = 0xf2400010 }, .id = HEX_INS_C4_CMPLTE, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -34067,9 +33874,9 @@ static const HexInsnTemplate templates_normal_0xf[] = { .encoding = { .mask = 0xffe020fc, .op = 0xf2600010 }, .id = HEX_INS_C4_CMPLTEU, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 13 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'd', 
.syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 13 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -34081,9 +33888,9 @@ static const HexInsnTemplate templates_normal_0xf[] = { .encoding = { .mask = 0xffe020fc, .op = 0xf2000010 }, .id = HEX_INS_C4_CMPNEQ, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 12 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT, .masks = { { 0x2, 0 } }, .reg_cls = HEX_REG_CLASS_PRED_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 12 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -34095,9 +33902,9 @@ static const HexInsnTemplate templates_normal_0xf[] = { .encoding = { .mask = 0xffe020e0, .op = 0xf5800000 }, .id = HEX_INS_S2_PACKHL, .ops = { - { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .syntax = 0 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 10 }, - { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .syntax = 11 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG | HEX_OP_TEMPLATE_FLAG_REG_OUT | 
HEX_OP_TEMPLATE_FLAG_REG_PAIR, .masks = { { 0x5, 0 } }, .reg_cls = HEX_REG_CLASS_DOUBLE_REGS, .isa_id = 'd', .syntax = 0 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 16 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 's', .syntax = 10 }, + { .info = HEX_OP_TEMPLATE_TYPE_REG, .masks = { { 0x5, 8 } }, .reg_cls = HEX_REG_CLASS_INT_REGS, .isa_id = 't', .syntax = 11 }, }, .pred = HEX_NOPRED, .cond = RZ_TYPE_COND_AL, @@ -34136,7 +33943,7 @@ static const HexInsnTemplate *templates_normal[] = { static const HexInsnTemplate *get_sub_template_table(const ut8 duplex_iclass, bool high) { switch (duplex_iclass) { default: - RZ_LOG_WARN("IClasses > 0xe are reserved.\n"); + RZ_LOG_INFO("IClasses > 0xe are reserved.\n"); return NULL; case 0: return high ? templates_sub_L1 : templates_sub_L1; @@ -34263,9 +34070,11 @@ static void hex_disasm_with_templates(const HexInsnTemplate *tpl, HexState *stat hi->op_count++; hi->ops[i].attr = 0; + hi->ops[i].isa_id = op->isa_id; switch (type) { case HEX_OP_TEMPLATE_TYPE_IMM: { hi->ops[i].type = HEX_OP_TYPE_IMM; + hi->ops[i].class = op->info; ut32 bits_total; hi->ops[i].op.imm = hex_op_masks_extract(op->masks, hi_u32, &bits_total) << op->imm_scale; hi->ops[i].shift = op->imm_scale; @@ -34308,6 +34117,7 @@ static void hex_disasm_with_templates(const HexInsnTemplate *tpl, HexState *stat break; case HEX_OP_TEMPLATE_TYPE_REG: hi->ops[i].type = HEX_OP_TYPE_REG; + hi->ops[i].class = op->reg_cls; hi->ops[i].op.reg = hex_op_masks_extract(op->masks, hi_u32, NULL); if (op->info & HEX_OP_TEMPLATE_FLAG_REG_OUT) { hi->ops[i].attr |= HEX_OP_REG_OUT; @@ -34323,7 +34133,13 @@ static void hex_disasm_with_templates(const HexInsnTemplate *tpl, HexState *stat if (op->info & HEX_OP_TEMPLATE_FLAG_REG_N_REG) { regidx = resolve_n_register(hi->ops[i].op.reg, hic->addr, pkt); } - rz_strbuf_append(&sb, hex_get_reg_in_class(op->reg_cls, regidx, print_reg_alias)); + const char *reg_name = hex_get_reg_in_class(op->reg_cls, regidx, print_reg_alias, false, 
false); + if (!reg_name) { + rz_strbuf_append(&sb, ""); + hi->identifier = HEX_INS_INVALID_DECODE; + } else { + rz_strbuf_append(&sb, reg_name); + } break; default: rz_warn_if_reached(); @@ -34336,6 +34152,7 @@ static void hex_disasm_with_templates(const HexInsnTemplate *tpl, HexState *stat rz_strbuf_append_n(&sb, tpl->syntax + syntax_cur, syntax_len - syntax_cur); } strncpy(hi->text_infix, rz_strbuf_get(&sb), sizeof(hi->text_infix) - 1); + rz_strbuf_fini(&sb); // RzAnalysisOp contents hic->ana_op.addr = hic->addr; @@ -34396,7 +34213,7 @@ static void hex_set_invalid_duplex(const ut32 hi_u32, RZ_INOUT RZ_NONNULL HexIns HexInsn *hi_low = hic->bin.sub[1]; rz_return_if_fail(hi_high && hi_low); hic->identifier = HEX_INS_INVALID_DECODE; - hic->opcode = hi_u32; + hic->bytes = hi_u32; hi_high->opcode = (hi_u32 >> 16) & 0x1fff; hi_low->opcode = hi_u32 & 0x1fff; hi_high->identifier = HEX_INS_INVALID_DECODE; @@ -34410,22 +34227,27 @@ int hexagon_disasm_instruction(HexState *state, const ut32 hi_u32, RZ_INOUT HexI ut32 addr = hic->addr; if (hic->pkt_info.last_insn) { switch (hex_get_loop_flag(pkt)) { - default: break; + default: + pkt->hw_loop = HEX_NO_LOOP; + break; case HEX_LOOP_01: hic->ana_op.prefix = RZ_ANALYSIS_OP_PREFIX_HWLOOP_END; hic->ana_op.fail = pkt->hw_loop0_addr; hic->ana_op.jump = pkt->hw_loop1_addr; hic->ana_op.val = hic->ana_op.jump; + pkt->hw_loop = HEX_LOOP_01; break; case HEX_LOOP_0: hic->ana_op.prefix = RZ_ANALYSIS_OP_PREFIX_HWLOOP_END; hic->ana_op.jump = pkt->hw_loop0_addr; hic->ana_op.val = hic->ana_op.jump; + pkt->hw_loop = HEX_LOOP_0; break; case HEX_LOOP_1: hic->ana_op.prefix = RZ_ANALYSIS_OP_PREFIX_HWLOOP_END; hic->ana_op.jump = pkt->hw_loop1_addr; hic->ana_op.val = hic->ana_op.jump; + pkt->hw_loop = HEX_LOOP_1; break; } } @@ -34440,7 +34262,7 @@ int hexagon_disasm_instruction(HexState *state, const ut32 hi_u32, RZ_INOUT HexI ut32 iclass = (((hi_u32 >> 29) & 0xF) << 1) | ((hi_u32 >> 13) & 1); if (iclass == 0xf) { - RZ_LOG_WARN("Reserved duplex 
instruction class used at: 0x%" PFMT32x ".\n", addr); + RZ_LOG_INFO("Reserved duplex instruction class used at: 0x%" PFMT32x ".\n", addr); } const HexInsnTemplate *tmp_high = get_sub_template_table(iclass, true); @@ -34466,9 +34288,8 @@ int hexagon_disasm_instruction(HexState *state, const ut32 hi_u32, RZ_INOUT HexI hic->ana_op.eob = true; } if (hic->identifier == HEX_INS_INVALID_DECODE) { + hic->is_duplex = false; hic->ana_op.type = RZ_ANALYSIS_OP_TYPE_ILL; - HexInsn *hi = hexagon_alloc_instr(); - hic->bin.insn = hi; snprintf(hic->bin.insn->text_infix, sizeof(hic->bin.insn->text_infix), "invalid"); } hex_set_hic_text(hic); diff --git a/librz/arch/isa/hexagon/hexagon_dwarf_reg_num_table.inc b/librz/arch/isa/hexagon/hexagon_dwarf_reg_num_table.inc index 01d916cfc1b..148ccc01c40 100644 --- a/librz/arch/isa/hexagon/hexagon_dwarf_reg_num_table.inc +++ b/librz/arch/isa/hexagon/hexagon_dwarf_reg_num_table.inc @@ -1,9 +1,9 @@ -// SPDX-FileCopyrightText: 2023 Rot127 +// SPDX-FileCopyrightText: 2021 Rot127 // SPDX-License-Identifier: LGPL-3.0-only // LLVM commit: b6f51787f6c8e77143f0aef6b58ddc7c55741d5c // LLVM commit date: 2023-11-15 07:10:59 -0800 (ISO 8601 format) -// Date of code generation: 2023-11-15 14:59:38-05:00 +// Date of code generation: 2024-03-16 06:22:39-05:00 //======================================== // The following code is generated. // Do not edit. 
Repository of code generator: @@ -86,6 +86,16 @@ static const char *map_dwarf_reg_to_hexagon_reg(ut32 reg_num) { case 84: return "C17"; case 85: return "C18"; case 86: return "C19"; + case 87: return "C20"; + case 88: return "C21"; + case 89: return "C22"; + case 90: return "C23"; + case 91: return "C24"; + case 92: return "C25"; + case 93: return "C26"; + case 94: return "C27"; + case 95: return "C28"; + case 96: return "C29"; case 97: return "C30"; case 98: return "C31"; case 99: return "V0"; diff --git a/librz/arch/isa/hexagon/hexagon_il.c b/librz/arch/isa/hexagon/hexagon_il.c new file mode 100644 index 00000000000..6a98bc2d1ce --- /dev/null +++ b/librz/arch/isa/hexagon/hexagon_il.c @@ -0,0 +1,880 @@ +// SPDX-FileCopyrightText: 2021 Rot127 +// SPDX-License-Identifier: LGPL-3.0-only + +// LLVM commit: b6f51787f6c8e77143f0aef6b58ddc7c55741d5c +// LLVM commit date: 2023-11-15 07:10:59 -0800 (ISO 8601 format) +// Date of code generation: 2024-03-16 06:22:39-05:00 +//======================================== +// The following code is generated. +// Do not edit. 
Repository of code generator: +// https://github.com/rizinorg/rz-hexagon + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static HexILOp hex_jump_flag_init_op = { + .attr = HEX_IL_INSN_ATTR_NONE, + .get_il_op = (HexILOpGetter)hex_il_op_jump_flag_init, +}; + +static HexILOp hex_next_jump_to_next_pkt = { + .attr = HEX_IL_INSN_ATTR_BRANCH | HEX_IL_INSN_ATTR_COND, + .get_il_op = (HexILOpGetter)hex_il_op_next_pkt_jmp, +}; + +static HexILOp hex_pkt_commit = { + .attr = HEX_IL_INSN_ATTR_NONE, + .get_il_op = (HexILOpGetter)hex_commit_packet, +}; + +static HexILOp hex_endloop0_op = { + .attr = HEX_IL_INSN_ATTR_BRANCH | HEX_IL_INSN_ATTR_COND, + .get_il_op = (HexILOpGetter)hex_il_op_j2_endloop0, +}; + +static HexILOp hex_endloop1_op = { + .attr = HEX_IL_INSN_ATTR_BRANCH | HEX_IL_INSN_ATTR_COND, + .get_il_op = (HexILOpGetter)hex_il_op_j2_endloop1, +}; + +static HexILOp hex_endloop01_op = { + .attr = HEX_IL_INSN_ATTR_BRANCH | HEX_IL_INSN_ATTR_COND, + .get_il_op = (HexILOpGetter)hex_il_op_j2_endloop01, +}; + +/** + * \brief Sends the IL op at \p start to the position \p newloc. + * + * Note: This is a copy of the same function implemented by Qualcomm in QEMU. + * See: https://gitlab.com/qemu-project/qemu/-/blob/master/target/hexagon/decode.c :: decode_send_insn_to + * + * \param ops The IL ops list. + * \param start Index of the op to move. + * \param newloc Position the op shall be moved to. 
+ */ +static void hex_send_insn_to_i(RzPVector /**/ *ops, ut8 start, ut8 newloc) { + rz_return_if_fail(ops && newloc < rz_pvector_len(ops)); + + st32 direction; + if (start == newloc) { + return; + } + if (start < newloc) { + /* Move towards end */ + direction = 1; + } else { + /* move towards beginning */ + direction = -1; + } + for (st32 i = start; i != newloc; i += direction) { + HexILOp *neighbor_op = (HexILOp *)rz_pvector_at(ops, i + direction); + HexILOp *to_move_op = (HexILOp *)rz_pvector_at(ops, i); + rz_pvector_set(ops, i, neighbor_op); + rz_pvector_set(ops, i + direction, to_move_op); + } +} + +/** + * \brief Shuffles the IL operations of the packet instructions into the correct execution order + * and stores the result in \p p->il_ops + * + * The shuffle algorithm implemented here is a copy of Qualcomm's implementation in QEMU: + * https://gitlab.com/qemu-project/qemu/-/blob/master/target/hexagon/decode.c :: decode_shuffle_for_execution + * + * Though some changes were made: + * * Endloops are not handled here (they are pushed to the ops list afterwards). + * * ".new cmp jump" instructions were already split by us at this stage. So we don't check for them. + * + * \param p A valid packet which holds all instructions and the IL ops. + * \return true Shuffle was successful. + * \return false Shuffle failed. + */ +RZ_IPI bool hex_shuffle_insns(RZ_INOUT HexPkt *p) { + rz_return_val_if_fail(p, false); + if (!p->is_valid) { + // Incomplete packets cannot be executed. + return false; + } + if (rz_pvector_empty(p->il_ops)) { + RZ_LOG_WARN("Valid packet without RZIL instructions encountered! 
pkt addr = 0x%" PFMT32x "\n", p->pkt_addr); + return false; + } + RzPVector *ops = p->il_ops; + + // Do the shuffle + bool changed = false; + int i; + bool flag; /* flag means we've seen a non-memory instruction */ + int n_mems; /* Number of memory instructions passed */ + int last_insn = rz_pvector_len(p->il_ops) - 1; + HexILOp *op; + + do { + changed = false; + /* + * Stores go last, must not reorder. + * Cannot shuffle stores past loads, either. + * Iterate backwards. If we see a non-memory instruction, + * then a store, shuffle the store to the front. Don't shuffle + * stores with regard to each other or a load. + */ + n_mems = 0; + flag = false; + for (flag = false, n_mems = 0, i = last_insn; i >= 0; i--) { + op = (HexILOp *)rz_pvector_at(ops, i); + if (!op) { + RZ_LOG_FATAL("NULL il op at index %" PFMT32d "\n", i); + } + if (flag && (op->attr & HEX_IL_INSN_ATTR_MEM_WRITE)) { + hex_send_insn_to_i(ops, i, last_insn - n_mems); + n_mems++; + changed = true; + } else if (op->attr & HEX_IL_INSN_ATTR_MEM_WRITE) { + n_mems++; + } else if (op->attr & HEX_IL_INSN_ATTR_MEM_READ) { + /* + * Don't set flag, since we don't want to shuffle a + * store past a load + */ + n_mems++; + } else if (op->attr & HEX_IL_INSN_ATTR_NEW) { + /* + * Don't set flag, since we don't want to shuffle past + * a .new value + */ + } else { + flag = true; + } + } + if (changed) { + continue; + } + + /* Comparisons go first, may be reordered with regard to each other */ + for (flag = false, i = 0; i < last_insn + 1; i++) { + op = (HexILOp *)rz_pvector_at(ops, i); + if ((op->attr & HEX_IL_INSN_ATTR_WPRED) && + (op->attr & HEX_IL_INSN_ATTR_MEM_WRITE)) { + /* This should be a comparison (not a store conditional) */ + if (flag) { + hex_send_insn_to_i(ops, i, 0); + changed = true; + continue; + } + } else if (op->attr & HEX_IL_INSN_ATTR_WRITE_P3) /* && !is_endloop */ { + // Endloops get pushed afterwards. 
+ if (flag) { + hex_send_insn_to_i(ops, i, 0); + changed = true; + continue; + } + } else if (op->attr & HEX_IL_INSN_ATTR_WRITE_P0) /* && !is_new_cmp_jmp */ { + // We have already split .new cmp jumps at this point. So no need to check for it. + if (flag) { + hex_send_insn_to_i(ops, i, 0); + changed = true; + continue; + } + } else { + flag = true; + } + } + if (changed) { + continue; + } + } while (changed); + + /* + * If we have a .new register compare/branch, move that to the very + * very end, past stores + */ + for (i = 0; i < last_insn; i++) { + op = (HexILOp *)rz_pvector_at(ops, i); + if (op->attr & HEX_IL_INSN_ATTR_NEW) { + hex_send_insn_to_i(ops, i, last_insn); + break; + } + } + return true; +} + +static RzILOpEffect *hex_il_op_to_effect(const HexILOp *il_op, HexPkt *pkt) { + rz_return_val_if_fail(il_op && il_op->get_il_op, NULL); + HexInsnPktBundle bundle = { 0 }; + bundle.insn = (HexInsn *)il_op->hi; + bundle.pkt = pkt; + return il_op->get_il_op(&bundle); +} + +/** + * \brief Transforms a list of HexILOps into a single sequence. + * + * \param pkt The hexagon packet of the + * \return RzILOpEffect* Sequence of operations to emulate the packet. + */ +static RZ_OWN RzILOpEffect *hex_pkt_to_il_seq(HexPkt *pkt) { + rz_return_val_if_fail(pkt && pkt->il_ops, NULL); + + if (rz_pvector_len(pkt->il_ops) == 1) { + rz_pvector_clear(pkt->il_ops); + // We need at least the instruction op and the packet commit. + // So if there aren't at least two ops something went wrong. + RZ_LOG_WARN("Invalid il ops sequence! 
There should be at least two il ops per packet.\n"); + return NULL; + } + RzILOpEffect *complete_seq = EMPTY(); + for (ut32 i = 0; i < rz_pvector_len(pkt->il_ops); ++i) { + complete_seq = SEQ2(complete_seq, hex_il_op_to_effect((HexILOp *)rz_pvector_at(pkt->il_ops, i), pkt)); + } + return complete_seq; +} + +static bool set_pkt_il_ops(RZ_INOUT HexPkt *p) { + rz_return_val_if_fail(p, false); + hex_reset_il_pkt_stats(&p->il_op_stats); + // This function is a lot of unnecessary overhead so: + // TODO The assignment of IL instructions to their actual instructions should be done in the instruction template. + // But with the current separation between Asm and Analysis plugins this is not possible. + // Because Asm is not allowed to depend on Analysis and the RZIL code. + // This should be fixed ASAP after RzArch has been introduced. + HexInsnContainer *pos; + RzListIter *it; + rz_list_foreach (p->bin, it, pos) { + HexILInsn *cur_il_insn; + if (pos->is_duplex) { + // High sub-instructions + pos->bin.sub[0]->il_insn = hex_il_getter_lt[pos->bin.sub[0]->identifier]; + cur_il_insn = &pos->bin.sub[0]->il_insn; + // high sub operation 0 + cur_il_insn->op0.hi = pos->bin.sub[0]; + if (cur_il_insn->op0.attr == HEX_IL_INSN_ATTR_INVALID) { + goto not_impl; + } + rz_pvector_push(p->il_ops, &cur_il_insn->op0); + + // high sub operation 1 + if (cur_il_insn->op1.attr != HEX_IL_INSN_ATTR_INVALID) { + cur_il_insn->op1.hi = pos->bin.sub[0]; + rz_pvector_push(p->il_ops, &cur_il_insn->op1); + } + + // Low sub-instructions + pos->bin.sub[1]->il_insn = hex_il_getter_lt[pos->bin.sub[1]->identifier]; + cur_il_insn = &pos->bin.sub[1]->il_insn; + // low sub operation 0 + cur_il_insn->op0.hi = pos->bin.sub[1]; + if (cur_il_insn->op0.attr == HEX_IL_INSN_ATTR_INVALID) { + goto not_impl; + } + rz_pvector_push(p->il_ops, &cur_il_insn->op0); + + // low sub operation 1 + if (cur_il_insn->op1.attr != HEX_IL_INSN_ATTR_INVALID) { + pos->bin.sub[1]->il_insn.op1.hi = pos->bin.sub[1]; + 
rz_pvector_push(p->il_ops, &cur_il_insn->op1); + } + } else { + pos->bin.insn->il_insn = hex_il_getter_lt[pos->bin.insn->identifier]; + cur_il_insn = &pos->bin.insn->il_insn; + // Insn operation 0 + cur_il_insn->op0.hi = pos->bin.insn; + if (cur_il_insn->op0.attr == HEX_IL_INSN_ATTR_INVALID) { + goto not_impl; + } + rz_pvector_push(p->il_ops, &cur_il_insn->op0); + // Insn operation 1 + if (cur_il_insn->op1.attr != HEX_IL_INSN_ATTR_INVALID) { + cur_il_insn->op1.hi = pos->bin.insn; + rz_pvector_push(p->il_ops, &cur_il_insn->op1); + } + } + } + return true; +not_impl: + RZ_LOG_INFO("Hexagon instruction %" PFMT32d " not implemented.\n", pos->bin.insn->identifier); + return false; +} + +static void check_for_jumps(const HexPkt *p, RZ_OUT bool *jump_flag) { + rz_return_if_fail(p && jump_flag); + void **it; + HexILOp *op; + rz_pvector_foreach (p->il_ops, it) { + op = *it; + if (op->attr & HEX_IL_INSN_ATTR_BRANCH) { + *jump_flag = true; + } + } +} + +/** + * \brief Checks if the packet at \p addr has all conditions fulfilled + * to be executed. + * + * \param pkt The packet to check. + * \param addr Address of the requested IL operation. + * + * \return true If the packet can be set up for emulation. + * \return false Otherwise. + */ +static inline bool pkt_at_addr_is_emu_ready(const HexPkt *pkt, const ut32 addr) { + if (rz_list_length(pkt->bin) == 1) { + const HexInsnContainer *hic = rz_list_get_n(pkt->bin, 0); + if (hic->identifier == HEX_INS_INVALID_DECODE) { + return false; + } + } + return addr == pkt->pkt_addr && pkt->is_valid && pkt->last_instr_present; +} + +/** + * \brief Returns the IL operation of the instruction at \p addr. This will always be EMPTY(). + * Except for last instructions in a packet. Those will always return the complete IL operation + * of the packet or NULL if one instruction was not implemented or an error occurred. + * + * \param addr Address of the requested IL operation. 
+ * \param get_pkt_op If true, it returns the IL operation of the whole packet at \p addr. + * It will return EMPTY() if there is no packet which starts at \p addr. + * If false, the behavior is as documented above. + * \return RzILOpEffect* Sequence of operations to emulate the packet. + */ +RZ_IPI RzILOpEffect *hex_get_il_op(const ut32 addr, const bool get_pkt_op) { + static bool might_has_jumped = false; + HexState *state = hexagon_state(false); + if (!state) { + RZ_LOG_WARN("Failed to get hexagon plugin state data!\n"); + return NULL; + } + HexPkt *p = hex_get_pkt(state, addr); + if (!p) { + RZ_LOG_WARN("Packet was NULL although it should have been disassembled at this point.\n"); + return NULL; + } + HexInsnContainer *hic = hex_get_hic_at_addr(state, addr); + if (!hic) { + return EMPTY(); + } + if (hic->identifier == HEX_INS_INVALID_DECODE) { + return NULL; + } + if (state->just_init || might_has_jumped) { + // Assume that the instruction at the address the VM was initialized is the first instruction. + // Also make it valid if a jump let to this packet. + p->is_valid = true; + hic->pkt_info.first_insn = true; + state->just_init = false; + might_has_jumped = false; + } + + if (!get_pkt_op && !hic->pkt_info.last_insn) { + // Only at the last instruction we execute all il ops of the packet. 
+ return EMPTY(); + } + + if (!(get_pkt_op && pkt_at_addr_is_emu_ready(p, addr)) || !pkt_at_addr_is_emu_ready(p, p->pkt_addr)) { + // Invalid packet, EMPTY() + return EMPTY(); + } + + if (!rz_pvector_empty(p->il_ops)) { + check_for_jumps(p, &might_has_jumped); + return hex_pkt_to_il_seq(p); + } + + rz_pvector_push(p->il_ops, &hex_jump_flag_init_op); + + if (!set_pkt_il_ops(p)) { + RZ_LOG_INFO("IL ops at 0x%" PFMT32x " contain not implemented instructions.\n", addr); + return NULL; + } + + if (!hex_shuffle_insns(p)) { + RZ_LOG_WARN("Instruction shuffle failed at 0x%" PFMT32x "\n", addr); + return NULL; + } + + if (hex_get_loop_flag(p) == HEX_LOOP_0) { + rz_pvector_push(p->il_ops, &hex_endloop0_op); + } else if (hex_get_loop_flag(p) == HEX_LOOP_1) { + rz_pvector_push(p->il_ops, &hex_endloop1_op); + } else if (hex_get_loop_flag(p) == HEX_LOOP_01) { + rz_pvector_push(p->il_ops, &hex_endloop01_op); + } + + rz_pvector_push(p->il_ops, &hex_pkt_commit); + // Add a jump to the next packet. + rz_pvector_push(p->il_ops, &hex_next_jump_to_next_pkt); + + check_for_jumps(p, &might_has_jumped); + + return hex_pkt_to_il_seq(p); +} + +static void log_reg_read(RZ_BORROW HexPkt *pkt, ut8 reg_num, HexRegClass reg_class, bool tmp_reg) { + rz_return_if_fail(pkt); + if (reg_num > 63 || (reg_class == HEX_REG_CLASS_PRED_REGS && reg_num > 3)) { + rz_warn_if_reached(); + RZ_LOG_WARN("Register number %d should not be greater then 63 (gprs) or 3 (predicates).", reg_num); + } + switch (reg_class) { + default: + rz_warn_if_reached(); + RZ_LOG_WARN("Register reads of register class %d are not yet tracked!", reg_class); + break; + case HEX_REG_CLASS_DOUBLE_REGS: + case HEX_REG_CLASS_GENERAL_DOUBLE_LOW8_REGS: + if (tmp_reg) { + rz_bv_set(pkt->il_op_stats.gpr_tmp_read, (reg_num + 1), true); + } else { + rz_bv_set(pkt->il_op_stats.gpr_read, (reg_num + 1), true); + } + // fallthrough + case HEX_REG_CLASS_INT_REGS: + case HEX_REG_CLASS_INT_REGS_LOW8: + case HEX_REG_CLASS_GENERAL_SUB_REGS: + if 
(tmp_reg) { + rz_bv_set(pkt->il_op_stats.gpr_tmp_read, reg_num, true); + } else { + rz_bv_set(pkt->il_op_stats.gpr_read, reg_num, true); + } + break; + case HEX_REG_CLASS_CTR_REGS64: + if (tmp_reg) { + rz_bv_set(pkt->il_op_stats.ctr_tmp_read, (reg_num + 1), true); + } else { + rz_bv_set(pkt->il_op_stats.ctr_read, (reg_num + 1), true); + } + // fallthrough + case HEX_REG_CLASS_MOD_REGS: + case HEX_REG_CLASS_CTR_REGS: + if (tmp_reg) { + rz_bv_set(pkt->il_op_stats.ctr_tmp_read, reg_num, true); + } else { + rz_bv_set(pkt->il_op_stats.ctr_read, reg_num, true); + } + break; + case HEX_REG_CLASS_PRED_REGS: + if (tmp_reg) { + rz_bv_set(pkt->il_op_stats.pred_tmp_read, reg_num, true); + } else { + rz_bv_set(pkt->il_op_stats.pred_read, reg_num, true); + } + break; + } +} + +static inline void log_pred_write_slot(HexInsnPktBundle *bundle, ut32 pred_num) { + ut32 pos = (pred_num * HEX_LOG_SLOT_LOG_WIDTH); + rz_bv_set_range(bundle->pkt->il_op_stats.pred_written, HEX_LOG_SLOT_BIT_OFF + pos, HEX_LOG_SLOT_BIT_OFF + pos + 2, false); + rz_bv_set(bundle->pkt->il_op_stats.pred_written, bundle->insn->slot + HEX_LOG_SLOT_BIT_OFF + pos, true); +} + +static void log_reg_write(RZ_BORROW HexInsnPktBundle *bundle, ut8 reg_num, HexRegClass reg_class, bool read, bool tmp_reg) { + rz_return_if_fail(bundle); + HexPkt *pkt = bundle->pkt; + + if (reg_num > 63 || (reg_class == HEX_REG_CLASS_PRED_REGS && reg_num > 3)) { + rz_warn_if_reached(); + RZ_LOG_WARN("Register number %d should not be greater then 63 (gprs) or 3 (predicates).", reg_num); + } + switch (reg_class) { + default: + rz_warn_if_reached(); + RZ_LOG_WARN("Register writes of register class %d are not yet tracked!", reg_class); + break; + case HEX_REG_CLASS_DOUBLE_REGS: + case HEX_REG_CLASS_GENERAL_DOUBLE_LOW8_REGS: + rz_bv_set(pkt->il_op_stats.gpr_written, (reg_num + 1), true); + // fallthrough + case HEX_REG_CLASS_INT_REGS: + case HEX_REG_CLASS_INT_REGS_LOW8: + case HEX_REG_CLASS_GENERAL_SUB_REGS: + 
rz_bv_set(pkt->il_op_stats.gpr_written, reg_num, true); + break; + case HEX_REG_CLASS_CTR_REGS64: + if (hex_ctr_immut_masks[reg_num + 1] != HEX_IMMUTABLE_REG) { + rz_bv_set(pkt->il_op_stats.ctr_written, (reg_num + 1), true); + } + // fallthrough + case HEX_REG_CLASS_MOD_REGS: + case HEX_REG_CLASS_CTR_REGS: + if (hex_ctr_immut_masks[reg_num] != HEX_IMMUTABLE_REG) { + rz_bv_set(pkt->il_op_stats.ctr_written, reg_num, true); + } + break; + case HEX_REG_CLASS_PRED_REGS: + rz_bv_set(pkt->il_op_stats.pred_written, reg_num, true); + if (bundle->insn) { + log_pred_write_slot(bundle, reg_num); + } + break; + } +} + +static ut32 get_last_slot_w_to_p(const HexPkt *pkt, ut32 pred_num) { + rz_return_val_if_fail(pkt, false); + ut32 slots = (rz_bv_to_ut32(pkt->il_op_stats.pred_written) >> HEX_LOG_SLOT_BIT_OFF); + return (slots >> (pred_num * HEX_LOG_SLOT_LOG_WIDTH)) & HEX_LOG_SLOT_LOG_MASK; +} + +/** + * \brief Checks if another slot wrote to a given predicate reg before. + * + * \param bundle The bundle currently in use. + * \param pred_num The number of the predicate register to check. + * + * \return true The predicate was written before by another slot. + * \return false The predicate was not written by another slot. 
+ */ +static bool other_slot_wrote_to_pred(const HexInsnPktBundle *bundle, ut32 pred_num) { + rz_return_val_if_fail(bundle && bundle->pkt && (pred_num < 4), false); + const HexPkt *pkt = bundle->pkt; + if (!bundle->insn) { + // Non instruction ops + return rz_bv_get(pkt->il_op_stats.pred_written, 1 << pred_num); + } + bool pw = rz_bv_get(pkt->il_op_stats.pred_written, 1 << pred_num); + bool slot_w = get_last_slot_w_to_p(bundle->pkt, pred_num) != bundle->insn->slot; + return pw && slot_w; +} + +static inline RzILOpPure *get_masked_reg_val(RzILOpPure *reg_val, RzILOpPure *val, ut32 mask) { + RzILOpPure *masked_val = LOGAND(val, LOGNOT(U32(mask))); + RzILOpPure *masked_reg = LOGAND(reg_val, U32(mask)); + return LOGOR(masked_reg, masked_val); +} + +/** + * \brief Writes the given value to the register specified in \p op and logs the write. + * If the register is a double register, each of its sub-registers are written separately. + * The double register itself will *not* be written. + * + * \param pkt The currently executed packet. + * \param op The HexOp of the register to write. + * \param val The value to write. + * + * \return The effect which writes the register or NULL in case of failure. + */ +RZ_IPI RZ_OWN RzILOpEffect *hex_write_reg(RZ_BORROW HexInsnPktBundle *bundle, const HexOp *op, RzILOpPure *val) { + rz_return_val_if_fail(bundle && op && val, NULL); + + const char *high_name = NULL; + const char *low_name = NULL; + RzILOpPure *high_val = NULL; + RzILOpPure *low_val = NULL; + RzILOpEffect *p3_0_write_seq = NULL; // If C4 (P3:0) is written this is non-NULL. 
+ ut32 reg_num = hex_resolve_reg_enum_id(op->class, op->op.reg); + ut32 dest_width = HEX_GPR_WIDTH; + switch (op->class) { + default: + rz_warn_if_reached(); + RZ_LOG_WARN("Writing ops of class %d is not implemented yet.", op->class); + return NULL; + case HEX_REG_CLASS_DOUBLE_REGS: + case HEX_REG_CLASS_GENERAL_DOUBLE_LOW8_REGS: + high_name = hex_get_reg_in_class(HEX_REG_CLASS_INT_REGS, reg_num + 1, false, true, true); + if (!high_name) { + return NULL; + } + high_val = SHIFTR0(DUP(val), U8(HEX_GPR_WIDTH)); + // fallthrough + case HEX_REG_CLASS_INT_REGS: + case HEX_REG_CLASS_INT_REGS_LOW8: + case HEX_REG_CLASS_GENERAL_SUB_REGS: + low_name = hex_get_reg_in_class(HEX_REG_CLASS_INT_REGS, reg_num, false, true, true); + if (!low_name) { + return NULL; + } + low_val = CAST(HEX_GPR_WIDTH, IL_FALSE, val); + break; + case HEX_REG_CLASS_CTR_REGS64: + if (hex_ctr_immut_masks[reg_num + 1] != HEX_IMMUTABLE_REG) { + high_name = hex_get_reg_in_class(HEX_REG_CLASS_CTR_REGS, reg_num + 1, false, true, true); + if (!high_name) { + return NULL; + } + high_val = SHIFTR0(DUP(val), U8(HEX_GPR_WIDTH)); + if (hex_ctr_immut_masks[reg_num + 1] != 0) { + high_val = get_masked_reg_val(VARG(high_name), CAST(HEX_GPR_WIDTH, IL_FALSE, high_val), hex_ctr_immut_masks[reg_num + 1]); + } + } + // fallthrough + case HEX_REG_CLASS_MOD_REGS: + case HEX_REG_CLASS_CTR_REGS: + if (hex_ctr_immut_masks[reg_num] != HEX_IMMUTABLE_REG) { + low_name = hex_get_reg_in_class(HEX_REG_CLASS_CTR_REGS, reg_num, false, true, true); + if (!low_name) { + return NULL; + } + low_val = CAST(HEX_GPR_WIDTH, IL_FALSE, val); + if (hex_ctr_immut_masks[reg_num] != 0) { + low_val = get_masked_reg_val(VARG(low_name), low_val, hex_ctr_immut_masks[reg_num]); + } + if (reg_num == 4) { + HexOp pred_op = { 0 }; + pred_op.class = HEX_REG_CLASS_PRED_REGS; + pred_op.op.reg = 0; + p3_0_write_seq = hex_write_reg(bundle, &pred_op, CAST(8, IL_FALSE, DUP(low_val))); + pred_op.op.reg = 1; + p3_0_write_seq = SEQ2(hex_write_reg(bundle, &pred_op, 
CAST(8, IL_FALSE, SHIFTR0(DUP(low_val), U8(8)))), p3_0_write_seq); + pred_op.op.reg = 2; + p3_0_write_seq = SEQ2(hex_write_reg(bundle, &pred_op, CAST(8, IL_FALSE, SHIFTR0(DUP(low_val), U8(16)))), p3_0_write_seq); + pred_op.op.reg = 3; + p3_0_write_seq = SEQ2(hex_write_reg(bundle, &pred_op, CAST(8, IL_FALSE, SHIFTR0(DUP(low_val), U8(24)))), p3_0_write_seq); + break; + } + } + break; + case HEX_REG_CLASS_PRED_REGS: + low_name = hex_get_reg_in_class(HEX_REG_CLASS_PRED_REGS, reg_num, false, true, true); + if (!low_name) { + return NULL; + } + if (other_slot_wrote_to_pred(bundle, reg_num)) { + // If the register was written before by another slot, the values get ANDed. + low_val = LOGAND(VARG(low_name), val); + } else { + low_val = val; + } + dest_width = HEX_PRED_WIDTH; + break; + } + RzILOpEffect *write_high = high_val ? SETG(high_name, CAST(dest_width, IL_FALSE, high_val)) : NULL; + RzILOpEffect *write_low = low_val ? SETG(low_name, CAST(dest_width, IL_FALSE, low_val)) : NULL; + if (p3_0_write_seq) { + write_low = SEQ2(write_low, p3_0_write_seq); + } + log_reg_write(bundle, reg_num, op->class, false, true); + + if (write_high && write_low) { + return SEQ2(write_low, write_high); + } else if (write_low) { + return write_low; + } else if (write_high) { + return write_high; + } + return EMPTY(); +} + +static inline bool read_cond_faulty(RzILOpPure *low_val, RzILOpPure *high_val, ut32 val_width) { + if (!low_val || val_width == 0 || (val_width % 8 != 0)) { + return true; + } + if (val_width == HEX_GPR64_WIDTH && !high_val) { + return true; + } + return false; +} + +/** + * \brief Checks for rw registers (e.g. Rx) if reads and writes overlap. + * + * \param pkt The packet of the current instruction. + * \param op The operand to check. + * \param reg_num The number of the register to check. + * + * \return true If the register is a "x" register and it was read and written before. + * \return false Otherwise. 
+ */ +static bool x_reg_rw_overlap(const HexPkt *pkt, const HexOp *op, ut32 reg_num) { + switch (op->class) { + default: + rz_warn_if_reached(); + RZ_LOG_WARN("Checking rw overlap of class %d not implemented yet.", op->class); + return false; + case HEX_REG_CLASS_INT_REGS: + case HEX_REG_CLASS_INT_REGS_LOW8: + case HEX_REG_CLASS_GENERAL_SUB_REGS: + case HEX_REG_CLASS_DOUBLE_REGS: + case HEX_REG_CLASS_GENERAL_DOUBLE_LOW8_REGS: + return (rz_bv_get(pkt->il_op_stats.gpr_written, reg_num)) && (rz_bv_get(pkt->il_op_stats.gpr_read, reg_num)) && op->isa_id == 'x'; + case HEX_REG_CLASS_MOD_REGS: + case HEX_REG_CLASS_CTR_REGS: + case HEX_REG_CLASS_CTR_REGS64: + return (rz_bv_get(pkt->il_op_stats.ctr_written, reg_num)) && (rz_bv_get(pkt->il_op_stats.ctr_read, reg_num)) && op->isa_id == 'x'; + case HEX_REG_CLASS_PRED_REGS: + return (rz_bv_get(pkt->il_op_stats.pred_written, reg_num)) && (rz_bv_get(pkt->il_op_stats.pred_read, reg_num)) && op->isa_id == 'x'; + } +} + +/** + * \brief Reads a value from the register specified in \p op and logs the read. + * If the register is a double register, each of its sub-registers are read separately. + * The double register itself will *not* be read. + * + * \param pkt The currently executed packet. + * \param op The HexOp of the register to read. + * \param tmp_reg If true, the .new register will be read. Otherwise simply . + * + * \return The pure which with the value read or NULL in case of failure. 
+ */ +RZ_IPI RZ_OWN RzILOpPure *hex_read_reg(RZ_BORROW HexPkt *pkt, const HexOp *op, bool tmp_reg) { + rz_return_val_if_fail(pkt && op, NULL); + + const char *high_name = NULL; + const char *low_name = NULL; + RzILOpPure *high_val = NULL; + RzILOpPure *low_val = NULL; + ut32 reg_num = hex_resolve_reg_enum_id(op->class, op->op.reg); + ut32 val_width = HEX_GPR_WIDTH; + switch (op->class) { + default: + rz_warn_if_reached(); + RZ_LOG_WARN("Writing ops of class %d is not implemented yet.", op->class); + return NULL; + case HEX_REG_CLASS_DOUBLE_REGS: + case HEX_REG_CLASS_GENERAL_DOUBLE_LOW8_REGS: + if (x_reg_rw_overlap(pkt, op, reg_num + 1)) { + // If read and writes overlap, return the new register for each read. + tmp_reg = true; + } + high_name = hex_get_reg_in_class(HEX_REG_CLASS_INT_REGS, reg_num + 1, false, tmp_reg, true); + if (!high_name) { + return NULL; + } + high_val = SHIFTL0(CAST(HEX_GPR64_WIDTH, IL_FALSE, VARG(high_name)), U8(HEX_GPR_WIDTH)); + val_width = HEX_GPR64_WIDTH; + // fallthrough + case HEX_REG_CLASS_INT_REGS: + case HEX_REG_CLASS_INT_REGS_LOW8: + case HEX_REG_CLASS_GENERAL_SUB_REGS: + if (x_reg_rw_overlap(pkt, op, reg_num)) { + // If read and writes overlap, return the new register for each read. + tmp_reg = true; + } + low_name = hex_get_reg_in_class(HEX_REG_CLASS_INT_REGS, reg_num, false, tmp_reg, true); + if (!low_name) { + return NULL; + } + low_val = VARG(low_name); + break; + case HEX_REG_CLASS_CTR_REGS64: + if (x_reg_rw_overlap(pkt, op, reg_num + 1)) { + // If read and writes overlap, return the new register for each read. + tmp_reg = true; + } + high_name = hex_get_reg_in_class(HEX_REG_CLASS_CTR_REGS, reg_num + 1, false, tmp_reg, true); + if (!high_name) { + return NULL; + } + if (reg_num + 1 == 9) { + // C9 = PC. 
Does not exist in VM as var + high_val = SHIFTL0(CAST(HEX_GPR64_WIDTH, IL_FALSE, U32(pkt->pkt_addr)), U8(HEX_GPR_WIDTH)); + } else { + high_val = SHIFTL0(CAST(HEX_GPR64_WIDTH, IL_FALSE, VARG(high_name)), U8(HEX_GPR_WIDTH)); + } + val_width = HEX_GPR64_WIDTH; + // fallthrough + case HEX_REG_CLASS_MOD_REGS: + case HEX_REG_CLASS_CTR_REGS: + if (x_reg_rw_overlap(pkt, op, reg_num)) { + // If read and writes overlap, return the new register for each read. + tmp_reg = true; + } + if (reg_num == 4) { + // C4 alias P3:0 register is the concatenation of all predicate registers. + HexOp pred_op = { 0 }; + pred_op.class = HEX_REG_CLASS_PRED_REGS; + pred_op.op.reg = 0; + low_val = hex_read_reg(pkt, &pred_op, tmp_reg); + pred_op.op.reg = 1; + low_val = APPEND(hex_read_reg(pkt, &pred_op, tmp_reg), low_val); + pred_op.op.reg = 2; + low_val = APPEND(hex_read_reg(pkt, &pred_op, tmp_reg), low_val); + pred_op.op.reg = 3; + low_val = APPEND(hex_read_reg(pkt, &pred_op, tmp_reg), low_val); + break; + } + low_name = hex_get_reg_in_class(HEX_REG_CLASS_CTR_REGS, reg_num, false, tmp_reg, true); + if (!low_name) { + return NULL; + } + if (reg_num == 9) { + low_val = U32(pkt->pkt_addr); + } else { + low_val = VARG(low_name); + } + break; + case HEX_REG_CLASS_PRED_REGS: + if (x_reg_rw_overlap(pkt, op, reg_num)) { + // If read and writes overlap, return the new register for each read. 
+ tmp_reg = true; + } + low_name = hex_get_reg_in_class(HEX_REG_CLASS_PRED_REGS, reg_num, false, tmp_reg, true); + if (!low_name) { + return NULL; + } + return VARG(low_name); + } + if (read_cond_faulty(low_val, high_val, val_width)) { + rz_warn_if_reached(); + return NULL; + } + log_reg_read(pkt, reg_num, op->class, tmp_reg); + + if (val_width == HEX_GPR64_WIDTH) { + return LOGOR(high_val, CAST(HEX_GPR64_WIDTH, IL_FALSE, low_val)); + } + return low_val; +} + +RZ_IPI RZ_OWN RzILOpEffect *hex_cancel_slot(RZ_BORROW HexPkt *pkt, ut8 slot) { + rz_return_val_if_fail(pkt, NULL); + if (slot > 3) { + rz_warn_if_reached(); + RZ_LOG_WARN("Slot %d does not exist!", slot); + } + rz_bv_set(pkt->il_op_stats.slot_cancelled, slot, true); + return EMPTY(); +} + +RzILOpPure *hex_get_corresponding_cs(RZ_BORROW HexPkt *pkt, const HexOp *Mu) { + rz_return_val_if_fail(Mu && Mu->class == HEX_REG_CLASS_MOD_REGS, NULL); + HexOp cs_reg = { 0 }; + if (Mu->op.reg == 0) { + // M0 (C6) return CS0 + cs_reg.class = HEX_REG_CLASS_CTR_REGS; + cs_reg.op.reg = 12; + return hex_read_reg(pkt, &cs_reg, true); + } else if (Mu->op.reg == 1) { + // M1 (C7) return CS1 + cs_reg.class = HEX_REG_CLASS_CTR_REGS; + cs_reg.op.reg = 13; + return hex_read_reg(pkt, &cs_reg, true); + } + rz_warn_if_reached(); + return NULL; +} + +RZ_IPI void hex_reset_il_pkt_stats(HexILExecData *stats) { + rz_bv_free(stats->slot_cancelled); + rz_bv_free(stats->ctr_written); + rz_bv_free(stats->gpr_written); + rz_bv_free(stats->pred_written); + rz_bv_free(stats->ctr_read); + rz_bv_free(stats->gpr_read); + rz_bv_free(stats->pred_read); + rz_bv_free(stats->ctr_tmp_read); + rz_bv_free(stats->gpr_tmp_read); + rz_bv_free(stats->pred_tmp_read); + stats->slot_cancelled = rz_bv_new(64); + stats->ctr_written = rz_bv_new(64); + stats->gpr_written = rz_bv_new(64); + stats->pred_written = rz_bv_new(32); + stats->ctr_read = rz_bv_new(64); + stats->gpr_read = rz_bv_new(64); + stats->pred_read = rz_bv_new(32); + stats->ctr_tmp_read = rz_bv_new(64); 
+ stats->gpr_tmp_read = rz_bv_new(64); + stats->pred_tmp_read = rz_bv_new(32); +} + +#include diff --git a/librz/arch/isa/hexagon/hexagon_il.h b/librz/arch/isa/hexagon/hexagon_il.h new file mode 100644 index 00000000000..93fffd23ce6 --- /dev/null +++ b/librz/arch/isa/hexagon/hexagon_il.h @@ -0,0 +1,2103 @@ +// SPDX-FileCopyrightText: 2021 Rot127 +// SPDX-License-Identifier: LGPL-3.0-only + +// LLVM commit: b6f51787f6c8e77143f0aef6b58ddc7c55741d5c +// LLVM commit date: 2023-11-15 07:10:59 -0800 (ISO 8601 format) +// Date of code generation: 2024-03-16 06:22:39-05:00 +//======================================== +// The following code is generated. +// Do not edit. Repository of code generator: +// https://github.com/rizinorg/rz-hexagon + +#ifndef HEXAGON_IL_H +#define HEXAGON_IL_H + +#include +#include +#include + +#define WRITE_REG(pkt, op, val) hex_write_reg(pkt, op, val) +#define READ_REG(pkt, op, tmp_reg) hex_read_reg(pkt, op, tmp_reg) +#define ISA2REG(hi, var, tmp_reg) hex_isa_to_reg(hi, var, tmp_reg) +#define ISA2IMM(hi, var) hex_isa_to_imm(hi, var) +#define HEX_REGFIELD(prop, reg) hex_get_rf_property_val(prop, reg) +#define HEX_EXTRACT64(val, start, len) hex_extract64(val, start, len) +#define HEX_SEXTRACT64(val, start, len) hex_sextract64(val, start, len) +#define HEX_DEPOSIT64(val, start, len, fieldval) hex_deposit64(val, start, len, fieldval) +#define HEX_GET_NPC(pkt) hex_get_npc(pkt) +#define HEX_WRITE_GLOBAL(name, val) hex_write_global(name, val) +#define INC(val, size) ADD(val, UN(size, 1)) +#define DEC(val, size) SUB(val, UN(size, 1)) +#define HEX_STORE_SLOT_CANCELLED(pkt, slot) hex_cancel_slot(pkt, slot) +#define HEX_FCIRC_ADD(bundle, RxV, offset, mu, CS) hex_fcircadd(bundle, RxV, offset, mu, CS) +#define HEX_GET_CORRESPONDING_CS(pkt, Mu) hex_get_corresponding_cs(pkt, Mu) +#define HEX_GET_INSN_RMODE(insn) (insn->fround_mode) +#define HEX_D_TO_SINT(mode, fval) F2SINT(64, mode, fval) +#define HEX_F_TO_SINT(mode, fval) F2SINT(32, mode, fval) +#define 
HEX_D_TO_INT(mode, fval) F2INT(64, mode, fval) +#define HEX_F_TO_INT(mode, fval) F2INT(32, mode, fval) +#define HEX_SINT_TO_D(mode, fval) SINT2F(64, mode, fval) +#define HEX_SINT_TO_F(mode, fval) SINT2F(32, mode, fval) +#define HEX_INT_TO_D(mode, fval) INT2F(64, mode, fval) +#define HEX_INT_TO_F(mode, fval) INT2F(32, mode, fval) + +#define HEX_IMMUTABLE_REG (~0) +#define HEX_NOT_MASKED 0 + +/// Immutable bits of CTR registers as in QEMU. +static const ut64 hex_ctr_immut_masks[32] = { + [HEX_REG_CTR_REGS_C8] = 0xc13000c0, // USR + [HEX_REG_CTR_REGS_C9] = HEX_IMMUTABLE_REG, // PC + [HEX_REG_CTR_REGS_C11] = 0x3f, // GP + [HEX_REG_CTR_REGS_C14] = HEX_IMMUTABLE_REG, // UPCYCLELO + [HEX_REG_CTR_REGS_C15] = HEX_IMMUTABLE_REG, // UPCYCLEHI + [HEX_REG_CTR_REGS_C30] = HEX_IMMUTABLE_REG, // UTIMERLO + [HEX_REG_CTR_REGS_C31] = HEX_IMMUTABLE_REG, // UTIMERHI +}; + +RZ_IPI bool hex_shuffle_insns(RZ_INOUT HexPkt *p); +RZ_IPI RzILOpEffect *hex_get_il_op(const ut32 addr, const bool get_pkt_op); +RZ_IPI RZ_OWN RzILOpPure *hex_get_rf_property_val(const HexRegFieldProperty property, const HexRegField field); +RZ_IPI RZ_OWN RzILOpEffect *hex_get_npc(const HexPkt *pkt); +RZ_IPI RZ_OWN RzILOpEffect *hex_il_op_jump_flag_init(HexInsnPktBundle *bundle); +RZ_IPI RZ_OWN RzILOpEffect *hex_il_op_next_pkt_jmp(HexInsnPktBundle *bundle); +RZ_IPI RZ_OWN RzILOpEffect *hex_commit_packet(HexInsnPktBundle *bundle); +RZ_IPI RZ_OWN RzILOpEffect *hex_write_reg(RZ_BORROW HexInsnPktBundle *bundle, const HexOp *op, RzILOpPure *val); +RZ_IPI RZ_OWN RzILOpPure *hex_read_reg(RZ_BORROW HexPkt *pkt, const HexOp *op, bool tmp_reg); +RZ_IPI RZ_OWN RzILOpEffect *hex_cancel_slot(RZ_BORROW HexPkt *pkt, ut8 slot); +RZ_IPI void hex_reset_il_pkt_stats(HexILExecData *stats); +RzILOpPure *hex_get_corresponding_cs(RZ_BORROW HexPkt *pkt, const HexOp *Mu); +RzILOpEffect *hex_il_op_a2_abs(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_absp(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_abssat(HexInsnPktBundle 
*bundle); +RzILOpEffect *hex_il_op_a2_add(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_addh_h16_hh(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_addh_h16_hl(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_addh_h16_lh(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_addh_h16_ll(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_addh_h16_sat_hh(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_addh_h16_sat_hl(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_addh_h16_sat_lh(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_addh_h16_sat_ll(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_addh_l16_hl(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_addh_l16_ll(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_addh_l16_sat_hl(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_addh_l16_sat_ll(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_addi(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_addp(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_addpsat(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_addsat(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_addsph(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_addspl(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_and(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_andir(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_andp(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_aslh(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_asrh(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_combine_hh(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_combine_hl(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_combine_lh(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_combine_ll(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_combineii(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_combinew(HexInsnPktBundle *bundle); +RzILOpEffect 
*hex_il_op_a2_max(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_maxp(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_maxu(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_maxup(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_min(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_minp(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_minu(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_minup(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_negp(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_negsat(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_nop(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_notp(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_or(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_orir(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_orp(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_paddf(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_paddfnew(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_paddif(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_paddifnew(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_paddit(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_padditnew(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_paddt(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_paddtnew(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_pandf(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_pandfnew(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_pandt(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_pandtnew(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_porf(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_porfnew(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_port(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_portnew(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_psubf(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_psubfnew(HexInsnPktBundle 
*bundle); +RzILOpEffect *hex_il_op_a2_psubt(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_psubtnew(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_pxorf(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_pxorfnew(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_pxort(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_pxortnew(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_roundsat(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_sat(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_satb(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_sath(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_satub(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_satuh(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_sub(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_subh_h16_hh(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_subh_h16_hl(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_subh_h16_lh(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_subh_h16_ll(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_subh_h16_sat_hh(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_subh_h16_sat_hl(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_subh_h16_sat_lh(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_subh_h16_sat_ll(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_subh_l16_hl(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_subh_l16_ll(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_subh_l16_sat_hl(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_subh_l16_sat_ll(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_subp(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_subri(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_subsat(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_svaddh(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_svaddhs(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_svadduhs(HexInsnPktBundle 
*bundle); +RzILOpEffect *hex_il_op_a2_svavgh(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_svavghs(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_svnavgh(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_svsubh(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_svsubhs(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_svsubuhs(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_swiz(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_sxtb(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_sxth(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_sxtw(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_tfr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_tfrcrr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_tfrih(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_tfril(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_tfrrcr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_tfrsi(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_vabsh(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_vabshsat(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_vabsw(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_vabswsat(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_vaddh(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_vaddhs(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_vaddub(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_vaddubs(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_vadduhs(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_vaddw(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_vaddws(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_vavgh(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_vavghcr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_vavghr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_vavgub(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_vavgubr(HexInsnPktBundle *bundle); 
+RzILOpEffect *hex_il_op_a2_vavguh(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_vavguhr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_vavguw(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_vavguwr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_vavgw(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_vavgwcr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_vavgwr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_vcmpbeq(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_vcmpbgtu(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_vcmpheq(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_vcmphgt(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_vcmphgtu(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_vcmpweq(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_vcmpwgt(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_vcmpwgtu(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_vconj(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_vmaxb(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_vmaxh(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_vmaxub(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_vmaxuh(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_vmaxuw(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_vmaxw(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_vminb(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_vminh(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_vminub(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_vminuh(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_vminuw(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_vminw(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_vnavgh(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_vnavghcr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_vnavghr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_vnavgw(HexInsnPktBundle *bundle); 
+RzILOpEffect *hex_il_op_a2_vnavgwcr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_vnavgwr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_vraddub(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_vraddub_acc(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_vrsadub(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_vrsadub_acc(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_vsubh(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_vsubhs(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_vsubub(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_vsububs(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_vsubuhs(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_vsubw(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_vsubws(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_xor(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_xorp(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a2_zxth(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a4_addp_c(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a4_andn(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a4_andnp(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a4_bitsplit(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a4_bitspliti(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a4_boundscheck_hi(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a4_boundscheck_lo(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a4_cmpbeq(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a4_cmpbeqi(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a4_cmpbgt(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a4_cmpbgti(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a4_cmpbgtu(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a4_cmpbgtui(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a4_cmpheq(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a4_cmpheqi(HexInsnPktBundle *bundle); +RzILOpEffect 
*hex_il_op_a4_cmphgt(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a4_cmphgti(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a4_cmphgtu(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a4_cmphgtui(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a4_combineii(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a4_combineir(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a4_combineri(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a4_cround_ri(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a4_cround_rr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a4_ext(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a4_modwrapu(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a4_orn(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a4_ornp(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a4_paslhf(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a4_paslhfnew(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a4_paslht(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a4_paslhtnew(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a4_pasrhf(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a4_pasrhfnew(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a4_pasrht(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a4_pasrhtnew(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a4_psxtbf(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a4_psxtbfnew(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a4_psxtbt(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a4_psxtbtnew(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a4_psxthf(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a4_psxthfnew(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a4_psxtht(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a4_psxthtnew(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a4_pzxtbf(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a4_pzxtbfnew(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a4_pzxtbt(HexInsnPktBundle 
*bundle); +RzILOpEffect *hex_il_op_a4_pzxtbtnew(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a4_pzxthf(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a4_pzxthfnew(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a4_pzxtht(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a4_pzxthtnew(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a4_rcmpeq(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a4_rcmpeqi(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a4_rcmpneq(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a4_rcmpneqi(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a4_round_ri(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a4_round_ri_sat(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a4_round_rr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a4_round_rr_sat(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a4_subp_c(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a4_tfrcpp(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a4_tfrpcp(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a4_tlbmatch(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a4_vcmpbeq_any(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a4_vcmpbeqi(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a4_vcmpbgt(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a4_vcmpbgti(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a4_vcmpbgtui(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a4_vcmpheqi(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a4_vcmphgti(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a4_vcmphgtui(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a4_vcmpweqi(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a4_vcmpwgti(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a4_vcmpwgtui(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a4_vrmaxh(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a4_vrmaxuh(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a4_vrmaxuw(HexInsnPktBundle *bundle); 
+RzILOpEffect *hex_il_op_a4_vrmaxw(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a4_vrminh(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a4_vrminuh(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a4_vrminuw(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a4_vrminw(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a5_acs(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a5_vaddhubs(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a6_vcmpbeq_notany(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a6_vminub_rdp(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a7_clip(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a7_croundd_ri(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a7_croundd_rr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_a7_vclip(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_c2_all8(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_c2_and(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_c2_andn(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_c2_any8(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_c2_bitsclr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_c2_bitsclri(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_c2_bitsset(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_c2_ccombinewf(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_c2_ccombinewnewf(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_c2_ccombinewnewt(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_c2_ccombinewt(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_c2_cmoveif(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_c2_cmoveit(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_c2_cmovenewif(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_c2_cmovenewit(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_c2_cmpeq(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_c2_cmpeqi(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_c2_cmpeqp(HexInsnPktBundle *bundle); +RzILOpEffect 
*hex_il_op_c2_cmpgt(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_c2_cmpgti(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_c2_cmpgtp(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_c2_cmpgtu(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_c2_cmpgtui(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_c2_cmpgtup(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_c2_mask(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_c2_mux(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_c2_muxii(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_c2_muxir(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_c2_muxri(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_c2_not(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_c2_or(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_c2_orn(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_c2_tfrpr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_c2_tfrrp(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_c2_vitpack(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_c2_vmux(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_c2_xor(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_c4_addipc(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_c4_and_and(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_c4_and_andn(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_c4_and_or(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_c4_and_orn(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_c4_cmplte(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_c4_cmpltei(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_c4_cmplteu(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_c4_cmplteui(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_c4_cmpneq(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_c4_cmpneqi(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_c4_fastcorner9(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_c4_fastcorner9_not(HexInsnPktBundle *bundle); +RzILOpEffect 
*hex_il_op_c4_nbitsclr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_c4_nbitsclri(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_c4_nbitsset(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_c4_or_and(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_c4_or_andn(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_c4_or_or(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_c4_or_orn(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_f2_conv_d2df(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_f2_conv_d2sf(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_f2_conv_df2d(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_f2_conv_df2d_chop(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_f2_conv_df2sf(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_f2_conv_df2ud(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_f2_conv_df2ud_chop(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_f2_conv_df2uw(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_f2_conv_df2uw_chop(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_f2_conv_df2w(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_f2_conv_df2w_chop(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_f2_conv_sf2d(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_f2_conv_sf2d_chop(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_f2_conv_sf2df(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_f2_conv_sf2ud(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_f2_conv_sf2ud_chop(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_f2_conv_sf2uw(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_f2_conv_sf2uw_chop(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_f2_conv_sf2w(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_f2_conv_sf2w_chop(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_f2_conv_ud2df(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_f2_conv_ud2sf(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_f2_conv_uw2df(HexInsnPktBundle *bundle); +RzILOpEffect 
*hex_il_op_f2_conv_uw2sf(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_f2_conv_w2df(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_f2_conv_w2sf(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_f2_dfadd(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_f2_dfclass(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_f2_dfcmpeq(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_f2_dfcmpge(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_f2_dfcmpgt(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_f2_dfcmpuo(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_f2_dfimm_n(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_f2_dfimm_p(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_f2_dfmax(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_f2_dfmin(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_f2_dfmpyhh(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_f2_dfmpylh(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_f2_dfmpyll(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_f2_dfsub(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_f2_sfadd(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_f2_sfclass(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_f2_sfcmpeq(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_f2_sfcmpge(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_f2_sfcmpgt(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_f2_sfcmpuo(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_f2_sffixupd(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_f2_sffixupn(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_f2_sffixupr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_f2_sffma(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_f2_sffma_lib(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_f2_sffma_sc(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_f2_sffms(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_f2_sffms_lib(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_f2_sfimm_n(HexInsnPktBundle 
*bundle); +RzILOpEffect *hex_il_op_f2_sfimm_p(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_f2_sfinvsqrta(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_f2_sfmax(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_f2_sfmin(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_f2_sfmpy(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_f2_sfrecipa(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_f2_sfsub(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_g4_tfrgcpp(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_g4_tfrgcrr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_g4_tfrgpcp(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_g4_tfrgrcr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j2_call(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j2_callf(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j2_callr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j2_callrf(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j2_callrh(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j2_callrt(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j2_callt(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j2_jump(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j2_jumpf(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j2_jumpfnew(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j2_jumpfnewpt(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j2_jumpfpt(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j2_jumpr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j2_jumprf(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j2_jumprfnew(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j2_jumprfnewpt(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j2_jumprfpt(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j2_jumprgtez(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j2_jumprgtezpt(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j2_jumprh(HexInsnPktBundle *bundle); +RzILOpEffect 
*hex_il_op_j2_jumprltez(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j2_jumprltezpt(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j2_jumprnz(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j2_jumprnzpt(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j2_jumprt(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j2_jumprtnew(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j2_jumprtnewpt(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j2_jumprtpt(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j2_jumprz(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j2_jumprzpt(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j2_jumpt(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j2_jumptnew(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j2_jumptnewpt(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j2_jumptpt(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j2_loop0i(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j2_loop0r(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j2_loop1i(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j2_loop1r(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j2_pause(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j2_ploop1si(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j2_ploop1sr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j2_ploop2si(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j2_ploop2sr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j2_ploop3si(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j2_ploop3sr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j2_rte(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j2_trap0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j2_trap1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j2_unpause(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpeq_f_jumpnv_nt(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpeq_f_jumpnv_t(HexInsnPktBundle *bundle); +RzILOpEffect 
*hex_il_op_j4_cmpeq_fp0_jump_nt_part0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpeq_fp0_jump_nt_part1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpeq_fp0_jump_t_part0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpeq_fp0_jump_t_part1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpeq_fp1_jump_nt_part0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpeq_fp1_jump_nt_part1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpeq_fp1_jump_t_part0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpeq_fp1_jump_t_part1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpeq_t_jumpnv_nt(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpeq_t_jumpnv_t(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpeq_tp0_jump_nt_part0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpeq_tp0_jump_nt_part1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpeq_tp0_jump_t_part0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpeq_tp0_jump_t_part1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpeq_tp1_jump_nt_part0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpeq_tp1_jump_nt_part1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpeq_tp1_jump_t_part0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpeq_tp1_jump_t_part1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpeqi_f_jumpnv_nt(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpeqi_f_jumpnv_t(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpeqi_fp0_jump_nt_part0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpeqi_fp0_jump_nt_part1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpeqi_fp0_jump_t_part0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpeqi_fp0_jump_t_part1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpeqi_fp1_jump_nt_part0(HexInsnPktBundle *bundle); +RzILOpEffect 
*hex_il_op_j4_cmpeqi_fp1_jump_nt_part1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpeqi_fp1_jump_t_part0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpeqi_fp1_jump_t_part1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpeqi_t_jumpnv_nt(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpeqi_t_jumpnv_t(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpeqi_tp0_jump_nt_part0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpeqi_tp0_jump_nt_part1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpeqi_tp0_jump_t_part0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpeqi_tp0_jump_t_part1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpeqi_tp1_jump_nt_part0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpeqi_tp1_jump_nt_part1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpeqi_tp1_jump_t_part0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpeqi_tp1_jump_t_part1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpeqn1_f_jumpnv_nt(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpeqn1_f_jumpnv_t(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpeqn1_fp0_jump_nt_part0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpeqn1_fp0_jump_nt_part1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpeqn1_fp0_jump_t_part0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpeqn1_fp0_jump_t_part1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpeqn1_fp1_jump_nt_part0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpeqn1_fp1_jump_nt_part1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpeqn1_fp1_jump_t_part0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpeqn1_fp1_jump_t_part1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpeqn1_t_jumpnv_nt(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpeqn1_t_jumpnv_t(HexInsnPktBundle *bundle); +RzILOpEffect 
*hex_il_op_j4_cmpeqn1_tp0_jump_nt_part0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpeqn1_tp0_jump_nt_part1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpeqn1_tp0_jump_t_part0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpeqn1_tp0_jump_t_part1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpeqn1_tp1_jump_nt_part0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpeqn1_tp1_jump_nt_part1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpeqn1_tp1_jump_t_part0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpeqn1_tp1_jump_t_part1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpgt_f_jumpnv_nt(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpgt_f_jumpnv_t(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpgt_fp0_jump_nt_part0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpgt_fp0_jump_nt_part1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpgt_fp0_jump_t_part0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpgt_fp0_jump_t_part1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpgt_fp1_jump_nt_part0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpgt_fp1_jump_nt_part1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpgt_fp1_jump_t_part0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpgt_fp1_jump_t_part1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpgt_t_jumpnv_nt(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpgt_t_jumpnv_t(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpgt_tp0_jump_nt_part0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpgt_tp0_jump_nt_part1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpgt_tp0_jump_t_part0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpgt_tp0_jump_t_part1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpgt_tp1_jump_nt_part0(HexInsnPktBundle *bundle); +RzILOpEffect 
*hex_il_op_j4_cmpgt_tp1_jump_nt_part1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpgt_tp1_jump_t_part0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpgt_tp1_jump_t_part1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpgti_f_jumpnv_nt(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpgti_f_jumpnv_t(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpgti_fp0_jump_nt_part0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpgti_fp0_jump_nt_part1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpgti_fp0_jump_t_part0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpgti_fp0_jump_t_part1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpgti_fp1_jump_nt_part0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpgti_fp1_jump_nt_part1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpgti_fp1_jump_t_part0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpgti_fp1_jump_t_part1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpgti_t_jumpnv_nt(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpgti_t_jumpnv_t(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpgti_tp0_jump_nt_part0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpgti_tp0_jump_nt_part1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpgti_tp0_jump_t_part0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpgti_tp0_jump_t_part1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpgti_tp1_jump_nt_part0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpgti_tp1_jump_nt_part1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpgti_tp1_jump_t_part0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpgti_tp1_jump_t_part1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpgtn1_f_jumpnv_nt(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpgtn1_f_jumpnv_t(HexInsnPktBundle *bundle); +RzILOpEffect 
*hex_il_op_j4_cmpgtn1_fp0_jump_nt_part0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpgtn1_fp0_jump_nt_part1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpgtn1_fp0_jump_t_part0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpgtn1_fp0_jump_t_part1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpgtn1_fp1_jump_nt_part0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpgtn1_fp1_jump_nt_part1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpgtn1_fp1_jump_t_part0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpgtn1_fp1_jump_t_part1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpgtn1_t_jumpnv_nt(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpgtn1_t_jumpnv_t(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpgtn1_tp0_jump_nt_part0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpgtn1_tp0_jump_nt_part1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpgtn1_tp0_jump_t_part0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpgtn1_tp0_jump_t_part1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpgtn1_tp1_jump_nt_part0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpgtn1_tp1_jump_nt_part1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpgtn1_tp1_jump_t_part0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpgtn1_tp1_jump_t_part1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpgtu_f_jumpnv_nt(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpgtu_f_jumpnv_t(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpgtu_fp0_jump_nt_part0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpgtu_fp0_jump_nt_part1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpgtu_fp0_jump_t_part0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpgtu_fp0_jump_t_part1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpgtu_fp1_jump_nt_part0(HexInsnPktBundle *bundle); +RzILOpEffect 
*hex_il_op_j4_cmpgtu_fp1_jump_nt_part1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpgtu_fp1_jump_t_part0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpgtu_fp1_jump_t_part1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpgtu_t_jumpnv_nt(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpgtu_t_jumpnv_t(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpgtu_tp0_jump_nt_part0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpgtu_tp0_jump_nt_part1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpgtu_tp0_jump_t_part0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpgtu_tp0_jump_t_part1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpgtu_tp1_jump_nt_part0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpgtu_tp1_jump_nt_part1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpgtu_tp1_jump_t_part0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpgtu_tp1_jump_t_part1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpgtui_f_jumpnv_nt(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpgtui_f_jumpnv_t(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpgtui_fp0_jump_nt_part0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpgtui_fp0_jump_nt_part1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpgtui_fp0_jump_t_part0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpgtui_fp0_jump_t_part1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpgtui_fp1_jump_nt_part0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpgtui_fp1_jump_nt_part1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpgtui_fp1_jump_t_part0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpgtui_fp1_jump_t_part1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpgtui_t_jumpnv_nt(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpgtui_t_jumpnv_t(HexInsnPktBundle *bundle); +RzILOpEffect 
*hex_il_op_j4_cmpgtui_tp0_jump_nt_part0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpgtui_tp0_jump_nt_part1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpgtui_tp0_jump_t_part0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpgtui_tp0_jump_t_part1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpgtui_tp1_jump_nt_part0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpgtui_tp1_jump_nt_part1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpgtui_tp1_jump_t_part0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpgtui_tp1_jump_t_part1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmplt_f_jumpnv_nt(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmplt_f_jumpnv_t(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmplt_t_jumpnv_nt(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmplt_t_jumpnv_t(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpltu_f_jumpnv_nt(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpltu_f_jumpnv_t(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpltu_t_jumpnv_nt(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_cmpltu_t_jumpnv_t(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_hintjumpr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_jumpseti(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_jumpsetr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_tstbit0_f_jumpnv_nt(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_tstbit0_f_jumpnv_t(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_tstbit0_fp0_jump_nt_part0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_tstbit0_fp0_jump_nt_part1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_tstbit0_fp0_jump_t_part0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_tstbit0_fp0_jump_t_part1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_tstbit0_fp1_jump_nt_part0(HexInsnPktBundle *bundle); +RzILOpEffect 
*hex_il_op_j4_tstbit0_fp1_jump_nt_part1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_tstbit0_fp1_jump_t_part0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_tstbit0_fp1_jump_t_part1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_tstbit0_t_jumpnv_nt(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_tstbit0_t_jumpnv_t(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_tstbit0_tp0_jump_nt_part0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_tstbit0_tp0_jump_nt_part1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_tstbit0_tp0_jump_t_part0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_tstbit0_tp0_jump_t_part1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_tstbit0_tp1_jump_nt_part0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_tstbit0_tp1_jump_nt_part1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_tstbit0_tp1_jump_t_part0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j4_tstbit0_tp1_jump_t_part1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l2_deallocframe(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l2_loadalignb_io(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l2_loadalignb_pbr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l2_loadalignb_pci(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l2_loadalignb_pcr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l2_loadalignb_pi(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l2_loadalignb_pr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l2_loadalignh_io(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l2_loadalignh_pbr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l2_loadalignh_pci(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l2_loadalignh_pcr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l2_loadalignh_pi(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l2_loadalignh_pr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l2_loadbsw2_io(HexInsnPktBundle *bundle); 
+RzILOpEffect *hex_il_op_l2_loadbsw2_pbr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l2_loadbsw2_pci(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l2_loadbsw2_pcr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l2_loadbsw2_pi(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l2_loadbsw2_pr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l2_loadbsw4_io(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l2_loadbsw4_pbr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l2_loadbsw4_pci(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l2_loadbsw4_pcr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l2_loadbsw4_pi(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l2_loadbsw4_pr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l2_loadbzw2_io(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l2_loadbzw2_pbr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l2_loadbzw2_pci(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l2_loadbzw2_pcr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l2_loadbzw2_pi(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l2_loadbzw2_pr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l2_loadbzw4_io(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l2_loadbzw4_pbr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l2_loadbzw4_pci(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l2_loadbzw4_pcr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l2_loadbzw4_pi(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l2_loadbzw4_pr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l2_loadrb_io(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l2_loadrb_pbr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l2_loadrb_pci(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l2_loadrb_pcr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l2_loadrb_pi(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l2_loadrb_pr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l2_loadrbgp(HexInsnPktBundle 
*bundle); +RzILOpEffect *hex_il_op_l2_loadrd_io(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l2_loadrd_pbr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l2_loadrd_pci(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l2_loadrd_pcr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l2_loadrd_pi(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l2_loadrd_pr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l2_loadrdgp(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l2_loadrh_io(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l2_loadrh_pbr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l2_loadrh_pci(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l2_loadrh_pcr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l2_loadrh_pi(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l2_loadrh_pr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l2_loadrhgp(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l2_loadri_io(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l2_loadri_pbr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l2_loadri_pci(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l2_loadri_pcr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l2_loadri_pi(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l2_loadri_pr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l2_loadrigp(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l2_loadrub_io(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l2_loadrub_pbr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l2_loadrub_pci(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l2_loadrub_pcr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l2_loadrub_pi(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l2_loadrub_pr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l2_loadrubgp(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l2_loadruh_io(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l2_loadruh_pbr(HexInsnPktBundle *bundle); +RzILOpEffect 
*hex_il_op_l2_loadruh_pci(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l2_loadruh_pcr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l2_loadruh_pi(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l2_loadruh_pr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l2_loadruhgp(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l2_loadw_aq(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l2_loadw_locked(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l2_ploadrbf_io(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l2_ploadrbf_pi(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l2_ploadrbfnew_io(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l2_ploadrbfnew_pi(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l2_ploadrbt_io(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l2_ploadrbt_pi(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l2_ploadrbtnew_io(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l2_ploadrbtnew_pi(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l2_ploadrdf_io(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l2_ploadrdf_pi(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l2_ploadrdfnew_io(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l2_ploadrdfnew_pi(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l2_ploadrdt_io(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l2_ploadrdt_pi(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l2_ploadrdtnew_io(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l2_ploadrdtnew_pi(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l2_ploadrhf_io(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l2_ploadrhf_pi(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l2_ploadrhfnew_io(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l2_ploadrhfnew_pi(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l2_ploadrht_io(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l2_ploadrht_pi(HexInsnPktBundle *bundle); +RzILOpEffect 
*hex_il_op_l2_ploadrhtnew_io(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l2_ploadrhtnew_pi(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l2_ploadrif_io(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l2_ploadrif_pi(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l2_ploadrifnew_io(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l2_ploadrifnew_pi(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l2_ploadrit_io(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l2_ploadrit_pi(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l2_ploadritnew_io(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l2_ploadritnew_pi(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l2_ploadrubf_io(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l2_ploadrubf_pi(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l2_ploadrubfnew_io(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l2_ploadrubfnew_pi(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l2_ploadrubt_io(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l2_ploadrubt_pi(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l2_ploadrubtnew_io(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l2_ploadrubtnew_pi(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l2_ploadruhf_io(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l2_ploadruhf_pi(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l2_ploadruhfnew_io(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l2_ploadruhfnew_pi(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l2_ploadruht_io(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l2_ploadruht_pi(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l2_ploadruhtnew_io(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l2_ploadruhtnew_pi(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l4_add_memopb_io(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l4_add_memoph_io(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l4_add_memopw_io(HexInsnPktBundle *bundle); 
+RzILOpEffect *hex_il_op_l4_and_memopb_io(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l4_and_memoph_io(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l4_and_memopw_io(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l4_iadd_memopb_io(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l4_iadd_memoph_io(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l4_iadd_memopw_io(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l4_iand_memopb_io(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l4_iand_memoph_io(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l4_iand_memopw_io(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l4_ior_memopb_io(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l4_ior_memoph_io(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l4_ior_memopw_io(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l4_isub_memopb_io(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l4_isub_memoph_io(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l4_isub_memopw_io(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l4_loadalignb_ap(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l4_loadalignb_ur(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l4_loadalignh_ap(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l4_loadalignh_ur(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l4_loadbsw2_ap(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l4_loadbsw2_ur(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l4_loadbsw4_ap(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l4_loadbsw4_ur(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l4_loadbzw2_ap(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l4_loadbzw2_ur(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l4_loadbzw4_ap(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l4_loadbzw4_ur(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l4_loadd_aq(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l4_loadd_locked(HexInsnPktBundle *bundle); 
+RzILOpEffect *hex_il_op_l4_loadrb_ap(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l4_loadrb_rr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l4_loadrb_ur(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l4_loadrd_ap(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l4_loadrd_rr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l4_loadrd_ur(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l4_loadrh_ap(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l4_loadrh_rr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l4_loadrh_ur(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l4_loadri_ap(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l4_loadri_rr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l4_loadri_ur(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l4_loadrub_ap(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l4_loadrub_rr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l4_loadrub_ur(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l4_loadruh_ap(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l4_loadruh_rr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l4_loadruh_ur(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l4_or_memopb_io(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l4_or_memoph_io(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l4_or_memopw_io(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l4_ploadrbf_abs(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l4_ploadrbf_rr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l4_ploadrbfnew_abs(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l4_ploadrbfnew_rr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l4_ploadrbt_abs(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l4_ploadrbt_rr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l4_ploadrbtnew_abs(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l4_ploadrbtnew_rr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l4_ploadrdf_abs(HexInsnPktBundle *bundle); 
+RzILOpEffect *hex_il_op_l4_ploadrdf_rr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l4_ploadrdfnew_abs(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l4_ploadrdfnew_rr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l4_ploadrdt_abs(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l4_ploadrdt_rr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l4_ploadrdtnew_abs(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l4_ploadrdtnew_rr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l4_ploadrhf_abs(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l4_ploadrhf_rr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l4_ploadrhfnew_abs(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l4_ploadrhfnew_rr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l4_ploadrht_abs(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l4_ploadrht_rr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l4_ploadrhtnew_abs(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l4_ploadrhtnew_rr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l4_ploadrif_abs(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l4_ploadrif_rr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l4_ploadrifnew_abs(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l4_ploadrifnew_rr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l4_ploadrit_abs(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l4_ploadrit_rr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l4_ploadritnew_abs(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l4_ploadritnew_rr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l4_ploadrubf_abs(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l4_ploadrubf_rr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l4_ploadrubfnew_abs(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l4_ploadrubfnew_rr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l4_ploadrubt_abs(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l4_ploadrubt_rr(HexInsnPktBundle 
*bundle); +RzILOpEffect *hex_il_op_l4_ploadrubtnew_abs(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l4_ploadrubtnew_rr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l4_ploadruhf_abs(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l4_ploadruhf_rr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l4_ploadruhfnew_abs(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l4_ploadruhfnew_rr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l4_ploadruht_abs(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l4_ploadruht_rr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l4_ploadruhtnew_abs(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l4_ploadruhtnew_rr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l4_return(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l4_return_f(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l4_return_fnew_pnt(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l4_return_fnew_pt(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l4_return_t(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l4_return_tnew_pnt(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l4_return_tnew_pt(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l4_sub_memopb_io(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l4_sub_memoph_io(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l4_sub_memopw_io(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_l6_memcpy(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_acci(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_accii(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_cmaci_s0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_cmacr_s0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_cmacs_s0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_cmacs_s1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_cmacsc_s0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_cmacsc_s1(HexInsnPktBundle *bundle); +RzILOpEffect 
*hex_il_op_m2_cmpyi_s0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_cmpyr_s0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_cmpyrs_s0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_cmpyrs_s1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_cmpyrsc_s0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_cmpyrsc_s1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_cmpys_s0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_cmpys_s1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_cmpysc_s0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_cmpysc_s1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_cnacs_s0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_cnacs_s1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_cnacsc_s0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_cnacsc_s1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_dpmpyss_acc_s0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_dpmpyss_nac_s0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_dpmpyss_rnd_s0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_dpmpyss_s0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_dpmpyuu_acc_s0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_dpmpyuu_nac_s0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_dpmpyuu_s0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_hmmpyh_rs1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_hmmpyh_s1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_hmmpyl_rs1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_hmmpyl_s1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_maci(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_macsin(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_macsip(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mmachs_rs0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mmachs_rs1(HexInsnPktBundle *bundle); +RzILOpEffect 
*hex_il_op_m2_mmachs_s0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mmachs_s1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mmacls_rs0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mmacls_rs1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mmacls_s0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mmacls_s1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mmacuhs_rs0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mmacuhs_rs1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mmacuhs_s0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mmacuhs_s1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mmaculs_rs0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mmaculs_rs1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mmaculs_s0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mmaculs_s1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mmpyh_rs0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mmpyh_rs1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mmpyh_s0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mmpyh_s1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mmpyl_rs0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mmpyl_rs1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mmpyl_s0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mmpyl_s1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mmpyuh_rs0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mmpyuh_rs1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mmpyuh_s0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mmpyuh_s1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mmpyul_rs0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mmpyul_rs1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mmpyul_s0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mmpyul_s1(HexInsnPktBundle *bundle); +RzILOpEffect 
*hex_il_op_m2_mnaci(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpy_acc_hh_s0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpy_acc_hh_s1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpy_acc_hl_s0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpy_acc_hl_s1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpy_acc_lh_s0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpy_acc_lh_s1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpy_acc_ll_s0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpy_acc_ll_s1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpy_acc_sat_hh_s0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpy_acc_sat_hh_s1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpy_acc_sat_hl_s0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpy_acc_sat_hl_s1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpy_acc_sat_lh_s0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpy_acc_sat_lh_s1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpy_acc_sat_ll_s0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpy_acc_sat_ll_s1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpy_hh_s0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpy_hh_s1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpy_hl_s0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpy_hl_s1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpy_lh_s0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpy_lh_s1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpy_ll_s0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpy_ll_s1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpy_nac_hh_s0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpy_nac_hh_s1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpy_nac_hl_s0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpy_nac_hl_s1(HexInsnPktBundle *bundle); +RzILOpEffect 
*hex_il_op_m2_mpy_nac_lh_s0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpy_nac_lh_s1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpy_nac_ll_s0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpy_nac_ll_s1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpy_nac_sat_hh_s0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpy_nac_sat_hh_s1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpy_nac_sat_hl_s0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpy_nac_sat_hl_s1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpy_nac_sat_lh_s0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpy_nac_sat_lh_s1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpy_nac_sat_ll_s0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpy_nac_sat_ll_s1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpy_rnd_hh_s0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpy_rnd_hh_s1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpy_rnd_hl_s0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpy_rnd_hl_s1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpy_rnd_lh_s0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpy_rnd_lh_s1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpy_rnd_ll_s0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpy_rnd_ll_s1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpy_sat_hh_s0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpy_sat_hh_s1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpy_sat_hl_s0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpy_sat_hl_s1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpy_sat_lh_s0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpy_sat_lh_s1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpy_sat_ll_s0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpy_sat_ll_s1(HexInsnPktBundle *bundle); +RzILOpEffect 
*hex_il_op_m2_mpy_sat_rnd_hh_s0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpy_sat_rnd_hh_s1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpy_sat_rnd_hl_s0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpy_sat_rnd_hl_s1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpy_sat_rnd_lh_s0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpy_sat_rnd_lh_s1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpy_sat_rnd_ll_s0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpy_sat_rnd_ll_s1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpy_up(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpy_up_s1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpy_up_s1_sat(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpyd_acc_hh_s0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpyd_acc_hh_s1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpyd_acc_hl_s0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpyd_acc_hl_s1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpyd_acc_lh_s0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpyd_acc_lh_s1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpyd_acc_ll_s0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpyd_acc_ll_s1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpyd_hh_s0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpyd_hh_s1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpyd_hl_s0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpyd_hl_s1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpyd_lh_s0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpyd_lh_s1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpyd_ll_s0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpyd_ll_s1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpyd_nac_hh_s0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpyd_nac_hh_s1(HexInsnPktBundle *bundle); 
+RzILOpEffect *hex_il_op_m2_mpyd_nac_hl_s0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpyd_nac_hl_s1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpyd_nac_lh_s0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpyd_nac_lh_s1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpyd_nac_ll_s0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpyd_nac_ll_s1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpyd_rnd_hh_s0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpyd_rnd_hh_s1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpyd_rnd_hl_s0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpyd_rnd_hl_s1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpyd_rnd_lh_s0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpyd_rnd_lh_s1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpyd_rnd_ll_s0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpyd_rnd_ll_s1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpyi(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpysin(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpysip(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpysu_up(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpyu_acc_hh_s0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpyu_acc_hh_s1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpyu_acc_hl_s0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpyu_acc_hl_s1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpyu_acc_lh_s0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpyu_acc_lh_s1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpyu_acc_ll_s0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpyu_acc_ll_s1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpyu_hh_s0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpyu_hh_s1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpyu_hl_s0(HexInsnPktBundle *bundle); +RzILOpEffect 
*hex_il_op_m2_mpyu_hl_s1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpyu_lh_s0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpyu_lh_s1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpyu_ll_s0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpyu_ll_s1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpyu_nac_hh_s0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpyu_nac_hh_s1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpyu_nac_hl_s0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpyu_nac_hl_s1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpyu_nac_lh_s0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpyu_nac_lh_s1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpyu_nac_ll_s0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpyu_nac_ll_s1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpyu_up(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpyud_acc_hh_s0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpyud_acc_hh_s1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpyud_acc_hl_s0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpyud_acc_hl_s1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpyud_acc_lh_s0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpyud_acc_lh_s1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpyud_acc_ll_s0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpyud_acc_ll_s1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpyud_hh_s0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpyud_hh_s1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpyud_hl_s0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpyud_hl_s1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpyud_lh_s0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpyud_lh_s1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpyud_ll_s0(HexInsnPktBundle *bundle); +RzILOpEffect 
*hex_il_op_m2_mpyud_ll_s1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpyud_nac_hh_s0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpyud_nac_hh_s1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpyud_nac_hl_s0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpyud_nac_hl_s1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpyud_nac_lh_s0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpyud_nac_lh_s1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpyud_nac_ll_s0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_mpyud_nac_ll_s1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_nacci(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_naccii(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_subacc(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_vabsdiffh(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_vabsdiffw(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_vcmac_s0_sat_i(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_vcmac_s0_sat_r(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_vcmpy_s0_sat_i(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_vcmpy_s0_sat_r(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_vcmpy_s1_sat_i(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_vcmpy_s1_sat_r(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_vdmacs_s0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_vdmacs_s1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_vdmpyrs_s0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_vdmpyrs_s1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_vdmpys_s0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_vdmpys_s1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_vmac2(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_vmac2es(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_vmac2es_s0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_vmac2es_s1(HexInsnPktBundle 
*bundle); +RzILOpEffect *hex_il_op_m2_vmac2s_s0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_vmac2s_s1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_vmac2su_s0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_vmac2su_s1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_vmpy2es_s0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_vmpy2es_s1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_vmpy2s_s0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_vmpy2s_s0pack(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_vmpy2s_s1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_vmpy2s_s1pack(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_vmpy2su_s0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_vmpy2su_s1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_vraddh(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_vradduh(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_vrcmaci_s0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_vrcmaci_s0c(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_vrcmacr_s0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_vrcmacr_s0c(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_vrcmpyi_s0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_vrcmpyi_s0c(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_vrcmpyr_s0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_vrcmpyr_s0c(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_vrcmpys_acc_s1_h(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_vrcmpys_acc_s1_l(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_vrcmpys_s1_h(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_vrcmpys_s1_l(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_vrcmpys_s1rp_h(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_vrcmpys_s1rp_l(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_vrmac_s0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m2_vrmpy_s0(HexInsnPktBundle 
*bundle); +RzILOpEffect *hex_il_op_m2_xor_xacc(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m4_and_and(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m4_and_andn(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m4_and_or(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m4_and_xor(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m4_cmpyi_wh(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m4_cmpyi_whc(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m4_cmpyr_wh(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m4_cmpyr_whc(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m4_mac_up_s1_sat(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m4_mpyri_addi(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m4_mpyri_addr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m4_mpyri_addr_u2(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m4_mpyrr_addi(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m4_mpyrr_addr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m4_nac_up_s1_sat(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m4_or_and(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m4_or_andn(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m4_or_or(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m4_or_xor(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m4_pmpyw(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m4_pmpyw_acc(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m4_vpmpyh(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m4_vpmpyh_acc(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m4_vrmpyeh_acc_s0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m4_vrmpyeh_acc_s1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m4_vrmpyeh_s0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m4_vrmpyeh_s1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m4_vrmpyoh_acc_s0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m4_vrmpyoh_acc_s1(HexInsnPktBundle *bundle); +RzILOpEffect 
*hex_il_op_m4_vrmpyoh_s0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m4_vrmpyoh_s1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m4_xor_and(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m4_xor_andn(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m4_xor_or(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m4_xor_xacc(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m5_vdmacbsu(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m5_vdmpybsu(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m5_vmacbsu(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m5_vmacbuu(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m5_vmpybsu(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m5_vmpybuu(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m5_vrmacbsu(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m5_vrmacbuu(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m5_vrmpybsu(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m5_vrmpybuu(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m6_vabsdiffb(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m6_vabsdiffub(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m7_dcmpyiw(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m7_dcmpyiw_acc(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m7_dcmpyiwc(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m7_dcmpyiwc_acc(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m7_dcmpyrw(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m7_dcmpyrw_acc(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m7_dcmpyrwc(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m7_dcmpyrwc_acc(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m7_wcmpyiw(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m7_wcmpyiw_rnd(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m7_wcmpyiwc(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m7_wcmpyiwc_rnd(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m7_wcmpyrw(HexInsnPktBundle *bundle); 
+RzILOpEffect *hex_il_op_m7_wcmpyrw_rnd(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m7_wcmpyrwc(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_m7_wcmpyrwc_rnd(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_r6_release_at_vi(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_r6_release_st_vi(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_addasl_rrri(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_allocframe(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_asl_i_p(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_asl_i_p_acc(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_asl_i_p_and(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_asl_i_p_nac(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_asl_i_p_or(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_asl_i_p_xacc(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_asl_i_r(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_asl_i_r_acc(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_asl_i_r_and(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_asl_i_r_nac(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_asl_i_r_or(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_asl_i_r_sat(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_asl_i_r_xacc(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_asl_i_vh(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_asl_i_vw(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_asl_r_p(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_asl_r_p_acc(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_asl_r_p_and(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_asl_r_p_nac(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_asl_r_p_or(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_asl_r_p_xor(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_asl_r_r(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_asl_r_r_acc(HexInsnPktBundle *bundle); 
+RzILOpEffect *hex_il_op_s2_asl_r_r_and(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_asl_r_r_nac(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_asl_r_r_or(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_asl_r_r_sat(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_asl_r_vh(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_asl_r_vw(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_asr_i_p(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_asr_i_p_acc(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_asr_i_p_and(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_asr_i_p_nac(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_asr_i_p_or(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_asr_i_p_rnd(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_asr_i_r(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_asr_i_r_acc(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_asr_i_r_and(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_asr_i_r_nac(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_asr_i_r_or(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_asr_i_r_rnd(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_asr_i_svw_trun(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_asr_i_vh(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_asr_i_vw(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_asr_r_p(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_asr_r_p_acc(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_asr_r_p_and(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_asr_r_p_nac(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_asr_r_p_or(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_asr_r_p_xor(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_asr_r_r(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_asr_r_r_acc(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_asr_r_r_and(HexInsnPktBundle *bundle); +RzILOpEffect 
*hex_il_op_s2_asr_r_r_nac(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_asr_r_r_or(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_asr_r_r_sat(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_asr_r_svw_trun(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_asr_r_vh(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_asr_r_vw(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_brev(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_brevp(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_cabacdecbin(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_cl0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_cl0p(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_cl1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_cl1p(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_clb(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_clbnorm(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_clbp(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_clrbit_i(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_clrbit_r(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_ct0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_ct0p(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_ct1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_ct1p(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_deinterleave(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_extractu(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_extractu_rp(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_extractup(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_extractup_rp(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_insert(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_insert_rp(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_insertp(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_insertp_rp(HexInsnPktBundle *bundle); +RzILOpEffect 
*hex_il_op_s2_interleave(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_lfsp(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_lsl_r_p(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_lsl_r_p_acc(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_lsl_r_p_and(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_lsl_r_p_nac(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_lsl_r_p_or(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_lsl_r_p_xor(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_lsl_r_r(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_lsl_r_r_acc(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_lsl_r_r_and(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_lsl_r_r_nac(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_lsl_r_r_or(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_lsl_r_vh(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_lsl_r_vw(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_lsr_i_p(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_lsr_i_p_acc(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_lsr_i_p_and(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_lsr_i_p_nac(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_lsr_i_p_or(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_lsr_i_p_xacc(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_lsr_i_r(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_lsr_i_r_acc(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_lsr_i_r_and(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_lsr_i_r_nac(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_lsr_i_r_or(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_lsr_i_r_xacc(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_lsr_i_vh(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_lsr_i_vw(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_lsr_r_p(HexInsnPktBundle *bundle); +RzILOpEffect 
*hex_il_op_s2_lsr_r_p_acc(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_lsr_r_p_and(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_lsr_r_p_nac(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_lsr_r_p_or(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_lsr_r_p_xor(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_lsr_r_r(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_lsr_r_r_acc(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_lsr_r_r_and(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_lsr_r_r_nac(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_lsr_r_r_or(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_lsr_r_vh(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_lsr_r_vw(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_mask(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_packhl(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_parityp(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_pstorerbf_io(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_pstorerbf_pi(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_pstorerbfnew_pi(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_pstorerbnewf_io(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_pstorerbnewf_pi(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_pstorerbnewfnew_pi(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_pstorerbnewt_io(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_pstorerbnewt_pi(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_pstorerbnewtnew_pi(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_pstorerbt_io(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_pstorerbt_pi(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_pstorerbtnew_pi(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_pstorerdf_io(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_pstorerdf_pi(HexInsnPktBundle *bundle); +RzILOpEffect 
*hex_il_op_s2_pstorerdfnew_pi(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_pstorerdt_io(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_pstorerdt_pi(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_pstorerdtnew_pi(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_pstorerff_io(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_pstorerff_pi(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_pstorerffnew_pi(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_pstorerft_io(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_pstorerft_pi(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_pstorerftnew_pi(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_pstorerhf_io(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_pstorerhf_pi(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_pstorerhfnew_pi(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_pstorerhnewf_io(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_pstorerhnewf_pi(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_pstorerhnewfnew_pi(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_pstorerhnewt_io(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_pstorerhnewt_pi(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_pstorerhnewtnew_pi(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_pstorerht_io(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_pstorerht_pi(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_pstorerhtnew_pi(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_pstorerif_io(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_pstorerif_pi(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_pstorerifnew_pi(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_pstorerinewf_io(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_pstorerinewf_pi(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_pstorerinewfnew_pi(HexInsnPktBundle *bundle); +RzILOpEffect 
*hex_il_op_s2_pstorerinewt_io(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_pstorerinewt_pi(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_pstorerinewtnew_pi(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_pstorerit_io(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_pstorerit_pi(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_pstoreritnew_pi(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_setbit_i(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_setbit_r(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_shuffeb(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_shuffeh(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_shuffob(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_shuffoh(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_storerb_io(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_storerb_pbr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_storerb_pci(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_storerb_pcr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_storerb_pi(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_storerb_pr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_storerbgp(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_storerbnew_io(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_storerbnew_pbr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_storerbnew_pci(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_storerbnew_pcr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_storerbnew_pi(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_storerbnew_pr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_storerbnewgp(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_storerd_io(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_storerd_pbr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_storerd_pci(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_storerd_pcr(HexInsnPktBundle 
*bundle); +RzILOpEffect *hex_il_op_s2_storerd_pi(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_storerd_pr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_storerdgp(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_storerf_io(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_storerf_pbr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_storerf_pci(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_storerf_pcr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_storerf_pi(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_storerf_pr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_storerfgp(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_storerh_io(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_storerh_pbr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_storerh_pci(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_storerh_pcr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_storerh_pi(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_storerh_pr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_storerhgp(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_storerhnew_io(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_storerhnew_pbr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_storerhnew_pci(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_storerhnew_pcr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_storerhnew_pi(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_storerhnew_pr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_storerhnewgp(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_storeri_io(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_storeri_pbr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_storeri_pci(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_storeri_pcr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_storeri_pi(HexInsnPktBundle *bundle); +RzILOpEffect 
*hex_il_op_s2_storeri_pr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_storerigp(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_storerinew_io(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_storerinew_pbr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_storerinew_pci(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_storerinew_pcr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_storerinew_pi(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_storerinew_pr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_storerinewgp(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_storew_locked(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_storew_rl_at_vi(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_storew_rl_st_vi(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_svsathb(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_svsathub(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_tableidxb(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_tableidxd(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_tableidxh(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_tableidxw(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_togglebit_i(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_togglebit_r(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_tstbit_i(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_tstbit_r(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_valignib(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_valignrb(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_vcnegh(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_vcrotate(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_vrcnegh(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_vrndpackwh(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_vrndpackwhs(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_vsathb(HexInsnPktBundle *bundle); +RzILOpEffect 
*hex_il_op_s2_vsathb_nopack(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_vsathub(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_vsathub_nopack(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_vsatwh(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_vsatwh_nopack(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_vsatwuh(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_vsatwuh_nopack(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_vsplatrb(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_vsplatrh(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_vspliceib(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_vsplicerb(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_vsxtbh(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_vsxthw(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_vtrunehb(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_vtrunewh(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_vtrunohb(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_vtrunowh(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_vzxtbh(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s2_vzxthw(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_addaddi(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_addi_asl_ri(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_addi_lsr_ri(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_andi_asl_ri(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_andi_lsr_ri(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_clbaddi(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_clbpaddi(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_clbpnorm(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_extract(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_extract_rp(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_extractp(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_extractp_rp(HexInsnPktBundle *bundle); 
+RzILOpEffect *hex_il_op_s4_lsli(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_ntstbit_i(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_ntstbit_r(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_or_andi(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_or_andix(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_or_ori(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_ori_asl_ri(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_ori_lsr_ri(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_parity(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_pstorerbf_abs(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_pstorerbf_rr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_pstorerbfnew_abs(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_pstorerbfnew_io(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_pstorerbfnew_rr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_pstorerbnewf_abs(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_pstorerbnewf_rr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_pstorerbnewfnew_abs(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_pstorerbnewfnew_io(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_pstorerbnewfnew_rr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_pstorerbnewt_abs(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_pstorerbnewt_rr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_pstorerbnewtnew_abs(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_pstorerbnewtnew_io(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_pstorerbnewtnew_rr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_pstorerbt_abs(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_pstorerbt_rr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_pstorerbtnew_abs(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_pstorerbtnew_io(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_pstorerbtnew_rr(HexInsnPktBundle 
*bundle); +RzILOpEffect *hex_il_op_s4_pstorerdf_abs(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_pstorerdf_rr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_pstorerdfnew_abs(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_pstorerdfnew_io(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_pstorerdfnew_rr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_pstorerdt_abs(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_pstorerdt_rr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_pstorerdtnew_abs(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_pstorerdtnew_io(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_pstorerdtnew_rr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_pstorerff_abs(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_pstorerff_rr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_pstorerffnew_abs(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_pstorerffnew_io(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_pstorerffnew_rr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_pstorerft_abs(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_pstorerft_rr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_pstorerftnew_abs(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_pstorerftnew_io(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_pstorerftnew_rr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_pstorerhf_abs(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_pstorerhf_rr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_pstorerhfnew_abs(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_pstorerhfnew_io(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_pstorerhfnew_rr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_pstorerhnewf_abs(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_pstorerhnewf_rr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_pstorerhnewfnew_abs(HexInsnPktBundle *bundle); +RzILOpEffect 
*hex_il_op_s4_pstorerhnewfnew_io(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_pstorerhnewfnew_rr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_pstorerhnewt_abs(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_pstorerhnewt_rr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_pstorerhnewtnew_abs(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_pstorerhnewtnew_io(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_pstorerhnewtnew_rr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_pstorerht_abs(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_pstorerht_rr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_pstorerhtnew_abs(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_pstorerhtnew_io(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_pstorerhtnew_rr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_pstorerif_abs(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_pstorerif_rr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_pstorerifnew_abs(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_pstorerifnew_io(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_pstorerifnew_rr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_pstorerinewf_abs(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_pstorerinewf_rr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_pstorerinewfnew_abs(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_pstorerinewfnew_io(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_pstorerinewfnew_rr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_pstorerinewt_abs(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_pstorerinewt_rr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_pstorerinewtnew_abs(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_pstorerinewtnew_io(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_pstorerinewtnew_rr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_pstorerit_abs(HexInsnPktBundle 
*bundle); +RzILOpEffect *hex_il_op_s4_pstorerit_rr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_pstoreritnew_abs(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_pstoreritnew_io(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_pstoreritnew_rr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_stored_locked(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_stored_rl_at_vi(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_stored_rl_st_vi(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_storeirb_io(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_storeirbf_io(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_storeirbfnew_io(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_storeirbt_io(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_storeirbtnew_io(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_storeirh_io(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_storeirhf_io(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_storeirhfnew_io(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_storeirht_io(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_storeirhtnew_io(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_storeiri_io(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_storeirif_io(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_storeirifnew_io(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_storeirit_io(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_storeiritnew_io(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_storerb_ap(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_storerb_rr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_storerb_ur(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_storerbnew_ap(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_storerbnew_rr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_storerbnew_ur(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_storerd_ap(HexInsnPktBundle 
*bundle); +RzILOpEffect *hex_il_op_s4_storerd_rr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_storerd_ur(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_storerf_ap(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_storerf_rr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_storerf_ur(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_storerh_ap(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_storerh_rr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_storerh_ur(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_storerhnew_ap(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_storerhnew_rr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_storerhnew_ur(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_storeri_ap(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_storeri_rr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_storeri_ur(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_storerinew_ap(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_storerinew_rr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_storerinew_ur(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_subaddi(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_subi_asl_ri(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_subi_lsr_ri(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_vrcrotate(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_vrcrotate_acc(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_vxaddsubh(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_vxaddsubhr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_vxaddsubw(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_vxsubaddh(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_vxsubaddhr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s4_vxsubaddw(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s5_asrhub_rnd_sat(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s5_asrhub_sat(HexInsnPktBundle 
*bundle); +RzILOpEffect *hex_il_op_s5_popcountp(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s5_vasrhrnd(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s6_rol_i_p(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s6_rol_i_p_acc(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s6_rol_i_p_and(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s6_rol_i_p_nac(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s6_rol_i_p_or(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s6_rol_i_p_xacc(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s6_rol_i_r(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s6_rol_i_r_acc(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s6_rol_i_r_and(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s6_rol_i_r_nac(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s6_rol_i_r_or(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s6_rol_i_r_xacc(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s6_vsplatrbp(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s6_vtrunehb_ppp(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_s6_vtrunohb_ppp(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vl32ub_ai(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vl32ub_pi(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vl32ub_ppu(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vl32b_ai(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vl32b_cur_ai(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vl32b_cur_npred_ai(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vl32b_cur_npred_pi(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vl32b_cur_npred_ppu(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vl32b_cur_pi(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vl32b_cur_ppu(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vl32b_cur_pred_ai(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vl32b_cur_pred_pi(HexInsnPktBundle *bundle); +RzILOpEffect 
*hex_il_op_v6_vl32b_cur_pred_ppu(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vl32b_npred_ai(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vl32b_npred_pi(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vl32b_npred_ppu(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vl32b_nt_ai(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vl32b_nt_cur_ai(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vl32b_nt_cur_npred_ai(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vl32b_nt_cur_npred_pi(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vl32b_nt_cur_npred_ppu(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vl32b_nt_cur_pi(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vl32b_nt_cur_ppu(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vl32b_nt_cur_pred_ai(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vl32b_nt_cur_pred_pi(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vl32b_nt_cur_pred_ppu(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vl32b_nt_npred_ai(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vl32b_nt_npred_pi(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vl32b_nt_npred_ppu(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vl32b_nt_pi(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vl32b_nt_ppu(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vl32b_nt_pred_ai(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vl32b_nt_pred_pi(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vl32b_nt_pred_ppu(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vl32b_nt_tmp_ai(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vl32b_nt_tmp_npred_ai(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vl32b_nt_tmp_npred_pi(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vl32b_nt_tmp_npred_ppu(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vl32b_nt_tmp_pi(HexInsnPktBundle *bundle); +RzILOpEffect 
*hex_il_op_v6_vl32b_nt_tmp_ppu(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vl32b_nt_tmp_pred_ai(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vl32b_nt_tmp_pred_pi(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vl32b_nt_tmp_pred_ppu(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vl32b_pi(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vl32b_ppu(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vl32b_pred_ai(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vl32b_pred_pi(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vl32b_pred_ppu(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vl32b_tmp_ai(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vl32b_tmp_npred_ai(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vl32b_tmp_npred_pi(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vl32b_tmp_npred_ppu(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vl32b_tmp_pi(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vl32b_tmp_ppu(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vl32b_tmp_pred_ai(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vl32b_tmp_pred_pi(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vl32b_tmp_pred_ppu(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vs32ub_ai(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vs32ub_npred_ai(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vs32ub_npred_pi(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vs32ub_npred_ppu(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vs32ub_pi(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vs32ub_ppu(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vs32ub_pred_ai(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vs32ub_pred_pi(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vs32ub_pred_ppu(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vs32b_ai(HexInsnPktBundle *bundle); +RzILOpEffect 
*hex_il_op_v6_vs32b_npred_ai(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vs32b_npred_pi(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vs32b_npred_ppu(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vs32b_nqpred_ai(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vs32b_nqpred_pi(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vs32b_nqpred_ppu(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vs32b_nt_ai(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vs32b_nt_npred_ai(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vs32b_nt_npred_pi(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vs32b_nt_npred_ppu(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vs32b_nt_nqpred_ai(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vs32b_nt_nqpred_pi(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vs32b_nt_nqpred_ppu(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vs32b_nt_pi(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vs32b_nt_ppu(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vs32b_nt_pred_ai(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vs32b_nt_pred_pi(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vs32b_nt_pred_ppu(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vs32b_nt_qpred_ai(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vs32b_nt_qpred_pi(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vs32b_nt_qpred_ppu(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vs32b_pi(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vs32b_ppu(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vs32b_pred_ai(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vs32b_pred_pi(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vs32b_pred_ppu(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vs32b_qpred_ai(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vs32b_qpred_pi(HexInsnPktBundle *bundle); +RzILOpEffect 
*hex_il_op_v6_vs32b_qpred_ppu(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vs32b_srls_ai(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vs32b_srls_pi(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vs32b_srls_ppu(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vabs_hf(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vabs_sf(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vadd_hf(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vadd_hf_hf(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vadd_qf16(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vadd_qf16_mix(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vadd_qf32(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vadd_qf32_mix(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vadd_sf(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vadd_sf_bf(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vadd_sf_hf(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vadd_sf_sf(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vassign_fp(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vconv_h_hf(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vconv_hf_h(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vconv_hf_qf16(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vconv_hf_qf32(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vconv_sf_qf32(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vconv_sf_w(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vconv_w_sf(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vcvt_b_hf(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vcvt_bf_sf(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vcvt_h_hf(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vcvt_hf_b(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vcvt_hf_h(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vcvt_hf_sf(HexInsnPktBundle *bundle); +RzILOpEffect 
*hex_il_op_v6_vcvt_hf_ub(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vcvt_hf_uh(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vcvt_sf_hf(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vcvt_ub_hf(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vcvt_uh_hf(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vdmpy_sf_hf(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vdmpy_sf_hf_acc(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vfmax_hf(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vfmax_sf(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vfmin_hf(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vfmin_sf(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vfneg_hf(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vfneg_sf(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vgtbf(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vgtbf_and(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vgtbf_or(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vgtbf_xor(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vgthf(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vgthf_and(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vgthf_or(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vgthf_xor(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vgtsf(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vgtsf_and(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vgtsf_or(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vgtsf_xor(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vmax_bf(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vmax_hf(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vmax_sf(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vmin_bf(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vmin_hf(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vmin_sf(HexInsnPktBundle *bundle); +RzILOpEffect 
*hex_il_op_v6_vmpy_hf_hf(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vmpy_hf_hf_acc(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vmpy_qf16(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vmpy_qf16_hf(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vmpy_qf16_mix_hf(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vmpy_qf32(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vmpy_qf32_hf(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vmpy_qf32_mix_hf(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vmpy_qf32_qf16(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vmpy_qf32_sf(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vmpy_sf_bf(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vmpy_sf_bf_acc(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vmpy_sf_hf(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vmpy_sf_hf_acc(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vmpy_sf_sf(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vrmpybub_rtt(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vrmpybub_rtt_acc(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vrmpyub_rtt(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vrmpyub_rtt_acc(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vrmpyzbb_rt(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vrmpyzbb_rt_acc(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vrmpyzbb_rx(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vrmpyzbb_rx_acc(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vrmpyzbub_rt(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vrmpyzbub_rt_acc(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vrmpyzbub_rx(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vrmpyzbub_rx_acc(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vrmpyzcb_rt(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vrmpyzcb_rt_acc(HexInsnPktBundle *bundle); +RzILOpEffect 
*hex_il_op_v6_vrmpyzcb_rx(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vrmpyzcb_rx_acc(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vrmpyzcbs_rt(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vrmpyzcbs_rt_acc(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vrmpyzcbs_rx(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vrmpyzcbs_rx_acc(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vrmpyznb_rt(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vrmpyznb_rt_acc(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vrmpyznb_rx(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vrmpyznb_rx_acc(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vsub_hf(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vsub_hf_hf(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vsub_qf16(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vsub_qf16_mix(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vsub_qf32(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vsub_qf32_mix(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vsub_sf(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vsub_sf_bf(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vsub_sf_hf(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_vsub_sf_sf(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_zld_ai(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_zld_pi(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_zld_ppu(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_zld_pred_ai(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_zld_pred_pi(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_zld_pred_ppu(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_v6_zextract(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_y2_barrier(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_y2_break(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_y2_ciad(HexInsnPktBundle *bundle); +RzILOpEffect 
*hex_il_op_y2_crswap0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_y2_cswi(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_y2_dccleana(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_y2_dccleanidx(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_y2_dccleaninva(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_y2_dccleaninvidx(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_y2_dcfetchbo(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_y2_dcinva(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_y2_dcinvidx(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_y2_dckill(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_y2_dctagr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_y2_dctagw(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_y2_dczeroa(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_y2_getimask(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_y2_iassignr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_y2_iassignw(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_y2_icdatar(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_y2_icdataw(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_y2_icinva(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_y2_icinvidx(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_y2_ickill(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_y2_ictagr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_y2_ictagw(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_y2_isync(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_y2_k0lock(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_y2_k0unlock(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_y2_l2cleaninvidx(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_y2_l2kill(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_y2_resume(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_y2_setimask(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_y2_setprio(HexInsnPktBundle *bundle); +RzILOpEffect 
*hex_il_op_y2_start(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_y2_stop(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_y2_swi(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_y2_syncht(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_y2_tfrscrr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_y2_tfrsrcr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_y2_tlblock(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_y2_tlbp(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_y2_tlbr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_y2_tlbunlock(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_y2_tlbw(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_y2_wait(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_y4_crswap1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_y4_crswap10(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_y4_l2fetch(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_y4_l2tagr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_y4_l2tagw(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_y4_nmi(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_y4_siad(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_y4_tfrscpp(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_y4_tfrspcp(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_y4_trace(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_y5_ctlbw(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_y5_l2cleanidx(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_y5_l2fetch(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_y5_l2gclean(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_y5_l2gcleaninv(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_y5_l2gunlock(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_y5_l2invidx(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_y5_l2locka(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_y5_l2unlocka(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_y5_tlbasidi(HexInsnPktBundle *bundle); +RzILOpEffect 
*hex_il_op_y5_tlboc(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_y6_diag(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_y6_diag0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_y6_diag1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_y6_dmlink(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_y6_dmpause(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_y6_dmpoll(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_y6_dmresume(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_y6_dmstart(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_y6_dmwait(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_dep_a2_addsat(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_dep_a2_subsat(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_dep_s2_packhl(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_invalid_decode(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_imported_rd_ss(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_imported_rd_memw_phys_rs_rt(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_imported_rdd_sss(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_imported_sd_rs(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_imported_sdd_rss(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_imported_l2gclean_rtt(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_imported_l2gcleaninv_rtt(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_sa1_addi(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_sa1_addrx(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_sa1_addsp(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_sa1_and1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_sa1_clrf(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_sa1_clrfnew(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_sa1_clrt(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_sa1_clrtnew(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_sa1_cmpeqi(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_sa1_combine0i(HexInsnPktBundle *bundle); 
+RzILOpEffect *hex_il_op_sa1_combine1i(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_sa1_combine2i(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_sa1_combine3i(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_sa1_combinerz(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_sa1_combinezr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_sa1_dec(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_sa1_inc(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_sa1_seti(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_sa1_setin1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_sa1_sxtb(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_sa1_sxth(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_sa1_tfr(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_sa1_zxtb(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_sa1_zxth(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_sl1_loadri_io(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_sl1_loadrub_io(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_sl2_deallocframe(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_sl2_jumpr31(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_sl2_jumpr31_f(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_sl2_jumpr31_fnew(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_sl2_jumpr31_t(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_sl2_jumpr31_tnew(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_sl2_loadrb_io(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_sl2_loadrd_sp(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_sl2_loadrh_io(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_sl2_loadri_sp(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_sl2_loadruh_io(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_sl2_return(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_sl2_return_f(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_sl2_return_fnew(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_sl2_return_t(HexInsnPktBundle 
*bundle); +RzILOpEffect *hex_il_op_sl2_return_tnew(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_ss1_storeb_io(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_ss1_storew_io(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_ss2_allocframe(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_ss2_storebi0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_ss2_storebi1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_ss2_stored_sp(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_ss2_storeh_io(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_ss2_storew_sp(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_ss2_storewi0(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_ss2_storewi1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_undocumented_sa2_tfrsi(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j2_endloop01(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j2_endloop1(HexInsnPktBundle *bundle); +RzILOpEffect *hex_il_op_j2_endloop0(HexInsnPktBundle *bundle); +RZ_OWN RzILOpEffect *hex_fcirc_add(HexInsnPktBundle *bundle, const HexOp *RxV, RZ_BORROW RzILOpPure *offset, RZ_BORROW RzILOpPure *M, RZ_BORROW RzILOpPure *CS); +RZ_OWN RzILOpEffect *hex_trap(RZ_BORROW RzILOpPure *trap_type, RZ_BORROW RzILOpPure *imm); +RZ_OWN RzILOpEffect *hex_clz32(RZ_BORROW RzILOpPure *t); +RZ_OWN RzILOpEffect *hex_clz64(RZ_BORROW RzILOpPure *t); +RZ_OWN RzILOpEffect *hex_clo32(RZ_BORROW RzILOpPure *x); +RZ_OWN RzILOpEffect *hex_clo64(RZ_BORROW RzILOpPure *x); +RZ_OWN RzILOpEffect *hex_revbit16(RZ_BORROW RzILOpPure *t); +RZ_OWN RzILOpEffect *hex_revbit32(RZ_BORROW RzILOpPure *t); +RZ_OWN RzILOpEffect *hex_revbit64(RZ_BORROW RzILOpPure *t); +RZ_OWN RzILOpEffect *hex_fbrev(RZ_BORROW RzILOpPure *addr); +RZ_OWN RzILOpEffect *hex_conv_round(RZ_BORROW RzILOpPure *a, RZ_BORROW RzILOpPure *n); +RZ_OWN RzILOpEffect *hex_set_usr_field(HexInsnPktBundle *bundle, HexRegField field, RZ_BORROW RzILOpPure *val); +RZ_OWN RzILOpEffect *hex_get_usr_field(HexInsnPktBundle *bundle, 
HexRegField field); + +#endif diff --git a/librz/arch/isa/hexagon/hexagon_il_getter_table.h b/librz/arch/isa/hexagon/hexagon_il_getter_table.h new file mode 100644 index 00000000000..4532fb2276c --- /dev/null +++ b/librz/arch/isa/hexagon/hexagon_il_getter_table.h @@ -0,0 +1,6718 @@ +// SPDX-FileCopyrightText: 2021 Rot127 +// SPDX-License-Identifier: LGPL-3.0-only + +// LLVM commit: b6f51787f6c8e77143f0aef6b58ddc7c55741d5c +// LLVM commit date: 2023-11-15 07:10:59 -0800 (ISO 8601 format) +// Date of code generation: 2024-03-16 06:22:39-05:00 +//======================================== +// The following code is generated. +// Do not edit. Repository of code generator: +// https://github.com/rizinorg/rz-hexagon + +#ifndef HEXAGON_IL_GETTER_TABLE_H +#define HEXAGON_IL_GETTER_TABLE_H + +#include +#include +#include +#include + +static HexILInsn hex_il_getter_lt[] = { + { { (HexILOpGetter)hex_il_op_invalid_decode, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_abs, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_absp, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_abssat, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_add, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_addh_h16_hh, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_addh_h16_hl, HEX_IL_INSN_ATTR_NONE 
}, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_addh_h16_lh, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_addh_h16_ll, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_addh_h16_sat_hh, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_addh_h16_sat_hl, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_addh_h16_sat_lh, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_addh_h16_sat_ll, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_addh_l16_hl, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_addh_l16_ll, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_addh_l16_sat_hl, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_addh_l16_sat_ll, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_addi, HEX_IL_INSN_ATTR_NONE }, 
+ { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_addp, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_addpsat, HEX_IL_INSN_ATTR_COND }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_addsat, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_addsph, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_addspl, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_and, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_andir, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_andp, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_aslh, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_asrh, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_combine_hh, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, 
HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_combine_hl, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_combine_lh, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_combine_ll, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_combineii, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_combinew, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_max, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_maxp, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_maxu, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_maxup, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_min, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_minp, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_minu, 
HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_minup, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_negp, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_negsat, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_nop, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_notp, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_or, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_orir, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_orp, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_paddf, HEX_IL_INSN_ATTR_COND }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_paddfnew, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_paddif, HEX_IL_INSN_ATTR_COND }, + { (HexILOpGetter)NULL, 
HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_paddifnew, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_paddit, HEX_IL_INSN_ATTR_COND }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_padditnew, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_paddt, HEX_IL_INSN_ATTR_COND }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_paddtnew, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_pandf, HEX_IL_INSN_ATTR_COND }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_pandfnew, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_pandt, HEX_IL_INSN_ATTR_COND }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_pandtnew, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_porf, HEX_IL_INSN_ATTR_COND }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_porfnew, 
HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_port, HEX_IL_INSN_ATTR_COND }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_portnew, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_psubf, HEX_IL_INSN_ATTR_COND }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_psubfnew, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_psubt, HEX_IL_INSN_ATTR_COND }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_psubtnew, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_pxorf, HEX_IL_INSN_ATTR_COND }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_pxorfnew, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_pxort, HEX_IL_INSN_ATTR_COND }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_pxortnew, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, 
HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_roundsat, HEX_IL_INSN_ATTR_COND }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_sat, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_satb, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_sath, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_satub, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_satuh, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_sub, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_subh_h16_hh, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_subh_h16_hl, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_subh_h16_lh, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_subh_h16_ll, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { 
(HexILOpGetter)hex_il_op_a2_subh_h16_sat_hh, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_subh_h16_sat_hl, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_subh_h16_sat_lh, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_subh_h16_sat_ll, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_subh_l16_hl, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_subh_l16_ll, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_subh_l16_sat_hl, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_subh_l16_sat_ll, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_subp, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_subri, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_subsat, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { 
(HexILOpGetter)hex_il_op_a2_svaddh, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_svaddhs, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_svadduhs, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_svavgh, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_svavghs, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_svnavgh, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_svsubh, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_svsubhs, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_svsubuhs, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_swiz, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_sxtb, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_sxth, HEX_IL_INSN_ATTR_NONE }, + { 
(HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_sxtw, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_tfr, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_tfrcrr, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_tfrih, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_tfril, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_tfrrcr, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_tfrsi, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_vabsh, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_vabshsat, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_vabsw, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_vabswsat, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, 
HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_vaddh, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_vaddhs, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_vaddub, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_vaddubs, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_vadduhs, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_vaddw, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_vaddws, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_vavgh, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_vavghcr, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_vavghr, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_vavgub, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_vavgubr, 
HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_vavguh, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_vavguhr, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_vavguw, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_vavguwr, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_vavgw, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_vavgwcr, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_vavgwr, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_vcmpbeq, HEX_IL_INSN_ATTR_WPRED }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_vcmpbgtu, HEX_IL_INSN_ATTR_WPRED }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_vcmpheq, HEX_IL_INSN_ATTR_WPRED }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_vcmphgt, HEX_IL_INSN_ATTR_WPRED }, + { (HexILOpGetter)NULL, 
HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_vcmphgtu, HEX_IL_INSN_ATTR_WPRED }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_vcmpweq, HEX_IL_INSN_ATTR_WPRED }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_vcmpwgt, HEX_IL_INSN_ATTR_WPRED }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_vcmpwgtu, HEX_IL_INSN_ATTR_WPRED }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_vconj, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_vmaxb, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_vmaxh, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_vmaxub, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_vmaxuh, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_vmaxuw, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_vmaxw, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, 
HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_vminb, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_vminh, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_vminub, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_vminuh, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_vminuw, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_vminw, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_vnavgh, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_vnavghcr, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_vnavghr, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_vnavgw, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_vnavgwcr, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_vnavgwr, 
HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_vraddub, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_vraddub_acc, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_vrsadub, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_vrsadub_acc, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_vsubh, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_vsubhs, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_vsubub, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_vsububs, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_vsubuhs, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_vsubw, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_vsubws, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, 
HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_xor, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_xorp, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a2_zxth, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a4_addp_c, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a4_andn, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a4_andnp, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a4_bitsplit, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a4_bitspliti, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a4_boundscheck_hi, HEX_IL_INSN_ATTR_WPRED }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a4_boundscheck_lo, HEX_IL_INSN_ATTR_WPRED }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a4_cmpbeq, HEX_IL_INSN_ATTR_WPRED }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, 
HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a4_cmpbeqi, HEX_IL_INSN_ATTR_WPRED }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a4_cmpbgt, HEX_IL_INSN_ATTR_WPRED }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a4_cmpbgti, HEX_IL_INSN_ATTR_WPRED }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a4_cmpbgtu, HEX_IL_INSN_ATTR_WPRED }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a4_cmpbgtui, HEX_IL_INSN_ATTR_WPRED }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a4_cmpheq, HEX_IL_INSN_ATTR_WPRED }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a4_cmpheqi, HEX_IL_INSN_ATTR_WPRED }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a4_cmphgt, HEX_IL_INSN_ATTR_WPRED }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a4_cmphgti, HEX_IL_INSN_ATTR_WPRED }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a4_cmphgtu, HEX_IL_INSN_ATTR_WPRED }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a4_cmphgtui, HEX_IL_INSN_ATTR_WPRED }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { 
(HexILOpGetter)hex_il_op_a4_combineii, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a4_combineir, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a4_combineri, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a4_cround_ri, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a4_cround_rr, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a4_ext, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a4_modwrapu, HEX_IL_INSN_ATTR_COND }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a4_orn, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a4_ornp, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a4_paslhf, HEX_IL_INSN_ATTR_COND }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a4_paslhfnew, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a4_paslht, 
HEX_IL_INSN_ATTR_COND }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a4_paslhtnew, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a4_pasrhf, HEX_IL_INSN_ATTR_COND }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a4_pasrhfnew, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a4_pasrht, HEX_IL_INSN_ATTR_COND }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a4_pasrhtnew, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a4_psxtbf, HEX_IL_INSN_ATTR_COND }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a4_psxtbfnew, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a4_psxtbt, HEX_IL_INSN_ATTR_COND }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a4_psxtbtnew, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a4_psxthf, HEX_IL_INSN_ATTR_COND }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, 
HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a4_psxthfnew, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a4_psxtht, HEX_IL_INSN_ATTR_COND }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a4_psxthtnew, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a4_pzxtbf, HEX_IL_INSN_ATTR_COND }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a4_pzxtbfnew, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a4_pzxtbt, HEX_IL_INSN_ATTR_COND }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a4_pzxtbtnew, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a4_pzxthf, HEX_IL_INSN_ATTR_COND }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a4_pzxthfnew, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a4_pzxtht, HEX_IL_INSN_ATTR_COND }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a4_pzxthtnew, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW 
}, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a4_rcmpeq, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a4_rcmpeqi, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a4_rcmpneq, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a4_rcmpneqi, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a4_round_ri, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a4_round_ri_sat, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a4_round_rr, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a4_round_rr_sat, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a4_subp_c, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a4_tfrcpp, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a4_tfrpcp, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, 
HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a4_tlbmatch, HEX_IL_INSN_ATTR_WPRED }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a4_vcmpbeq_any, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_WPRED }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a4_vcmpbeqi, HEX_IL_INSN_ATTR_WPRED }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a4_vcmpbgt, HEX_IL_INSN_ATTR_WPRED }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a4_vcmpbgti, HEX_IL_INSN_ATTR_WPRED }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a4_vcmpbgtui, HEX_IL_INSN_ATTR_WPRED }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a4_vcmpheqi, HEX_IL_INSN_ATTR_WPRED }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a4_vcmphgti, HEX_IL_INSN_ATTR_WPRED }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a4_vcmphgtui, HEX_IL_INSN_ATTR_WPRED }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a4_vcmpweqi, HEX_IL_INSN_ATTR_WPRED }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a4_vcmpwgti, HEX_IL_INSN_ATTR_WPRED }, + { (HexILOpGetter)NULL, 
HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a4_vcmpwgtui, HEX_IL_INSN_ATTR_WPRED }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a4_vrmaxh, HEX_IL_INSN_ATTR_COND }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a4_vrmaxuh, HEX_IL_INSN_ATTR_COND }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a4_vrmaxuw, HEX_IL_INSN_ATTR_COND }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a4_vrmaxw, HEX_IL_INSN_ATTR_COND }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a4_vrminh, HEX_IL_INSN_ATTR_COND }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a4_vrminuh, HEX_IL_INSN_ATTR_COND }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a4_vrminuw, HEX_IL_INSN_ATTR_COND }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a4_vrminw, HEX_IL_INSN_ATTR_COND }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a5_acs, HEX_IL_INSN_ATTR_WPRED }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a5_vaddhubs, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, 
HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a6_vcmpbeq_notany, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_WPRED }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a6_vminub_rdp, HEX_IL_INSN_ATTR_WPRED }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a7_clip, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a7_croundd_ri, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a7_croundd_rr, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_a7_vclip, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_c2_all8, HEX_IL_INSN_ATTR_WPRED }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_c2_and, HEX_IL_INSN_ATTR_WPRED }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_c2_andn, HEX_IL_INSN_ATTR_WPRED }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_c2_any8, HEX_IL_INSN_ATTR_WPRED }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_c2_bitsclr, HEX_IL_INSN_ATTR_WPRED }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } 
}, + { { (HexILOpGetter)hex_il_op_c2_bitsclri, HEX_IL_INSN_ATTR_WPRED }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_c2_bitsset, HEX_IL_INSN_ATTR_WPRED }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_c2_ccombinewf, HEX_IL_INSN_ATTR_COND }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_c2_ccombinewnewf, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_c2_ccombinewnewt, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_c2_ccombinewt, HEX_IL_INSN_ATTR_COND }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_c2_cmoveif, HEX_IL_INSN_ATTR_COND }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_c2_cmoveit, HEX_IL_INSN_ATTR_COND }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_c2_cmovenewif, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_c2_cmovenewit, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_c2_cmpeq, HEX_IL_INSN_ATTR_WPRED }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID 
}, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_c2_cmpeqi, HEX_IL_INSN_ATTR_WPRED }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_c2_cmpeqp, HEX_IL_INSN_ATTR_WPRED }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_c2_cmpgt, HEX_IL_INSN_ATTR_WPRED }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_c2_cmpgti, HEX_IL_INSN_ATTR_WPRED }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_c2_cmpgtp, HEX_IL_INSN_ATTR_WPRED }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_c2_cmpgtu, HEX_IL_INSN_ATTR_WPRED }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_c2_cmpgtui, HEX_IL_INSN_ATTR_WPRED }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_c2_cmpgtup, HEX_IL_INSN_ATTR_WPRED }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_c2_mask, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_c2_mux, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_c2_muxii, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { 
(HexILOpGetter)hex_il_op_c2_muxir, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_c2_muxri, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_c2_not, HEX_IL_INSN_ATTR_WPRED }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_c2_or, HEX_IL_INSN_ATTR_WPRED }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_c2_orn, HEX_IL_INSN_ATTR_WPRED }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_c2_tfrpr, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_c2_tfrrp, HEX_IL_INSN_ATTR_WPRED }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_c2_vitpack, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_c2_vmux, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_c2_xor, HEX_IL_INSN_ATTR_WPRED }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_c4_addipc, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_c4_and_and, HEX_IL_INSN_ATTR_WPRED }, + { (HexILOpGetter)NULL, 
HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_c4_and_andn, HEX_IL_INSN_ATTR_WPRED }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_c4_and_or, HEX_IL_INSN_ATTR_WPRED }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_c4_and_orn, HEX_IL_INSN_ATTR_WPRED }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_c4_cmplte, HEX_IL_INSN_ATTR_WPRED }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_c4_cmpltei, HEX_IL_INSN_ATTR_WPRED }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_c4_cmplteu, HEX_IL_INSN_ATTR_WPRED }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_c4_cmplteui, HEX_IL_INSN_ATTR_WPRED }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_c4_cmpneq, HEX_IL_INSN_ATTR_WPRED }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_c4_cmpneqi, HEX_IL_INSN_ATTR_WPRED }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_c4_fastcorner9, HEX_IL_INSN_ATTR_WPRED }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_c4_fastcorner9_not, HEX_IL_INSN_ATTR_WPRED }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { 
(HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_c4_nbitsclr, HEX_IL_INSN_ATTR_WPRED }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_c4_nbitsclri, HEX_IL_INSN_ATTR_WPRED }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_c4_nbitsset, HEX_IL_INSN_ATTR_WPRED }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_c4_or_and, HEX_IL_INSN_ATTR_WPRED }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_c4_or_andn, HEX_IL_INSN_ATTR_WPRED }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_c4_or_or, HEX_IL_INSN_ATTR_WPRED }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_c4_or_orn, HEX_IL_INSN_ATTR_WPRED }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_f2_conv_d2df, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_f2_conv_d2sf, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_f2_conv_df2d, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_f2_conv_df2d_chop, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, 
HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_f2_conv_df2sf, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_f2_conv_df2ud, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_f2_conv_df2ud_chop, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_f2_conv_df2uw, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_f2_conv_df2uw_chop, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_f2_conv_df2w, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_f2_conv_df2w_chop, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_f2_conv_sf2d, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_f2_conv_sf2d_chop, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_f2_conv_sf2df, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_f2_conv_sf2ud, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, 
HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_f2_conv_sf2ud_chop, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_f2_conv_sf2uw, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_f2_conv_sf2uw_chop, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_f2_conv_sf2w, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_f2_conv_sf2w_chop, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_f2_conv_ud2df, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_f2_conv_ud2sf, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_f2_conv_uw2df, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_f2_conv_uw2sf, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_f2_conv_w2df, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_f2_conv_w2sf, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { 
(HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_f2_dfadd, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_f2_dfclass, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_f2_dfcmpeq, HEX_IL_INSN_ATTR_WPRED }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_f2_dfcmpge, HEX_IL_INSN_ATTR_WPRED }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_f2_dfcmpgt, HEX_IL_INSN_ATTR_WPRED }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_f2_dfcmpuo, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_f2_dfimm_n, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_f2_dfimm_p, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_f2_dfmax, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_f2_dfmin, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_f2_dfmpyhh, HEX_IL_INSN_ATTR_INVALID 
}, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_f2_dfmpylh, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_f2_dfmpyll, HEX_IL_INSN_ATTR_COND }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_f2_dfsub, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_f2_sfadd, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_f2_sfclass, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_f2_sfcmpeq, HEX_IL_INSN_ATTR_WPRED }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_f2_sfcmpge, HEX_IL_INSN_ATTR_WPRED }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_f2_sfcmpgt, HEX_IL_INSN_ATTR_WPRED }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_f2_sfcmpuo, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_f2_sffixupd, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_f2_sffixupn, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, 
HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_f2_sffixupr, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_f2_sffma, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_f2_sffma_lib, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_f2_sffma_sc, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_f2_sffms, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_f2_sffms_lib, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_f2_sfimm_n, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_f2_sfimm_p, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_f2_sfinvsqrta, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_f2_sfmax, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_f2_sfmin, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { 
(HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_f2_sfmpy, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_f2_sfrecipa, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_f2_sfsub, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_g4_tfrgcpp, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_g4_tfrgcrr, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_g4_tfrgpcp, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_g4_tfrgrcr, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_imported_rd_ss, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_imported_rd_memw_phys_rs_rt, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_imported_rdd_sss, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_imported_sd_rs, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { 
(HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_imported_sdd_rss, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_imported_l2gclean_rtt, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_imported_l2gcleaninv_rtt, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j2_call, HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j2_callf, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j2_callr, HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j2_callrf, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j2_callrh, HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j2_callrt, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j2_callt, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j2_jump, 
HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j2_jumpf, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j2_jumpfnew, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j2_jumpfnewpt, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j2_jumpfpt, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j2_jumpr, HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j2_jumprf, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j2_jumprfnew, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j2_jumprfnewpt, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j2_jumprfpt, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, 
HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j2_jumprgtez, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j2_jumprgtezpt, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j2_jumprh, HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j2_jumprltez, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j2_jumprltezpt, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j2_jumprnz, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j2_jumprnzpt, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j2_jumprt, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j2_jumprtnew, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j2_jumprtnewpt, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_BRANCH }, + { 
(HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j2_jumprtpt, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j2_jumprz, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j2_jumprzpt, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j2_jumpt, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j2_jumptnew, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j2_jumptnewpt, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j2_jumptpt, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j2_loop0i, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j2_loop0r, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j2_loop1i, HEX_IL_INSN_ATTR_NONE }, + { 
(HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j2_loop1r, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j2_pause, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j2_ploop1si, HEX_IL_INSN_ATTR_WPRED | HEX_IL_INSN_ATTR_WRITE_P3 }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j2_ploop1sr, HEX_IL_INSN_ATTR_WPRED | HEX_IL_INSN_ATTR_WRITE_P3 }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j2_ploop2si, HEX_IL_INSN_ATTR_WPRED | HEX_IL_INSN_ATTR_WRITE_P3 }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j2_ploop2sr, HEX_IL_INSN_ATTR_WPRED | HEX_IL_INSN_ATTR_WRITE_P3 }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j2_ploop3si, HEX_IL_INSN_ATTR_WPRED | HEX_IL_INSN_ATTR_WRITE_P3 }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j2_ploop3sr, HEX_IL_INSN_ATTR_WPRED | HEX_IL_INSN_ATTR_WRITE_P3 }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j2_rte, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j2_trap0, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { 
(HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j2_trap1, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j2_unpause, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j4_cmpeq_f_jumpnv_nt, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j4_cmpeq_f_jumpnv_t, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j4_cmpeq_fp0_jump_nt_part0, HEX_IL_INSN_ATTR_WPRED | HEX_IL_INSN_ATTR_WRITE_P3 | HEX_IL_INSN_ATTR_WRITE_P0 }, + { (HexILOpGetter)hex_il_op_j4_cmpeq_fp0_jump_nt_part1, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j4_cmpeq_fp0_jump_t_part0, HEX_IL_INSN_ATTR_WPRED | HEX_IL_INSN_ATTR_WRITE_P3 | HEX_IL_INSN_ATTR_WRITE_P0 }, + { (HexILOpGetter)hex_il_op_j4_cmpeq_fp0_jump_t_part1, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j4_cmpeq_fp1_jump_nt_part0, HEX_IL_INSN_ATTR_WPRED | HEX_IL_INSN_ATTR_WRITE_P3 | HEX_IL_INSN_ATTR_WRITE_P0 | HEX_IL_INSN_ATTR_WRITE_P1 }, + { (HexILOpGetter)hex_il_op_j4_cmpeq_fp1_jump_nt_part1, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j4_cmpeq_fp1_jump_t_part0, HEX_IL_INSN_ATTR_WPRED | HEX_IL_INSN_ATTR_WRITE_P3 | 
HEX_IL_INSN_ATTR_WRITE_P0 | HEX_IL_INSN_ATTR_WRITE_P1 }, + { (HexILOpGetter)hex_il_op_j4_cmpeq_fp1_jump_t_part1, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j4_cmpeq_t_jumpnv_nt, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j4_cmpeq_t_jumpnv_t, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j4_cmpeq_tp0_jump_nt_part0, HEX_IL_INSN_ATTR_WPRED | HEX_IL_INSN_ATTR_WRITE_P3 | HEX_IL_INSN_ATTR_WRITE_P0 | HEX_IL_INSN_ATTR_WRITE_P1 }, + { (HexILOpGetter)hex_il_op_j4_cmpeq_tp0_jump_nt_part1, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j4_cmpeq_tp0_jump_t_part0, HEX_IL_INSN_ATTR_WPRED | HEX_IL_INSN_ATTR_WRITE_P3 | HEX_IL_INSN_ATTR_WRITE_P0 | HEX_IL_INSN_ATTR_WRITE_P1 }, + { (HexILOpGetter)hex_il_op_j4_cmpeq_tp0_jump_t_part1, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j4_cmpeq_tp1_jump_nt_part0, HEX_IL_INSN_ATTR_WPRED | HEX_IL_INSN_ATTR_WRITE_P3 | HEX_IL_INSN_ATTR_WRITE_P0 | HEX_IL_INSN_ATTR_WRITE_P1 }, + { (HexILOpGetter)hex_il_op_j4_cmpeq_tp1_jump_nt_part1, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j4_cmpeq_tp1_jump_t_part0, HEX_IL_INSN_ATTR_WPRED | HEX_IL_INSN_ATTR_WRITE_P3 | HEX_IL_INSN_ATTR_WRITE_P0 | HEX_IL_INSN_ATTR_WRITE_P1 }, + { (HexILOpGetter)hex_il_op_j4_cmpeq_tp1_jump_t_part1, 
HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j4_cmpeqi_f_jumpnv_nt, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j4_cmpeqi_f_jumpnv_t, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j4_cmpeqi_fp0_jump_nt_part0, HEX_IL_INSN_ATTR_WPRED | HEX_IL_INSN_ATTR_WRITE_P3 | HEX_IL_INSN_ATTR_WRITE_P0 | HEX_IL_INSN_ATTR_WRITE_P1 }, + { (HexILOpGetter)hex_il_op_j4_cmpeqi_fp0_jump_nt_part1, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j4_cmpeqi_fp0_jump_t_part0, HEX_IL_INSN_ATTR_WPRED | HEX_IL_INSN_ATTR_WRITE_P3 | HEX_IL_INSN_ATTR_WRITE_P0 | HEX_IL_INSN_ATTR_WRITE_P1 }, + { (HexILOpGetter)hex_il_op_j4_cmpeqi_fp0_jump_t_part1, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j4_cmpeqi_fp1_jump_nt_part0, HEX_IL_INSN_ATTR_WPRED | HEX_IL_INSN_ATTR_WRITE_P3 | HEX_IL_INSN_ATTR_WRITE_P0 | HEX_IL_INSN_ATTR_WRITE_P1 }, + { (HexILOpGetter)hex_il_op_j4_cmpeqi_fp1_jump_nt_part1, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j4_cmpeqi_fp1_jump_t_part0, HEX_IL_INSN_ATTR_WPRED | HEX_IL_INSN_ATTR_WRITE_P3 | HEX_IL_INSN_ATTR_WRITE_P0 | HEX_IL_INSN_ATTR_WRITE_P1 }, + { (HexILOpGetter)hex_il_op_j4_cmpeqi_fp1_jump_t_part1, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, 
HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j4_cmpeqi_t_jumpnv_nt, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j4_cmpeqi_t_jumpnv_t, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j4_cmpeqi_tp0_jump_nt_part0, HEX_IL_INSN_ATTR_WPRED | HEX_IL_INSN_ATTR_WRITE_P3 | HEX_IL_INSN_ATTR_WRITE_P0 | HEX_IL_INSN_ATTR_WRITE_P1 }, + { (HexILOpGetter)hex_il_op_j4_cmpeqi_tp0_jump_nt_part1, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j4_cmpeqi_tp0_jump_t_part0, HEX_IL_INSN_ATTR_WPRED | HEX_IL_INSN_ATTR_WRITE_P3 | HEX_IL_INSN_ATTR_WRITE_P0 | HEX_IL_INSN_ATTR_WRITE_P1 }, + { (HexILOpGetter)hex_il_op_j4_cmpeqi_tp0_jump_t_part1, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j4_cmpeqi_tp1_jump_nt_part0, HEX_IL_INSN_ATTR_WPRED | HEX_IL_INSN_ATTR_WRITE_P3 | HEX_IL_INSN_ATTR_WRITE_P0 | HEX_IL_INSN_ATTR_WRITE_P1 }, + { (HexILOpGetter)hex_il_op_j4_cmpeqi_tp1_jump_nt_part1, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j4_cmpeqi_tp1_jump_t_part0, HEX_IL_INSN_ATTR_WPRED | HEX_IL_INSN_ATTR_WRITE_P3 | HEX_IL_INSN_ATTR_WRITE_P0 | HEX_IL_INSN_ATTR_WRITE_P1 }, + { (HexILOpGetter)hex_il_op_j4_cmpeqi_tp1_jump_t_part1, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j4_cmpeqn1_f_jumpnv_nt, 
HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j4_cmpeqn1_f_jumpnv_t, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j4_cmpeqn1_fp0_jump_nt_part0, HEX_IL_INSN_ATTR_WPRED | HEX_IL_INSN_ATTR_WRITE_P3 | HEX_IL_INSN_ATTR_WRITE_P0 | HEX_IL_INSN_ATTR_WRITE_P1 }, + { (HexILOpGetter)hex_il_op_j4_cmpeqn1_fp0_jump_nt_part1, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j4_cmpeqn1_fp0_jump_t_part0, HEX_IL_INSN_ATTR_WPRED | HEX_IL_INSN_ATTR_WRITE_P3 | HEX_IL_INSN_ATTR_WRITE_P0 | HEX_IL_INSN_ATTR_WRITE_P1 }, + { (HexILOpGetter)hex_il_op_j4_cmpeqn1_fp0_jump_t_part1, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j4_cmpeqn1_fp1_jump_nt_part0, HEX_IL_INSN_ATTR_WPRED | HEX_IL_INSN_ATTR_WRITE_P3 | HEX_IL_INSN_ATTR_WRITE_P0 | HEX_IL_INSN_ATTR_WRITE_P1 }, + { (HexILOpGetter)hex_il_op_j4_cmpeqn1_fp1_jump_nt_part1, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j4_cmpeqn1_fp1_jump_t_part0, HEX_IL_INSN_ATTR_WPRED | HEX_IL_INSN_ATTR_WRITE_P3 | HEX_IL_INSN_ATTR_WRITE_P0 | HEX_IL_INSN_ATTR_WRITE_P1 }, + { (HexILOpGetter)hex_il_op_j4_cmpeqn1_fp1_jump_t_part1, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j4_cmpeqn1_t_jumpnv_nt, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_BRANCH }, + { 
(HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j4_cmpeqn1_t_jumpnv_t, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j4_cmpeqn1_tp0_jump_nt_part0, HEX_IL_INSN_ATTR_WPRED | HEX_IL_INSN_ATTR_WRITE_P3 | HEX_IL_INSN_ATTR_WRITE_P0 | HEX_IL_INSN_ATTR_WRITE_P1 }, + { (HexILOpGetter)hex_il_op_j4_cmpeqn1_tp0_jump_nt_part1, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j4_cmpeqn1_tp0_jump_t_part0, HEX_IL_INSN_ATTR_WPRED | HEX_IL_INSN_ATTR_WRITE_P3 | HEX_IL_INSN_ATTR_WRITE_P0 | HEX_IL_INSN_ATTR_WRITE_P1 }, + { (HexILOpGetter)hex_il_op_j4_cmpeqn1_tp0_jump_t_part1, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j4_cmpeqn1_tp1_jump_nt_part0, HEX_IL_INSN_ATTR_WPRED | HEX_IL_INSN_ATTR_WRITE_P3 | HEX_IL_INSN_ATTR_WRITE_P0 | HEX_IL_INSN_ATTR_WRITE_P1 }, + { (HexILOpGetter)hex_il_op_j4_cmpeqn1_tp1_jump_nt_part1, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j4_cmpeqn1_tp1_jump_t_part0, HEX_IL_INSN_ATTR_WPRED | HEX_IL_INSN_ATTR_WRITE_P3 | HEX_IL_INSN_ATTR_WRITE_P0 | HEX_IL_INSN_ATTR_WRITE_P1 }, + { (HexILOpGetter)hex_il_op_j4_cmpeqn1_tp1_jump_t_part1, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j4_cmpgt_f_jumpnv_nt, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, 
HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j4_cmpgt_f_jumpnv_t, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j4_cmpgt_fp0_jump_nt_part0, HEX_IL_INSN_ATTR_WPRED | HEX_IL_INSN_ATTR_WRITE_P3 | HEX_IL_INSN_ATTR_WRITE_P0 | HEX_IL_INSN_ATTR_WRITE_P1 }, + { (HexILOpGetter)hex_il_op_j4_cmpgt_fp0_jump_nt_part1, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j4_cmpgt_fp0_jump_t_part0, HEX_IL_INSN_ATTR_WPRED | HEX_IL_INSN_ATTR_WRITE_P3 | HEX_IL_INSN_ATTR_WRITE_P0 | HEX_IL_INSN_ATTR_WRITE_P1 }, + { (HexILOpGetter)hex_il_op_j4_cmpgt_fp0_jump_t_part1, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j4_cmpgt_fp1_jump_nt_part0, HEX_IL_INSN_ATTR_WPRED | HEX_IL_INSN_ATTR_WRITE_P3 | HEX_IL_INSN_ATTR_WRITE_P0 | HEX_IL_INSN_ATTR_WRITE_P1 }, + { (HexILOpGetter)hex_il_op_j4_cmpgt_fp1_jump_nt_part1, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j4_cmpgt_fp1_jump_t_part0, HEX_IL_INSN_ATTR_WPRED | HEX_IL_INSN_ATTR_WRITE_P3 | HEX_IL_INSN_ATTR_WRITE_P0 | HEX_IL_INSN_ATTR_WRITE_P1 }, + { (HexILOpGetter)hex_il_op_j4_cmpgt_fp1_jump_t_part1, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j4_cmpgt_t_jumpnv_nt, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j4_cmpgt_t_jumpnv_t, HEX_IL_INSN_ATTR_COND | 
HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j4_cmpgt_tp0_jump_nt_part0, HEX_IL_INSN_ATTR_WPRED | HEX_IL_INSN_ATTR_WRITE_P3 | HEX_IL_INSN_ATTR_WRITE_P0 | HEX_IL_INSN_ATTR_WRITE_P1 }, + { (HexILOpGetter)hex_il_op_j4_cmpgt_tp0_jump_nt_part1, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j4_cmpgt_tp0_jump_t_part0, HEX_IL_INSN_ATTR_WPRED | HEX_IL_INSN_ATTR_WRITE_P3 | HEX_IL_INSN_ATTR_WRITE_P0 | HEX_IL_INSN_ATTR_WRITE_P1 }, + { (HexILOpGetter)hex_il_op_j4_cmpgt_tp0_jump_t_part1, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j4_cmpgt_tp1_jump_nt_part0, HEX_IL_INSN_ATTR_WPRED | HEX_IL_INSN_ATTR_WRITE_P3 | HEX_IL_INSN_ATTR_WRITE_P0 | HEX_IL_INSN_ATTR_WRITE_P1 }, + { (HexILOpGetter)hex_il_op_j4_cmpgt_tp1_jump_nt_part1, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j4_cmpgt_tp1_jump_t_part0, HEX_IL_INSN_ATTR_WPRED | HEX_IL_INSN_ATTR_WRITE_P3 | HEX_IL_INSN_ATTR_WRITE_P0 | HEX_IL_INSN_ATTR_WRITE_P1 }, + { (HexILOpGetter)hex_il_op_j4_cmpgt_tp1_jump_t_part1, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j4_cmpgti_f_jumpnv_nt, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j4_cmpgti_f_jumpnv_t, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { 
(HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j4_cmpgti_fp0_jump_nt_part0, HEX_IL_INSN_ATTR_WPRED | HEX_IL_INSN_ATTR_WRITE_P3 | HEX_IL_INSN_ATTR_WRITE_P0 | HEX_IL_INSN_ATTR_WRITE_P1 }, + { (HexILOpGetter)hex_il_op_j4_cmpgti_fp0_jump_nt_part1, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j4_cmpgti_fp0_jump_t_part0, HEX_IL_INSN_ATTR_WPRED | HEX_IL_INSN_ATTR_WRITE_P3 | HEX_IL_INSN_ATTR_WRITE_P0 | HEX_IL_INSN_ATTR_WRITE_P1 }, + { (HexILOpGetter)hex_il_op_j4_cmpgti_fp0_jump_t_part1, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j4_cmpgti_fp1_jump_nt_part0, HEX_IL_INSN_ATTR_WPRED | HEX_IL_INSN_ATTR_WRITE_P3 | HEX_IL_INSN_ATTR_WRITE_P0 | HEX_IL_INSN_ATTR_WRITE_P1 }, + { (HexILOpGetter)hex_il_op_j4_cmpgti_fp1_jump_nt_part1, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j4_cmpgti_fp1_jump_t_part0, HEX_IL_INSN_ATTR_WPRED | HEX_IL_INSN_ATTR_WRITE_P3 | HEX_IL_INSN_ATTR_WRITE_P0 | HEX_IL_INSN_ATTR_WRITE_P1 }, + { (HexILOpGetter)hex_il_op_j4_cmpgti_fp1_jump_t_part1, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j4_cmpgti_t_jumpnv_nt, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j4_cmpgti_t_jumpnv_t, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { 
(HexILOpGetter)hex_il_op_j4_cmpgti_tp0_jump_nt_part0, HEX_IL_INSN_ATTR_WPRED | HEX_IL_INSN_ATTR_WRITE_P3 | HEX_IL_INSN_ATTR_WRITE_P0 | HEX_IL_INSN_ATTR_WRITE_P1 }, + { (HexILOpGetter)hex_il_op_j4_cmpgti_tp0_jump_nt_part1, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j4_cmpgti_tp0_jump_t_part0, HEX_IL_INSN_ATTR_WPRED | HEX_IL_INSN_ATTR_WRITE_P3 | HEX_IL_INSN_ATTR_WRITE_P0 | HEX_IL_INSN_ATTR_WRITE_P1 }, + { (HexILOpGetter)hex_il_op_j4_cmpgti_tp0_jump_t_part1, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j4_cmpgti_tp1_jump_nt_part0, HEX_IL_INSN_ATTR_WPRED | HEX_IL_INSN_ATTR_WRITE_P3 | HEX_IL_INSN_ATTR_WRITE_P0 | HEX_IL_INSN_ATTR_WRITE_P1 }, + { (HexILOpGetter)hex_il_op_j4_cmpgti_tp1_jump_nt_part1, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j4_cmpgti_tp1_jump_t_part0, HEX_IL_INSN_ATTR_WPRED | HEX_IL_INSN_ATTR_WRITE_P3 | HEX_IL_INSN_ATTR_WRITE_P0 | HEX_IL_INSN_ATTR_WRITE_P1 }, + { (HexILOpGetter)hex_il_op_j4_cmpgti_tp1_jump_t_part1, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j4_cmpgtn1_f_jumpnv_nt, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j4_cmpgtn1_f_jumpnv_t, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j4_cmpgtn1_fp0_jump_nt_part0, HEX_IL_INSN_ATTR_WPRED | 
HEX_IL_INSN_ATTR_WRITE_P3 | HEX_IL_INSN_ATTR_WRITE_P0 | HEX_IL_INSN_ATTR_WRITE_P1 }, + { (HexILOpGetter)hex_il_op_j4_cmpgtn1_fp0_jump_nt_part1, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j4_cmpgtn1_fp0_jump_t_part0, HEX_IL_INSN_ATTR_WPRED | HEX_IL_INSN_ATTR_WRITE_P3 | HEX_IL_INSN_ATTR_WRITE_P0 | HEX_IL_INSN_ATTR_WRITE_P1 }, + { (HexILOpGetter)hex_il_op_j4_cmpgtn1_fp0_jump_t_part1, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j4_cmpgtn1_fp1_jump_nt_part0, HEX_IL_INSN_ATTR_WPRED | HEX_IL_INSN_ATTR_WRITE_P3 | HEX_IL_INSN_ATTR_WRITE_P0 | HEX_IL_INSN_ATTR_WRITE_P1 }, + { (HexILOpGetter)hex_il_op_j4_cmpgtn1_fp1_jump_nt_part1, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j4_cmpgtn1_fp1_jump_t_part0, HEX_IL_INSN_ATTR_WPRED | HEX_IL_INSN_ATTR_WRITE_P3 | HEX_IL_INSN_ATTR_WRITE_P0 | HEX_IL_INSN_ATTR_WRITE_P1 }, + { (HexILOpGetter)hex_il_op_j4_cmpgtn1_fp1_jump_t_part1, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j4_cmpgtn1_t_jumpnv_nt, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j4_cmpgtn1_t_jumpnv_t, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j4_cmpgtn1_tp0_jump_nt_part0, HEX_IL_INSN_ATTR_WPRED | HEX_IL_INSN_ATTR_WRITE_P3 | HEX_IL_INSN_ATTR_WRITE_P0 | HEX_IL_INSN_ATTR_WRITE_P1 }, + { 
(HexILOpGetter)hex_il_op_j4_cmpgtn1_tp0_jump_nt_part1, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j4_cmpgtn1_tp0_jump_t_part0, HEX_IL_INSN_ATTR_WPRED | HEX_IL_INSN_ATTR_WRITE_P3 | HEX_IL_INSN_ATTR_WRITE_P0 | HEX_IL_INSN_ATTR_WRITE_P1 }, + { (HexILOpGetter)hex_il_op_j4_cmpgtn1_tp0_jump_t_part1, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j4_cmpgtn1_tp1_jump_nt_part0, HEX_IL_INSN_ATTR_WPRED | HEX_IL_INSN_ATTR_WRITE_P3 | HEX_IL_INSN_ATTR_WRITE_P0 | HEX_IL_INSN_ATTR_WRITE_P1 }, + { (HexILOpGetter)hex_il_op_j4_cmpgtn1_tp1_jump_nt_part1, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j4_cmpgtn1_tp1_jump_t_part0, HEX_IL_INSN_ATTR_WPRED | HEX_IL_INSN_ATTR_WRITE_P3 | HEX_IL_INSN_ATTR_WRITE_P0 | HEX_IL_INSN_ATTR_WRITE_P1 }, + { (HexILOpGetter)hex_il_op_j4_cmpgtn1_tp1_jump_t_part1, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j4_cmpgtu_f_jumpnv_nt, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j4_cmpgtu_f_jumpnv_t, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j4_cmpgtu_fp0_jump_nt_part0, HEX_IL_INSN_ATTR_WPRED | HEX_IL_INSN_ATTR_WRITE_P3 | HEX_IL_INSN_ATTR_WRITE_P0 | HEX_IL_INSN_ATTR_WRITE_P1 }, + { (HexILOpGetter)hex_il_op_j4_cmpgtu_fp0_jump_nt_part1, HEX_IL_INSN_ATTR_COND | 
HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j4_cmpgtu_fp0_jump_t_part0, HEX_IL_INSN_ATTR_WPRED | HEX_IL_INSN_ATTR_WRITE_P3 | HEX_IL_INSN_ATTR_WRITE_P0 | HEX_IL_INSN_ATTR_WRITE_P1 }, + { (HexILOpGetter)hex_il_op_j4_cmpgtu_fp0_jump_t_part1, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j4_cmpgtu_fp1_jump_nt_part0, HEX_IL_INSN_ATTR_WPRED | HEX_IL_INSN_ATTR_WRITE_P3 | HEX_IL_INSN_ATTR_WRITE_P0 | HEX_IL_INSN_ATTR_WRITE_P1 }, + { (HexILOpGetter)hex_il_op_j4_cmpgtu_fp1_jump_nt_part1, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j4_cmpgtu_fp1_jump_t_part0, HEX_IL_INSN_ATTR_WPRED | HEX_IL_INSN_ATTR_WRITE_P3 | HEX_IL_INSN_ATTR_WRITE_P0 | HEX_IL_INSN_ATTR_WRITE_P1 }, + { (HexILOpGetter)hex_il_op_j4_cmpgtu_fp1_jump_t_part1, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j4_cmpgtu_t_jumpnv_nt, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j4_cmpgtu_t_jumpnv_t, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j4_cmpgtu_tp0_jump_nt_part0, HEX_IL_INSN_ATTR_WPRED | HEX_IL_INSN_ATTR_WRITE_P3 | HEX_IL_INSN_ATTR_WRITE_P0 | HEX_IL_INSN_ATTR_WRITE_P1 }, + { (HexILOpGetter)hex_il_op_j4_cmpgtu_tp0_jump_nt_part1, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, 
+ { { (HexILOpGetter)hex_il_op_j4_cmpgtu_tp0_jump_t_part0, HEX_IL_INSN_ATTR_WPRED | HEX_IL_INSN_ATTR_WRITE_P3 | HEX_IL_INSN_ATTR_WRITE_P0 | HEX_IL_INSN_ATTR_WRITE_P1 }, + { (HexILOpGetter)hex_il_op_j4_cmpgtu_tp0_jump_t_part1, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j4_cmpgtu_tp1_jump_nt_part0, HEX_IL_INSN_ATTR_WPRED | HEX_IL_INSN_ATTR_WRITE_P3 | HEX_IL_INSN_ATTR_WRITE_P0 | HEX_IL_INSN_ATTR_WRITE_P1 }, + { (HexILOpGetter)hex_il_op_j4_cmpgtu_tp1_jump_nt_part1, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j4_cmpgtu_tp1_jump_t_part0, HEX_IL_INSN_ATTR_WPRED | HEX_IL_INSN_ATTR_WRITE_P3 | HEX_IL_INSN_ATTR_WRITE_P0 | HEX_IL_INSN_ATTR_WRITE_P1 }, + { (HexILOpGetter)hex_il_op_j4_cmpgtu_tp1_jump_t_part1, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j4_cmpgtui_f_jumpnv_nt, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j4_cmpgtui_f_jumpnv_t, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j4_cmpgtui_fp0_jump_nt_part0, HEX_IL_INSN_ATTR_WPRED | HEX_IL_INSN_ATTR_WRITE_P3 | HEX_IL_INSN_ATTR_WRITE_P0 | HEX_IL_INSN_ATTR_WRITE_P1 }, + { (HexILOpGetter)hex_il_op_j4_cmpgtui_fp0_jump_nt_part1, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j4_cmpgtui_fp0_jump_t_part0, HEX_IL_INSN_ATTR_WPRED | 
HEX_IL_INSN_ATTR_WRITE_P3 | HEX_IL_INSN_ATTR_WRITE_P0 | HEX_IL_INSN_ATTR_WRITE_P1 }, + { (HexILOpGetter)hex_il_op_j4_cmpgtui_fp0_jump_t_part1, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j4_cmpgtui_fp1_jump_nt_part0, HEX_IL_INSN_ATTR_WPRED | HEX_IL_INSN_ATTR_WRITE_P3 | HEX_IL_INSN_ATTR_WRITE_P0 | HEX_IL_INSN_ATTR_WRITE_P1 }, + { (HexILOpGetter)hex_il_op_j4_cmpgtui_fp1_jump_nt_part1, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j4_cmpgtui_fp1_jump_t_part0, HEX_IL_INSN_ATTR_WPRED | HEX_IL_INSN_ATTR_WRITE_P3 | HEX_IL_INSN_ATTR_WRITE_P0 | HEX_IL_INSN_ATTR_WRITE_P1 }, + { (HexILOpGetter)hex_il_op_j4_cmpgtui_fp1_jump_t_part1, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j4_cmpgtui_t_jumpnv_nt, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j4_cmpgtui_t_jumpnv_t, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j4_cmpgtui_tp0_jump_nt_part0, HEX_IL_INSN_ATTR_WPRED | HEX_IL_INSN_ATTR_WRITE_P3 | HEX_IL_INSN_ATTR_WRITE_P0 | HEX_IL_INSN_ATTR_WRITE_P1 }, + { (HexILOpGetter)hex_il_op_j4_cmpgtui_tp0_jump_nt_part1, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j4_cmpgtui_tp0_jump_t_part0, HEX_IL_INSN_ATTR_WPRED | HEX_IL_INSN_ATTR_WRITE_P3 | HEX_IL_INSN_ATTR_WRITE_P0 | HEX_IL_INSN_ATTR_WRITE_P1 }, + { 
(HexILOpGetter)hex_il_op_j4_cmpgtui_tp0_jump_t_part1, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j4_cmpgtui_tp1_jump_nt_part0, HEX_IL_INSN_ATTR_WPRED | HEX_IL_INSN_ATTR_WRITE_P3 | HEX_IL_INSN_ATTR_WRITE_P0 | HEX_IL_INSN_ATTR_WRITE_P1 }, + { (HexILOpGetter)hex_il_op_j4_cmpgtui_tp1_jump_nt_part1, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j4_cmpgtui_tp1_jump_t_part0, HEX_IL_INSN_ATTR_WPRED | HEX_IL_INSN_ATTR_WRITE_P3 | HEX_IL_INSN_ATTR_WRITE_P0 | HEX_IL_INSN_ATTR_WRITE_P1 }, + { (HexILOpGetter)hex_il_op_j4_cmpgtui_tp1_jump_t_part1, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j4_cmplt_f_jumpnv_nt, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j4_cmplt_f_jumpnv_t, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j4_cmplt_t_jumpnv_nt, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j4_cmplt_t_jumpnv_t, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j4_cmpltu_f_jumpnv_nt, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, 
HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j4_cmpltu_f_jumpnv_t, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j4_cmpltu_t_jumpnv_nt, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j4_cmpltu_t_jumpnv_t, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j4_hintjumpr, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j4_jumpseti, HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j4_jumpsetr, HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j4_tstbit0_f_jumpnv_nt, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j4_tstbit0_f_jumpnv_t, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j4_tstbit0_fp0_jump_nt_part0, HEX_IL_INSN_ATTR_WPRED | HEX_IL_INSN_ATTR_WRITE_P3 | HEX_IL_INSN_ATTR_WRITE_P0 | HEX_IL_INSN_ATTR_WRITE_P1 }, + { 
(HexILOpGetter)hex_il_op_j4_tstbit0_fp0_jump_nt_part1, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j4_tstbit0_fp0_jump_t_part0, HEX_IL_INSN_ATTR_WPRED | HEX_IL_INSN_ATTR_WRITE_P3 | HEX_IL_INSN_ATTR_WRITE_P0 | HEX_IL_INSN_ATTR_WRITE_P1 }, + { (HexILOpGetter)hex_il_op_j4_tstbit0_fp0_jump_t_part1, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j4_tstbit0_fp1_jump_nt_part0, HEX_IL_INSN_ATTR_WPRED | HEX_IL_INSN_ATTR_WRITE_P3 | HEX_IL_INSN_ATTR_WRITE_P0 | HEX_IL_INSN_ATTR_WRITE_P1 }, + { (HexILOpGetter)hex_il_op_j4_tstbit0_fp1_jump_nt_part1, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j4_tstbit0_fp1_jump_t_part0, HEX_IL_INSN_ATTR_WPRED | HEX_IL_INSN_ATTR_WRITE_P3 | HEX_IL_INSN_ATTR_WRITE_P0 | HEX_IL_INSN_ATTR_WRITE_P1 }, + { (HexILOpGetter)hex_il_op_j4_tstbit0_fp1_jump_t_part1, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j4_tstbit0_t_jumpnv_nt, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j4_tstbit0_t_jumpnv_t, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j4_tstbit0_tp0_jump_nt_part0, HEX_IL_INSN_ATTR_WPRED | HEX_IL_INSN_ATTR_WRITE_P3 | HEX_IL_INSN_ATTR_WRITE_P0 | HEX_IL_INSN_ATTR_WRITE_P1 }, + { (HexILOpGetter)hex_il_op_j4_tstbit0_tp0_jump_nt_part1, HEX_IL_INSN_ATTR_COND | 
HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j4_tstbit0_tp0_jump_t_part0, HEX_IL_INSN_ATTR_WPRED | HEX_IL_INSN_ATTR_WRITE_P3 | HEX_IL_INSN_ATTR_WRITE_P0 | HEX_IL_INSN_ATTR_WRITE_P1 }, + { (HexILOpGetter)hex_il_op_j4_tstbit0_tp0_jump_t_part1, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j4_tstbit0_tp1_jump_nt_part0, HEX_IL_INSN_ATTR_WPRED | HEX_IL_INSN_ATTR_WRITE_P3 | HEX_IL_INSN_ATTR_WRITE_P0 | HEX_IL_INSN_ATTR_WRITE_P1 }, + { (HexILOpGetter)hex_il_op_j4_tstbit0_tp1_jump_nt_part1, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_j4_tstbit0_tp1_jump_t_part0, HEX_IL_INSN_ATTR_WPRED | HEX_IL_INSN_ATTR_WRITE_P3 | HEX_IL_INSN_ATTR_WRITE_P0 | HEX_IL_INSN_ATTR_WRITE_P1 }, + { (HexILOpGetter)hex_il_op_j4_tstbit0_tp1_jump_t_part1, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l2_deallocframe, HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l2_loadalignb_io, HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l2_loadalignb_pbr, HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l2_loadalignb_pci, HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l2_loadalignb_pcr, HEX_IL_INSN_ATTR_MEM_READ 
}, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l2_loadalignb_pi, HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l2_loadalignb_pr, HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l2_loadalignh_io, HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l2_loadalignh_pbr, HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l2_loadalignh_pci, HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l2_loadalignh_pcr, HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l2_loadalignh_pi, HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l2_loadalignh_pr, HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l2_loadbsw2_io, HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l2_loadbsw2_pbr, HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { 
(HexILOpGetter)hex_il_op_l2_loadbsw2_pci, HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l2_loadbsw2_pcr, HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l2_loadbsw2_pi, HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l2_loadbsw2_pr, HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l2_loadbsw4_io, HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l2_loadbsw4_pbr, HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l2_loadbsw4_pci, HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l2_loadbsw4_pcr, HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l2_loadbsw4_pi, HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l2_loadbsw4_pr, HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l2_loadbzw2_io, HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, 
HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l2_loadbzw2_pbr, HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l2_loadbzw2_pci, HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l2_loadbzw2_pcr, HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l2_loadbzw2_pi, HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l2_loadbzw2_pr, HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l2_loadbzw4_io, HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l2_loadbzw4_pbr, HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l2_loadbzw4_pci, HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l2_loadbzw4_pcr, HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l2_loadbzw4_pi, HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l2_loadbzw4_pr, HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, 
HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l2_loadrb_io, HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l2_loadrb_pbr, HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l2_loadrb_pci, HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l2_loadrb_pcr, HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l2_loadrb_pi, HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l2_loadrb_pr, HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l2_loadrbgp, HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l2_loadrd_io, HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l2_loadrd_pbr, HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l2_loadrd_pci, HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l2_loadrd_pcr, HEX_IL_INSN_ATTR_MEM_READ }, + { 
(HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l2_loadrd_pi, HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l2_loadrd_pr, HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l2_loadrdgp, HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l2_loadrh_io, HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l2_loadrh_pbr, HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l2_loadrh_pci, HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l2_loadrh_pcr, HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l2_loadrh_pi, HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l2_loadrh_pr, HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l2_loadrhgp, HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l2_loadri_io, HEX_IL_INSN_ATTR_MEM_READ }, 
+ { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l2_loadri_pbr, HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l2_loadri_pci, HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l2_loadri_pcr, HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l2_loadri_pi, HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l2_loadri_pr, HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l2_loadrigp, HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l2_loadrub_io, HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l2_loadrub_pbr, HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l2_loadrub_pci, HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l2_loadrub_pcr, HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l2_loadrub_pi, 
HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l2_loadrub_pr, HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l2_loadrubgp, HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l2_loadruh_io, HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l2_loadruh_pbr, HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l2_loadruh_pci, HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l2_loadruh_pcr, HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l2_loadruh_pi, HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l2_loadruh_pr, HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l2_loadruhgp, HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l2_loadw_aq, HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { 
(HexILOpGetter)hex_il_op_l2_loadw_locked, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l2_ploadrbf_io, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l2_ploadrbf_pi, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l2_ploadrbfnew_io, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l2_ploadrbfnew_pi, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l2_ploadrbt_io, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l2_ploadrbt_pi, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l2_ploadrbtnew_io, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l2_ploadrbtnew_pi, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l2_ploadrdf_io, 
HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l2_ploadrdf_pi, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l2_ploadrdfnew_io, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l2_ploadrdfnew_pi, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l2_ploadrdt_io, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l2_ploadrdt_pi, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l2_ploadrdtnew_io, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l2_ploadrdtnew_pi, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l2_ploadrhf_io, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l2_ploadrhf_pi, HEX_IL_INSN_ATTR_COND | 
HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l2_ploadrhfnew_io, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l2_ploadrhfnew_pi, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l2_ploadrht_io, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l2_ploadrht_pi, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l2_ploadrhtnew_io, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l2_ploadrhtnew_pi, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l2_ploadrif_io, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l2_ploadrif_pi, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l2_ploadrifnew_io, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW 
| HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l2_ploadrifnew_pi, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l2_ploadrit_io, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l2_ploadrit_pi, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l2_ploadritnew_io, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l2_ploadritnew_pi, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l2_ploadrubf_io, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l2_ploadrubf_pi, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l2_ploadrubfnew_io, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l2_ploadrubfnew_pi, HEX_IL_INSN_ATTR_COND | 
HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l2_ploadrubt_io, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l2_ploadrubt_pi, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l2_ploadrubtnew_io, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l2_ploadrubtnew_pi, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l2_ploadruhf_io, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l2_ploadruhf_pi, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l2_ploadruhfnew_io, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l2_ploadruhfnew_pi, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l2_ploadruht_io, 
HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l2_ploadruht_pi, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l2_ploadruhtnew_io, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l2_ploadruhtnew_pi, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l4_add_memopb_io, HEX_IL_INSN_ATTR_MEM_WRITE | HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l4_add_memoph_io, HEX_IL_INSN_ATTR_MEM_WRITE | HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l4_add_memopw_io, HEX_IL_INSN_ATTR_MEM_WRITE | HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l4_and_memopb_io, HEX_IL_INSN_ATTR_MEM_WRITE | HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l4_and_memoph_io, HEX_IL_INSN_ATTR_MEM_WRITE | HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l4_and_memopw_io, HEX_IL_INSN_ATTR_MEM_WRITE | 
HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l4_iadd_memopb_io, HEX_IL_INSN_ATTR_MEM_WRITE | HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l4_iadd_memoph_io, HEX_IL_INSN_ATTR_MEM_WRITE | HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l4_iadd_memopw_io, HEX_IL_INSN_ATTR_MEM_WRITE | HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l4_iand_memopb_io, HEX_IL_INSN_ATTR_MEM_WRITE | HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l4_iand_memoph_io, HEX_IL_INSN_ATTR_MEM_WRITE | HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l4_iand_memopw_io, HEX_IL_INSN_ATTR_MEM_WRITE | HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l4_ior_memopb_io, HEX_IL_INSN_ATTR_MEM_WRITE | HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l4_ior_memoph_io, HEX_IL_INSN_ATTR_MEM_WRITE | HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l4_ior_memopw_io, HEX_IL_INSN_ATTR_MEM_WRITE | HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, 
HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l4_isub_memopb_io, HEX_IL_INSN_ATTR_MEM_WRITE | HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l4_isub_memoph_io, HEX_IL_INSN_ATTR_MEM_WRITE | HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l4_isub_memopw_io, HEX_IL_INSN_ATTR_MEM_WRITE | HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l4_loadalignb_ap, HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l4_loadalignb_ur, HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l4_loadalignh_ap, HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l4_loadalignh_ur, HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l4_loadbsw2_ap, HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l4_loadbsw2_ur, HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l4_loadbsw4_ap, HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, 
HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l4_loadbsw4_ur, HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l4_loadbzw2_ap, HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l4_loadbzw2_ur, HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l4_loadbzw4_ap, HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l4_loadbzw4_ur, HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l4_loadd_aq, HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l4_loadd_locked, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l4_loadrb_ap, HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l4_loadrb_rr, HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l4_loadrb_ur, HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l4_loadrd_ap, HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { 
(HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l4_loadrd_rr, HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l4_loadrd_ur, HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l4_loadrh_ap, HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l4_loadrh_rr, HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l4_loadrh_ur, HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l4_loadri_ap, HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l4_loadri_rr, HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l4_loadri_ur, HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l4_loadrub_ap, HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l4_loadrub_rr, HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l4_loadrub_ur, HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID 
}, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l4_loadruh_ap, HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l4_loadruh_rr, HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l4_loadruh_ur, HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l4_or_memopb_io, HEX_IL_INSN_ATTR_MEM_WRITE | HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l4_or_memoph_io, HEX_IL_INSN_ATTR_MEM_WRITE | HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l4_or_memopw_io, HEX_IL_INSN_ATTR_MEM_WRITE | HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l4_ploadrbf_abs, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l4_ploadrbf_rr, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l4_ploadrbfnew_abs, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l4_ploadrbfnew_rr, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | 
HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l4_ploadrbt_abs, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l4_ploadrbt_rr, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l4_ploadrbtnew_abs, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l4_ploadrbtnew_rr, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l4_ploadrdf_abs, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l4_ploadrdf_rr, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l4_ploadrdfnew_abs, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l4_ploadrdfnew_rr, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l4_ploadrdt_abs, HEX_IL_INSN_ATTR_COND | 
HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l4_ploadrdt_rr, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l4_ploadrdtnew_abs, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l4_ploadrdtnew_rr, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l4_ploadrhf_abs, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l4_ploadrhf_rr, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l4_ploadrhfnew_abs, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l4_ploadrhfnew_rr, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l4_ploadrht_abs, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l4_ploadrht_rr, HEX_IL_INSN_ATTR_COND | 
HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l4_ploadrhtnew_abs, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l4_ploadrhtnew_rr, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l4_ploadrif_abs, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l4_ploadrif_rr, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l4_ploadrifnew_abs, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l4_ploadrifnew_rr, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l4_ploadrit_abs, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l4_ploadrit_rr, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l4_ploadritnew_abs, HEX_IL_INSN_ATTR_COND | 
HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l4_ploadritnew_rr, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l4_ploadrubf_abs, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l4_ploadrubf_rr, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l4_ploadrubfnew_abs, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l4_ploadrubfnew_rr, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l4_ploadrubt_abs, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l4_ploadrubt_rr, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l4_ploadrubtnew_abs, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l4_ploadrubtnew_rr, 
HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l4_ploadruhf_abs, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l4_ploadruhf_rr, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l4_ploadruhfnew_abs, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l4_ploadruhfnew_rr, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l4_ploadruht_abs, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l4_ploadruht_rr, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l4_ploadruhtnew_abs, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l4_ploadruhtnew_rr, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { 
(HexILOpGetter)hex_il_op_l4_return, HEX_IL_INSN_ATTR_MEM_READ | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l4_return_f, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_MEM_READ | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l4_return_fnew_pnt, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_READ | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l4_return_fnew_pt, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_READ | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l4_return_t, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_MEM_READ | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l4_return_tnew_pnt, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_READ | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l4_return_tnew_pt, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_READ | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l4_sub_memopb_io, HEX_IL_INSN_ATTR_MEM_WRITE | HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l4_sub_memoph_io, HEX_IL_INSN_ATTR_MEM_WRITE | 
HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l4_sub_memopw_io, HEX_IL_INSN_ATTR_MEM_WRITE | HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_l6_memcpy, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_acci, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_accii, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_cmaci_s0, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_cmacr_s0, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_cmacs_s0, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_cmacs_s1, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_cmacsc_s0, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_cmacsc_s1, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_cmpyi_s0, 
HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_cmpyr_s0, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_cmpyrs_s0, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_cmpyrs_s1, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_cmpyrsc_s0, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_cmpyrsc_s1, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_cmpys_s0, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_cmpys_s1, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_cmpysc_s0, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_cmpysc_s1, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_cnacs_s0, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_cnacs_s1, HEX_IL_INSN_ATTR_NONE }, + { 
(HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_cnacsc_s0, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_cnacsc_s1, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_dpmpyss_acc_s0, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_dpmpyss_nac_s0, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_dpmpyss_rnd_s0, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_dpmpyss_s0, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_dpmpyuu_acc_s0, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_dpmpyuu_nac_s0, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_dpmpyuu_s0, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_hmmpyh_rs1, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_hmmpyh_s1, HEX_IL_INSN_ATTR_NONE }, + { 
(HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_hmmpyl_rs1, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_hmmpyl_s1, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_maci, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_macsin, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_macsip, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mmachs_rs0, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mmachs_rs1, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mmachs_s0, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mmachs_s1, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mmacls_rs0, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mmacls_rs1, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID 
}, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mmacls_s0, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mmacls_s1, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mmacuhs_rs0, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mmacuhs_rs1, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mmacuhs_s0, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mmacuhs_s1, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mmaculs_rs0, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mmaculs_rs1, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mmaculs_s0, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mmaculs_s1, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mmpyh_rs0, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, 
HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mmpyh_rs1, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mmpyh_s0, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mmpyh_s1, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mmpyl_rs0, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mmpyl_rs1, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mmpyl_s0, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mmpyl_s1, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mmpyuh_rs0, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mmpyuh_rs1, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mmpyuh_s0, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mmpyuh_s1, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { 
(HexILOpGetter)hex_il_op_m2_mmpyul_rs0, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mmpyul_rs1, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mmpyul_s0, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mmpyul_s1, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mnaci, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpy_acc_hh_s0, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpy_acc_hh_s1, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpy_acc_hl_s0, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpy_acc_hl_s1, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpy_acc_lh_s0, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpy_acc_lh_s1, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { 
(HexILOpGetter)hex_il_op_m2_mpy_acc_ll_s0, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpy_acc_ll_s1, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpy_acc_sat_hh_s0, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpy_acc_sat_hh_s1, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpy_acc_sat_hl_s0, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpy_acc_sat_hl_s1, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpy_acc_sat_lh_s0, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpy_acc_sat_lh_s1, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpy_acc_sat_ll_s0, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpy_acc_sat_ll_s1, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpy_hh_s0, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, 
HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpy_hh_s1, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpy_hl_s0, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpy_hl_s1, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpy_lh_s0, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpy_lh_s1, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpy_ll_s0, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpy_ll_s1, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpy_nac_hh_s0, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpy_nac_hh_s1, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpy_nac_hl_s0, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpy_nac_hl_s1, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID 
} }, + { { (HexILOpGetter)hex_il_op_m2_mpy_nac_lh_s0, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpy_nac_lh_s1, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpy_nac_ll_s0, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpy_nac_ll_s1, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpy_nac_sat_hh_s0, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpy_nac_sat_hh_s1, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpy_nac_sat_hl_s0, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpy_nac_sat_hl_s1, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpy_nac_sat_lh_s0, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpy_nac_sat_lh_s1, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpy_nac_sat_ll_s0, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { 
(HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpy_nac_sat_ll_s1, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpy_rnd_hh_s0, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpy_rnd_hh_s1, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpy_rnd_hl_s0, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpy_rnd_hl_s1, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpy_rnd_lh_s0, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpy_rnd_lh_s1, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpy_rnd_ll_s0, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpy_rnd_ll_s1, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpy_sat_hh_s0, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpy_sat_hh_s1, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID 
}, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpy_sat_hl_s0, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpy_sat_hl_s1, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpy_sat_lh_s0, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpy_sat_lh_s1, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpy_sat_ll_s0, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpy_sat_ll_s1, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpy_sat_rnd_hh_s0, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpy_sat_rnd_hh_s1, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpy_sat_rnd_hl_s0, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpy_sat_rnd_hl_s1, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpy_sat_rnd_lh_s0, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, 
HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpy_sat_rnd_lh_s1, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpy_sat_rnd_ll_s0, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpy_sat_rnd_ll_s1, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpy_up, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpy_up_s1, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpy_up_s1_sat, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpyd_acc_hh_s0, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpyd_acc_hh_s1, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpyd_acc_hl_s0, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpyd_acc_hl_s1, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpyd_acc_lh_s0, HEX_IL_INSN_ATTR_NONE }, + { 
(HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpyd_acc_lh_s1, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpyd_acc_ll_s0, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpyd_acc_ll_s1, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpyd_hh_s0, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpyd_hh_s1, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpyd_hl_s0, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpyd_hl_s1, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpyd_lh_s0, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpyd_lh_s1, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpyd_ll_s0, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpyd_ll_s1, HEX_IL_INSN_ATTR_NONE }, + { 
(HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpyd_nac_hh_s0, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpyd_nac_hh_s1, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpyd_nac_hl_s0, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpyd_nac_hl_s1, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpyd_nac_lh_s0, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpyd_nac_lh_s1, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpyd_nac_ll_s0, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpyd_nac_ll_s1, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpyd_rnd_hh_s0, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpyd_rnd_hh_s1, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpyd_rnd_hl_s0, 
HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpyd_rnd_hl_s1, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpyd_rnd_lh_s0, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpyd_rnd_lh_s1, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpyd_rnd_ll_s0, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpyd_rnd_ll_s1, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpyi, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpysin, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpysip, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpysu_up, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpyu_acc_hh_s0, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpyu_acc_hh_s1, 
HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpyu_acc_hl_s0, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpyu_acc_hl_s1, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpyu_acc_lh_s0, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpyu_acc_lh_s1, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpyu_acc_ll_s0, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpyu_acc_ll_s1, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpyu_hh_s0, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpyu_hh_s1, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpyu_hl_s0, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpyu_hl_s1, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpyu_lh_s0, 
HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpyu_lh_s1, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpyu_ll_s0, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpyu_ll_s1, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpyu_nac_hh_s0, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpyu_nac_hh_s1, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpyu_nac_hl_s0, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpyu_nac_hl_s1, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpyu_nac_lh_s0, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpyu_nac_lh_s1, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpyu_nac_ll_s0, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { 
(HexILOpGetter)hex_il_op_m2_mpyu_nac_ll_s1, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpyu_up, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpyud_acc_hh_s0, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpyud_acc_hh_s1, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpyud_acc_hl_s0, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpyud_acc_hl_s1, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpyud_acc_lh_s0, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpyud_acc_lh_s1, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpyud_acc_ll_s0, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpyud_acc_ll_s1, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpyud_hh_s0, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID 
} }, + { { (HexILOpGetter)hex_il_op_m2_mpyud_hh_s1, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpyud_hl_s0, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpyud_hl_s1, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpyud_lh_s0, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpyud_lh_s1, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpyud_ll_s0, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpyud_ll_s1, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpyud_nac_hh_s0, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpyud_nac_hh_s1, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpyud_nac_hl_s0, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpyud_nac_hl_s1, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } 
}, + { { (HexILOpGetter)hex_il_op_m2_mpyud_nac_lh_s0, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpyud_nac_lh_s1, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpyud_nac_ll_s0, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_mpyud_nac_ll_s1, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_nacci, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_naccii, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_subacc, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_vabsdiffh, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_vabsdiffw, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_vcmac_s0_sat_i, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_vcmac_s0_sat_r, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { 
(HexILOpGetter)hex_il_op_m2_vcmpy_s0_sat_i, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_vcmpy_s0_sat_r, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_vcmpy_s1_sat_i, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_vcmpy_s1_sat_r, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_vdmacs_s0, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_vdmacs_s1, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_vdmpyrs_s0, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_vdmpyrs_s1, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_vdmpys_s0, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_vdmpys_s1, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_vmac2, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { 
(HexILOpGetter)hex_il_op_m2_vmac2es, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_vmac2es_s0, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_vmac2es_s1, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_vmac2s_s0, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_vmac2s_s1, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_vmac2su_s0, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_vmac2su_s1, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_vmpy2es_s0, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_vmpy2es_s1, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_vmpy2s_s0, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_vmpy2s_s0pack, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { 
(HexILOpGetter)hex_il_op_m2_vmpy2s_s1, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_vmpy2s_s1pack, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_vmpy2su_s0, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_vmpy2su_s1, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_vraddh, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_vradduh, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_vrcmaci_s0, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_vrcmaci_s0c, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_vrcmacr_s0, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_vrcmacr_s0c, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_vrcmpyi_s0, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { 
(HexILOpGetter)hex_il_op_m2_vrcmpyi_s0c, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_vrcmpyr_s0, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_vrcmpyr_s0c, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_vrcmpys_acc_s1_h, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_vrcmpys_acc_s1_l, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_vrcmpys_s1_h, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_vrcmpys_s1_l, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_vrcmpys_s1rp_h, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_vrcmpys_s1rp_l, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_vrmac_s0, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m2_vrmpy_s0, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { 
(HexILOpGetter)hex_il_op_m2_xor_xacc, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m4_and_and, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m4_and_andn, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m4_and_or, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m4_and_xor, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m4_cmpyi_wh, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m4_cmpyi_whc, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m4_cmpyr_wh, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m4_cmpyr_whc, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m4_mac_up_s1_sat, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m4_mpyri_addi, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m4_mpyri_addr, 
HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m4_mpyri_addr_u2, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m4_mpyrr_addi, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m4_mpyrr_addr, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m4_nac_up_s1_sat, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m4_or_and, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m4_or_andn, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m4_or_or, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m4_or_xor, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m4_pmpyw, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m4_pmpyw_acc, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m4_vpmpyh, HEX_IL_INSN_ATTR_COND }, + { 
(HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m4_vpmpyh_acc, HEX_IL_INSN_ATTR_COND }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m4_vrmpyeh_acc_s0, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m4_vrmpyeh_acc_s1, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m4_vrmpyeh_s0, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m4_vrmpyeh_s1, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m4_vrmpyoh_acc_s0, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m4_vrmpyoh_acc_s1, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m4_vrmpyoh_s0, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m4_vrmpyoh_s1, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m4_xor_and, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m4_xor_andn, HEX_IL_INSN_ATTR_NONE }, + { 
(HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m4_xor_or, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m4_xor_xacc, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m5_vdmacbsu, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m5_vdmpybsu, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m5_vmacbsu, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m5_vmacbuu, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m5_vmpybsu, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m5_vmpybuu, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m5_vrmacbsu, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m5_vrmacbuu, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m5_vrmpybsu, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { 
(HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m5_vrmpybuu, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m6_vabsdiffb, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m6_vabsdiffub, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m7_dcmpyiw, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m7_dcmpyiw_acc, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m7_dcmpyiwc, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m7_dcmpyiwc_acc, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m7_dcmpyrw, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m7_dcmpyrw_acc, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m7_dcmpyrwc, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m7_dcmpyrwc_acc, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, 
HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m7_wcmpyiw, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m7_wcmpyiw_rnd, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m7_wcmpyiwc, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m7_wcmpyiwc_rnd, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m7_wcmpyrw, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m7_wcmpyrw_rnd, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m7_wcmpyrwc, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_m7_wcmpyrwc_rnd, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_r6_release_at_vi, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_r6_release_st_vi, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_addasl_rrri, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, 
HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_allocframe, HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_asl_i_p, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_asl_i_p_acc, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_asl_i_p_and, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_asl_i_p_nac, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_asl_i_p_or, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_asl_i_p_xacc, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_asl_i_r, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_asl_i_r_acc, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_asl_i_r_and, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_asl_i_r_nac, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, 
HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_asl_i_r_or, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_asl_i_r_sat, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_asl_i_r_xacc, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_asl_i_vh, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_asl_i_vw, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_asl_r_p, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_asl_r_p_acc, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_asl_r_p_and, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_asl_r_p_nac, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_asl_r_p_or, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_asl_r_p_xor, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, 
+ { { (HexILOpGetter)hex_il_op_s2_asl_r_r, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_asl_r_r_acc, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_asl_r_r_and, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_asl_r_r_nac, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_asl_r_r_or, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_asl_r_r_sat, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_asl_r_vh, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_asl_r_vw, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_asr_i_p, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_asr_i_p_acc, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_asr_i_p_and, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { 
(HexILOpGetter)hex_il_op_s2_asr_i_p_nac, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_asr_i_p_or, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_asr_i_p_rnd, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_asr_i_r, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_asr_i_r_acc, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_asr_i_r_and, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_asr_i_r_nac, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_asr_i_r_or, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_asr_i_r_rnd, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_asr_i_svw_trun, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_asr_i_vh, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { 
(HexILOpGetter)hex_il_op_s2_asr_i_vw, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_asr_r_p, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_asr_r_p_acc, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_asr_r_p_and, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_asr_r_p_nac, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_asr_r_p_or, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_asr_r_p_xor, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_asr_r_r, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_asr_r_r_acc, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_asr_r_r_and, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_asr_r_r_nac, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { 
(HexILOpGetter)hex_il_op_s2_asr_r_r_or, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_asr_r_r_sat, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_asr_r_svw_trun, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_asr_r_vh, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_asr_r_vw, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_brev, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_brevp, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_cabacdecbin, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_cl0, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_cl0p, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_cl1, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_cl1p, HEX_IL_INSN_ATTR_NONE 
}, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_clb, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_clbnorm, HEX_IL_INSN_ATTR_COND }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_clbp, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_clrbit_i, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_clrbit_r, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_ct0, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_ct0p, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_ct1, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_ct1p, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_deinterleave, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_extractu, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { 
(HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_extractu_rp, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_extractup, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_extractup_rp, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_insert, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_insert_rp, HEX_IL_INSN_ATTR_COND }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_insertp, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_insertp_rp, HEX_IL_INSN_ATTR_COND }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_interleave, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_lfsp, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_lsl_r_p, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_lsl_r_p_acc, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, 
HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_lsl_r_p_and, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_lsl_r_p_nac, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_lsl_r_p_or, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_lsl_r_p_xor, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_lsl_r_r, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_lsl_r_r_acc, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_lsl_r_r_and, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_lsl_r_r_nac, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_lsl_r_r_or, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_lsl_r_vh, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_lsl_r_vw, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + 
{ { (HexILOpGetter)hex_il_op_s2_lsr_i_p, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_lsr_i_p_acc, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_lsr_i_p_and, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_lsr_i_p_nac, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_lsr_i_p_or, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_lsr_i_p_xacc, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_lsr_i_r, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_lsr_i_r_acc, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_lsr_i_r_and, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_lsr_i_r_nac, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_lsr_i_r_or, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { 
(HexILOpGetter)hex_il_op_s2_lsr_i_r_xacc, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_lsr_i_vh, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_lsr_i_vw, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_lsr_r_p, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_lsr_r_p_acc, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_lsr_r_p_and, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_lsr_r_p_nac, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_lsr_r_p_or, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_lsr_r_p_xor, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_lsr_r_r, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_lsr_r_r_acc, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { 
(HexILOpGetter)hex_il_op_s2_lsr_r_r_and, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_lsr_r_r_nac, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_lsr_r_r_or, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_lsr_r_vh, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_lsr_r_vw, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_mask, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_packhl, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_parityp, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_pstorerbf_io, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_pstorerbf_pi, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_pstorerbfnew_pi, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, 
HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_pstorerbnewf_io, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_pstorerbnewf_pi, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_pstorerbnewfnew_pi, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_pstorerbnewt_io, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_pstorerbnewt_pi, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_pstorerbnewtnew_pi, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_pstorerbt_io, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_pstorerbt_pi, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_pstorerbtnew_pi, HEX_IL_INSN_ATTR_COND | 
HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_pstorerdf_io, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_pstorerdf_pi, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_pstorerdfnew_pi, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_pstorerdt_io, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_pstorerdt_pi, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_pstorerdtnew_pi, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_pstorerff_io, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_pstorerff_pi, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_pstorerffnew_pi, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | 
HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_pstorerft_io, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_pstorerft_pi, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_pstorerftnew_pi, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_pstorerhf_io, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_pstorerhf_pi, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_pstorerhfnew_pi, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_pstorerhnewf_io, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_pstorerhnewf_pi, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_pstorerhnewfnew_pi, HEX_IL_INSN_ATTR_COND | 
HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_pstorerhnewt_io, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_pstorerhnewt_pi, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_pstorerhnewtnew_pi, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_pstorerht_io, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_pstorerht_pi, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_pstorerhtnew_pi, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_pstorerif_io, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_pstorerif_pi, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_pstorerifnew_pi, 
HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_pstorerinewf_io, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_pstorerinewf_pi, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_pstorerinewfnew_pi, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_pstorerinewt_io, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_pstorerinewt_pi, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_pstorerinewtnew_pi, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_pstorerit_io, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_pstorerit_pi, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, 
HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_pstoreritnew_pi, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_setbit_i, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_setbit_r, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_shuffeb, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_shuffeh, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_shuffob, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_shuffoh, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_storerb_io, HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_storerb_pbr, HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_storerb_pci, HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_storerb_pcr, HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, 
HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_storerb_pi, HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_storerb_pr, HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_storerbgp, HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_storerbnew_io, HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_storerbnew_pbr, HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_storerbnew_pci, HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_storerbnew_pcr, HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_storerbnew_pi, HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_storerbnew_pr, HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_storerbnewgp, HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_WRITE }, + 
{ (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_storerd_io, HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_storerd_pbr, HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_storerd_pci, HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_storerd_pcr, HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_storerd_pi, HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_storerd_pr, HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_storerdgp, HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_storerf_io, HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_storerf_pbr, HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_storerf_pci, HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_storerf_pcr, 
HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_storerf_pi, HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_storerf_pr, HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_storerfgp, HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_storerh_io, HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_storerh_pbr, HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_storerh_pci, HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_storerh_pcr, HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_storerh_pi, HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_storerh_pr, HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_storerhgp, HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { 
(HexILOpGetter)hex_il_op_s2_storerhnew_io, HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_storerhnew_pbr, HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_storerhnew_pci, HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_storerhnew_pcr, HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_storerhnew_pi, HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_storerhnew_pr, HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_storerhnewgp, HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_storeri_io, HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_storeri_pbr, HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_storeri_pci, HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, 
HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_storeri_pcr, HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_storeri_pi, HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_storeri_pr, HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_storerigp, HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_storerinew_io, HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_storerinew_pbr, HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_storerinew_pci, HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_storerinew_pcr, HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_storerinew_pi, HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_storerinew_pr, HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, 
HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_storerinewgp, HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_storew_locked, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_storew_rl_at_vi, HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_storew_rl_st_vi, HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_svsathb, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_svsathub, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_tableidxb, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_tableidxd, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_tableidxh, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_tableidxw, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_togglebit_i, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { 
(HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_togglebit_r, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_tstbit_i, HEX_IL_INSN_ATTR_WPRED | HEX_IL_INSN_ATTR_WRITE_P3 | HEX_IL_INSN_ATTR_WRITE_P0 | HEX_IL_INSN_ATTR_WRITE_P1 }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_tstbit_r, HEX_IL_INSN_ATTR_WPRED | HEX_IL_INSN_ATTR_WRITE_P3 | HEX_IL_INSN_ATTR_WRITE_P0 | HEX_IL_INSN_ATTR_WRITE_P1 }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_valignib, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_valignrb, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_vcnegh, HEX_IL_INSN_ATTR_COND }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_vcrotate, HEX_IL_INSN_ATTR_COND }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_vrcnegh, HEX_IL_INSN_ATTR_COND }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_vrndpackwh, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_vrndpackwhs, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { 
{ (HexILOpGetter)hex_il_op_s2_vsathb, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_vsathb_nopack, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_vsathub, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_vsathub_nopack, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_vsatwh, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_vsatwh_nopack, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_vsatwuh, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_vsatwuh_nopack, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_vsplatrb, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_vsplatrh, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_vspliceib, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { 
(HexILOpGetter)hex_il_op_s2_vsplicerb, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_vsxtbh, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_vsxthw, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_vtrunehb, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_vtrunewh, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_vtrunohb, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_vtrunowh, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_vzxtbh, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s2_vzxthw, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_addaddi, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_addi_asl_ri, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_addi_lsr_ri, 
HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_andi_asl_ri, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_andi_lsr_ri, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_clbaddi, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_clbpaddi, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_clbpnorm, HEX_IL_INSN_ATTR_COND }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_extract, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_extract_rp, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_extractp, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_extractp_rp, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_lsli, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_ntstbit_i, HEX_IL_INSN_ATTR_WPRED | 
HEX_IL_INSN_ATTR_WRITE_P3 | HEX_IL_INSN_ATTR_WRITE_P0 | HEX_IL_INSN_ATTR_WRITE_P1 }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_ntstbit_r, HEX_IL_INSN_ATTR_WPRED | HEX_IL_INSN_ATTR_WRITE_P3 | HEX_IL_INSN_ATTR_WRITE_P0 | HEX_IL_INSN_ATTR_WRITE_P1 }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_or_andi, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_or_andix, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_or_ori, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_ori_asl_ri, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_ori_lsr_ri, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_parity, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_pstorerbf_abs, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_pstorerbf_rr, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_pstorerbfnew_abs, 
HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_pstorerbfnew_io, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_pstorerbfnew_rr, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_pstorerbnewf_abs, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_pstorerbnewf_rr, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_pstorerbnewfnew_abs, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_pstorerbnewfnew_io, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_pstorerbnewfnew_rr, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_pstorerbnewt_abs, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_WRITE }, + { 
(HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_pstorerbnewt_rr, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_pstorerbnewtnew_abs, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_pstorerbnewtnew_io, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_pstorerbnewtnew_rr, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_pstorerbt_abs, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_pstorerbt_rr, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_pstorerbtnew_abs, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_pstorerbtnew_io, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { 
(HexILOpGetter)hex_il_op_s4_pstorerbtnew_rr, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_pstorerdf_abs, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_pstorerdf_rr, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_pstorerdfnew_abs, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_pstorerdfnew_io, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_pstorerdfnew_rr, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_pstorerdt_abs, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_pstorerdt_rr, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_pstorerdtnew_abs, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, 
HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_pstorerdtnew_io, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_pstorerdtnew_rr, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_pstorerff_abs, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_pstorerff_rr, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_pstorerffnew_abs, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_pstorerffnew_io, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_pstorerffnew_rr, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_pstorerft_abs, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_pstorerft_rr, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + 
{ (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_pstorerftnew_abs, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_pstorerftnew_io, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_pstorerftnew_rr, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_pstorerhf_abs, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_pstorerhf_rr, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_pstorerhfnew_abs, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_pstorerhfnew_io, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_pstorerhfnew_rr, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_pstorerhnewf_abs, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | 
HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_pstorerhnewf_rr, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_pstorerhnewfnew_abs, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_pstorerhnewfnew_io, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_pstorerhnewfnew_rr, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_pstorerhnewt_abs, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_pstorerhnewt_rr, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_pstorerhnewtnew_abs, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_pstorerhnewtnew_io, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { 
(HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_pstorerhnewtnew_rr, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_pstorerht_abs, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_pstorerht_rr, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_pstorerhtnew_abs, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_pstorerhtnew_io, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_pstorerhtnew_rr, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_pstorerif_abs, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_pstorerif_rr, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_pstorerifnew_abs, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, 
HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_pstorerifnew_io, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_pstorerifnew_rr, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_pstorerinewf_abs, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_pstorerinewf_rr, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_pstorerinewfnew_abs, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_pstorerinewfnew_io, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_pstorerinewfnew_rr, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_pstorerinewt_abs, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { 
(HexILOpGetter)hex_il_op_s4_pstorerinewt_rr, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_pstorerinewtnew_abs, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_pstorerinewtnew_io, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_pstorerinewtnew_rr, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_pstorerit_abs, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_pstorerit_rr, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_pstoreritnew_abs, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_pstoreritnew_io, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_pstoreritnew_rr, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, 
HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_stored_locked, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_stored_rl_at_vi, HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_stored_rl_st_vi, HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_storeirb_io, HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_storeirbf_io, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_storeirbfnew_io, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_storeirbt_io, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_storeirbtnew_io, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_storeirh_io, HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_storeirhf_io, HEX_IL_INSN_ATTR_COND | 
HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_storeirhfnew_io, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_storeirht_io, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_storeirhtnew_io, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_storeiri_io, HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_storeirif_io, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_storeirifnew_io, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_storeirit_io, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_storeiritnew_io, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_storerb_ap, HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, 
HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_storerb_rr, HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_storerb_ur, HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_storerbnew_ap, HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_storerbnew_rr, HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_storerbnew_ur, HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_storerd_ap, HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_storerd_rr, HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_storerd_ur, HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_storerf_ap, HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_storerf_rr, HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } 
}, + { { (HexILOpGetter)hex_il_op_s4_storerf_ur, HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_storerh_ap, HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_storerh_rr, HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_storerh_ur, HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_storerhnew_ap, HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_storerhnew_rr, HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_storerhnew_ur, HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_storeri_ap, HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_storeri_rr, HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_storeri_ur, HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_storerinew_ap, HEX_IL_INSN_ATTR_NEW | 
HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_storerinew_rr, HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_storerinew_ur, HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_subaddi, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_subi_asl_ri, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_subi_lsr_ri, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_vrcrotate, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_vrcrotate_acc, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_vxaddsubh, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_vxaddsubhr, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_vxaddsubw, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { 
{ (HexILOpGetter)hex_il_op_s4_vxsubaddh, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_vxsubaddhr, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s4_vxsubaddw, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s5_asrhub_rnd_sat, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s5_asrhub_sat, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s5_popcountp, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s5_vasrhrnd, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s6_rol_i_p, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s6_rol_i_p_acc, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s6_rol_i_p_and, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s6_rol_i_p_nac, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { 
(HexILOpGetter)hex_il_op_s6_rol_i_p_or, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s6_rol_i_p_xacc, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s6_rol_i_r, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s6_rol_i_r_acc, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s6_rol_i_r_and, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s6_rol_i_r_nac, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s6_rol_i_r_or, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s6_rol_i_r_xacc, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s6_vsplatrbp, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s6_vtrunehb_ppp, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_s6_vtrunohb_ppp, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { 
(HexILOpGetter)hex_il_op_sa1_addi, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_sa1_addrx, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_sa1_addsp, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_sa1_and1, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_sa1_clrf, HEX_IL_INSN_ATTR_COND }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_sa1_clrfnew, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_sa1_clrt, HEX_IL_INSN_ATTR_COND }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_sa1_clrtnew, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_sa1_cmpeqi, HEX_IL_INSN_ATTR_WPRED | HEX_IL_INSN_ATTR_WRITE_P3 | HEX_IL_INSN_ATTR_WRITE_P0 | HEX_IL_INSN_ATTR_WRITE_P1 }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_sa1_combine0i, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_sa1_combine1i, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, 
HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_sa1_combine2i, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_sa1_combine3i, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_sa1_combinerz, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_sa1_combinezr, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_sa1_dec, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_sa1_inc, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_sa1_seti, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_sa1_setin1, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_sa1_sxtb, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_sa1_sxth, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_sa1_tfr, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, 
HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_sa1_zxtb, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_sa1_zxth, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_sl1_loadri_io, HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_sl1_loadrub_io, HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_sl2_deallocframe, HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_sl2_jumpr31, HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_sl2_jumpr31_f, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_sl2_jumpr31_fnew, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_sl2_jumpr31_t, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_sl2_jumpr31_tnew, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { 
(HexILOpGetter)hex_il_op_sl2_loadrb_io, HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_sl2_loadrd_sp, HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_sl2_loadrh_io, HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_sl2_loadri_sp, HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_sl2_loadruh_io, HEX_IL_INSN_ATTR_MEM_READ }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_sl2_return, HEX_IL_INSN_ATTR_MEM_READ | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_sl2_return_f, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_MEM_READ | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_sl2_return_fnew, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_READ | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_sl2_return_t, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_MEM_READ | HEX_IL_INSN_ATTR_BRANCH }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_sl2_return_tnew, HEX_IL_INSN_ATTR_COND | HEX_IL_INSN_ATTR_NEW | HEX_IL_INSN_ATTR_MEM_READ | HEX_IL_INSN_ATTR_BRANCH }, + { 
(HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_ss1_storeb_io, HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_ss1_storew_io, HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_ss2_allocframe, HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_ss2_storebi0, HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_ss2_storebi1, HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_ss2_stored_sp, HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_ss2_storeh_io, HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_ss2_storew_sp, HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_ss2_storewi0, HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_ss2_storewi1, HEX_IL_INSN_ATTR_MEM_WRITE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_undocumented_sa2_tfrsi, 
HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, 
HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vl32ub_ai, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vl32ub_pi, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vl32ub_ppu, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vl32b_ai, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vl32b_cur_ai, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vl32b_cur_npred_ai, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vl32b_cur_npred_pi, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vl32b_cur_npred_ppu, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vl32b_cur_pi, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vl32b_cur_ppu, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { 
(HexILOpGetter)hex_il_op_v6_vl32b_cur_pred_ai, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vl32b_cur_pred_pi, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vl32b_cur_pred_ppu, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vl32b_npred_ai, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vl32b_npred_pi, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vl32b_npred_ppu, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vl32b_nt_ai, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vl32b_nt_cur_ai, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vl32b_nt_cur_npred_ai, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vl32b_nt_cur_npred_pi, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vl32b_nt_cur_npred_ppu, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, 
HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vl32b_nt_cur_pi, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vl32b_nt_cur_ppu, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vl32b_nt_cur_pred_ai, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vl32b_nt_cur_pred_pi, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vl32b_nt_cur_pred_ppu, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vl32b_nt_npred_ai, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vl32b_nt_npred_pi, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vl32b_nt_npred_ppu, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vl32b_nt_pi, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vl32b_nt_ppu, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { 
(HexILOpGetter)hex_il_op_v6_vl32b_nt_pred_ai, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vl32b_nt_pred_pi, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vl32b_nt_pred_ppu, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vl32b_nt_tmp_ai, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vl32b_nt_tmp_npred_ai, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vl32b_nt_tmp_npred_pi, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vl32b_nt_tmp_npred_ppu, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vl32b_nt_tmp_pi, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vl32b_nt_tmp_ppu, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vl32b_nt_tmp_pred_ai, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vl32b_nt_tmp_pred_pi, HEX_IL_INSN_ATTR_INVALID }, + { 
(HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vl32b_nt_tmp_pred_ppu, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vl32b_pi, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vl32b_ppu, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vl32b_pred_ai, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vl32b_pred_pi, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vl32b_pred_ppu, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vl32b_tmp_ai, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vl32b_tmp_npred_ai, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vl32b_tmp_npred_pi, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vl32b_tmp_npred_ppu, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { 
(HexILOpGetter)hex_il_op_v6_vl32b_tmp_pi, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vl32b_tmp_ppu, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vl32b_tmp_pred_ai, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vl32b_tmp_pred_pi, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vl32b_tmp_pred_ppu, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vs32ub_ai, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vs32ub_npred_ai, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vs32ub_npred_pi, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vs32ub_npred_ppu, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vs32ub_pi, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vs32ub_ppu, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { 
(HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vs32ub_pred_ai, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vs32ub_pred_pi, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vs32ub_pred_ppu, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vs32b_ai, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vs32b_npred_ai, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { 
(HexILOpGetter)hex_il_op_v6_vs32b_npred_pi, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vs32b_npred_ppu, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vs32b_nqpred_ai, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vs32b_nqpred_pi, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vs32b_nqpred_ppu, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vs32b_nt_ai, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { 
(HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vs32b_nt_npred_ai, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vs32b_nt_npred_pi, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vs32b_nt_npred_ppu, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vs32b_nt_nqpred_ai, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vs32b_nt_nqpred_pi, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vs32b_nt_nqpred_ppu, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vs32b_nt_pi, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vs32b_nt_ppu, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vs32b_nt_pred_ai, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vs32b_nt_pred_pi, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { 
(HexILOpGetter)hex_il_op_v6_vs32b_nt_pred_ppu, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vs32b_nt_qpred_ai, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vs32b_nt_qpred_pi, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vs32b_nt_qpred_ppu, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vs32b_pi, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vs32b_ppu, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vs32b_pred_ai, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vs32b_pred_pi, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vs32b_pred_ppu, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vs32b_qpred_ai, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vs32b_qpred_pi, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { 
(HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vs32b_qpred_ppu, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vs32b_srls_ai, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vs32b_srls_pi, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vs32b_srls_ppu, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vabs_hf, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vabs_sf, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { 
(HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vadd_hf, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vadd_hf_hf, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vadd_qf16, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vadd_qf16_mix, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vadd_qf32, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vadd_qf32_mix, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vadd_sf, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vadd_sf_bf, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vadd_sf_hf, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vadd_sf_sf, HEX_IL_INSN_ATTR_INVALID }, + { 
(HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { 
(HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { 
(HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { 
(HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { 
(HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vassign_fp, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { 
(HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vconv_h_hf, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vconv_hf_h, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vconv_hf_qf16, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vconv_hf_qf32, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vconv_sf_qf32, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { 
(HexILOpGetter)hex_il_op_v6_vconv_sf_w, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vconv_w_sf, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vcvt_b_hf, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vcvt_bf_sf, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vcvt_h_hf, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vcvt_hf_b, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vcvt_hf_h, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vcvt_hf_sf, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vcvt_hf_ub, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vcvt_hf_uh, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vcvt_sf_hf, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { 
(HexILOpGetter)hex_il_op_v6_vcvt_ub_hf, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vcvt_uh_hf, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vdmpy_sf_hf, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vdmpy_sf_hf_acc, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { 
(HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { 
(HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vfmax_hf, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vfmax_sf, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vfmin_hf, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vfmin_sf, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vfneg_hf, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vfneg_sf, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, 
HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vgtbf, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vgtbf_and, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vgtbf_or, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vgtbf_xor, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, 
HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vgthf, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vgthf_and, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vgthf_or, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vgthf_xor, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vgtsf, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vgtsf_and, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vgtsf_or, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vgtsf_xor, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, 
HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, 
HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vmax_bf, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, 
HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vmax_hf, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vmax_sf, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vmin_bf, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vmin_hf, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vmin_sf, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, 
HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vmpy_hf_hf, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vmpy_hf_hf_acc, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vmpy_qf16, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, 
HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vmpy_qf16_hf, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vmpy_qf16_mix_hf, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vmpy_qf32, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vmpy_qf32_hf, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vmpy_qf32_mix_hf, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vmpy_qf32_qf16, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vmpy_qf32_sf, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vmpy_sf_bf, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vmpy_sf_bf_acc, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vmpy_sf_hf, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vmpy_sf_hf_acc, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, 
HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vmpy_sf_sf, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, 
HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, 
HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, 
HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, 
HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vrmpybub_rtt, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vrmpybub_rtt_acc, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vrmpyub_rtt, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, 
HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vrmpyub_rtt_acc, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vrmpyzbb_rt, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vrmpyzbb_rt_acc, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vrmpyzbb_rx, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vrmpyzbb_rx_acc, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vrmpyzbub_rt, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vrmpyzbub_rt_acc, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vrmpyzbub_rx, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { 
(HexILOpGetter)hex_il_op_v6_vrmpyzbub_rx_acc, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vrmpyzcb_rt, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vrmpyzcb_rt_acc, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vrmpyzcb_rx, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vrmpyzcb_rx_acc, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vrmpyzcbs_rt, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vrmpyzcbs_rt_acc, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vrmpyzcbs_rx, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vrmpyzcbs_rx_acc, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vrmpyznb_rt, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vrmpyznb_rt_acc, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { 
(HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vrmpyznb_rx, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vrmpyznb_rx_acc, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, 
HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, 
HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vsub_hf, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vsub_hf_hf, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vsub_qf16, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vsub_qf16_mix, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vsub_qf32, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vsub_qf32_mix, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vsub_sf, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vsub_sf_bf, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vsub_sf_hf, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_vsub_sf_sf, 
HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, 
HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, 
HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, 
HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_zld_ai, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_zld_pi, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_zld_ppu, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_zld_pred_ai, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_zld_pred_pi, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_zld_pred_ppu, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_v6_zextract, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_y2_barrier, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_y2_break, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_y2_ciad, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_y2_crswap0, 
HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_y2_cswi, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_y2_dccleana, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_y2_dccleanidx, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_y2_dccleaninva, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_y2_dccleaninvidx, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_y2_dcfetchbo, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_y2_dcinva, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_y2_dcinvidx, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_y2_dckill, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_y2_dctagr, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_y2_dctagw, 
HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_y2_dczeroa, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_y2_getimask, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_y2_iassignr, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_y2_iassignw, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_y2_icdatar, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_y2_icdataw, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_y2_icinva, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_y2_icinvidx, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_y2_ickill, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_y2_ictagr, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_y2_ictagw, HEX_IL_INSN_ATTR_INVALID }, + 
{ (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_y2_isync, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_y2_k0lock, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_y2_k0unlock, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_y2_l2cleaninvidx, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_y2_l2kill, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_y2_resume, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_y2_setimask, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_y2_setprio, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_y2_start, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_y2_stop, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_y2_swi, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, 
HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_y2_syncht, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_y2_tfrscrr, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_y2_tfrsrcr, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_y2_tlblock, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_y2_tlbp, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_y2_tlbr, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_y2_tlbunlock, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_y2_tlbw, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_y2_wait, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_y4_crswap1, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_y4_crswap10, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { 
(HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_y4_l2fetch, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_y4_l2tagr, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_y4_l2tagw, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_y4_nmi, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_y4_siad, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_y4_tfrscpp, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_y4_tfrspcp, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_y4_trace, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_y5_ctlbw, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_y5_l2cleanidx, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_y5_l2fetch, HEX_IL_INSN_ATTR_NONE }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID 
} }, + { { (HexILOpGetter)hex_il_op_y5_l2gclean, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_y5_l2gcleaninv, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_y5_l2gunlock, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_y5_l2invidx, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_y5_l2locka, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_y5_l2unlocka, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_y5_tlbasidi, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_y5_tlboc, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_y6_diag, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_y6_diag0, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_y6_diag1, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { 
(HexILOpGetter)hex_il_op_y6_dmlink, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_y6_dmpause, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_y6_dmpoll, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_y6_dmresume, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_y6_dmstart, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_y6_dmwait, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_dep_a2_addsat, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_dep_a2_subsat, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, + { { (HexILOpGetter)hex_il_op_dep_s2_packhl, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID }, + { (HexILOpGetter)NULL, HEX_IL_INSN_ATTR_INVALID } }, +}; +#endif \ No newline at end of file diff --git a/librz/arch/isa/hexagon/hexagon_insn.h b/librz/arch/isa/hexagon/hexagon_insn.h index eb8e49bf07f..ac8a5c07b74 100644 --- a/librz/arch/isa/hexagon/hexagon_insn.h +++ b/librz/arch/isa/hexagon/hexagon_insn.h @@ -3,7 +3,7 @@ // LLVM commit: b6f51787f6c8e77143f0aef6b58ddc7c55741d5c // LLVM commit date: 2023-11-15 07:10:59 -0800 (ISO 8601 
format) -// Date of code generation: 2023-11-15 11:30:41-05:00 +// Date of code generation: 2024-03-16 06:22:39-05:00 //======================================== // The following code is generated. // Do not edit. Repository of code generator: @@ -406,6 +406,13 @@ typedef enum { HEX_INS_G4_TFRGCRR, HEX_INS_G4_TFRGPCP, HEX_INS_G4_TFRGRCR, + HEX_INS_IMPORTED_RD_SS, + HEX_INS_IMPORTED_RD_MEMW_PHYS_RS_RT, + HEX_INS_IMPORTED_RDD_SSS, + HEX_INS_IMPORTED_SD_RS, + HEX_INS_IMPORTED_SDD_RSS, + HEX_INS_IMPORTED_L2GCLEAN_RTT, + HEX_INS_IMPORTED_L2GCLEANINV_RTT, HEX_INS_J2_CALL, HEX_INS_J2_CALLF, HEX_INS_J2_CALLR, @@ -1151,21 +1158,6 @@ typedef enum { HEX_INS_M7_WCMPYRW_RND, HEX_INS_M7_WCMPYRWC, HEX_INS_M7_WCMPYRWC_RND, - HEX_INS_PS_LOADRBABS, - HEX_INS_PS_LOADRDABS, - HEX_INS_PS_LOADRHABS, - HEX_INS_PS_LOADRIABS, - HEX_INS_PS_LOADRUBABS, - HEX_INS_PS_LOADRUHABS, - HEX_INS_PS_STORERBABS, - HEX_INS_PS_STORERBNEWABS, - HEX_INS_PS_STORERDABS, - HEX_INS_PS_STORERFABS, - HEX_INS_PS_STORERHABS, - HEX_INS_PS_STORERHNEWABS, - HEX_INS_PS_STORERIABS, - HEX_INS_PS_STORERINEWABS, - HEX_INS_PS_TRAP1, HEX_INS_R6_RELEASE_AT_VI, HEX_INS_R6_RELEASE_ST_VI, HEX_INS_S2_ADDASL_RRRI, @@ -1622,6 +1614,59 @@ typedef enum { HEX_INS_S6_VSPLATRBP, HEX_INS_S6_VTRUNEHB_PPP, HEX_INS_S6_VTRUNOHB_PPP, + HEX_INS_SA1_ADDI, + HEX_INS_SA1_ADDRX, + HEX_INS_SA1_ADDSP, + HEX_INS_SA1_AND1, + HEX_INS_SA1_CLRF, + HEX_INS_SA1_CLRFNEW, + HEX_INS_SA1_CLRT, + HEX_INS_SA1_CLRTNEW, + HEX_INS_SA1_CMPEQI, + HEX_INS_SA1_COMBINE0I, + HEX_INS_SA1_COMBINE1I, + HEX_INS_SA1_COMBINE2I, + HEX_INS_SA1_COMBINE3I, + HEX_INS_SA1_COMBINERZ, + HEX_INS_SA1_COMBINEZR, + HEX_INS_SA1_DEC, + HEX_INS_SA1_INC, + HEX_INS_SA1_SETI, + HEX_INS_SA1_SETIN1, + HEX_INS_SA1_SXTB, + HEX_INS_SA1_SXTH, + HEX_INS_SA1_TFR, + HEX_INS_SA1_ZXTB, + HEX_INS_SA1_ZXTH, + HEX_INS_SL1_LOADRI_IO, + HEX_INS_SL1_LOADRUB_IO, + HEX_INS_SL2_DEALLOCFRAME, + HEX_INS_SL2_JUMPR31, + HEX_INS_SL2_JUMPR31_F, + HEX_INS_SL2_JUMPR31_FNEW, + HEX_INS_SL2_JUMPR31_T, + 
HEX_INS_SL2_JUMPR31_TNEW, + HEX_INS_SL2_LOADRB_IO, + HEX_INS_SL2_LOADRD_SP, + HEX_INS_SL2_LOADRH_IO, + HEX_INS_SL2_LOADRI_SP, + HEX_INS_SL2_LOADRUH_IO, + HEX_INS_SL2_RETURN, + HEX_INS_SL2_RETURN_F, + HEX_INS_SL2_RETURN_FNEW, + HEX_INS_SL2_RETURN_T, + HEX_INS_SL2_RETURN_TNEW, + HEX_INS_SS1_STOREB_IO, + HEX_INS_SS1_STOREW_IO, + HEX_INS_SS2_ALLOCFRAME, + HEX_INS_SS2_STOREBI0, + HEX_INS_SS2_STOREBI1, + HEX_INS_SS2_STORED_SP, + HEX_INS_SS2_STOREH_IO, + HEX_INS_SS2_STOREW_SP, + HEX_INS_SS2_STOREWI0, + HEX_INS_SS2_STOREWI1, + HEX_INS_UNDOCUMENTED_SA2_TFRSI, HEX_INS_V6_EXTRACTW, HEX_INS_V6_LVSPLATB, HEX_INS_V6_LVSPLATH, @@ -2344,65 +2389,5 @@ typedef enum { HEX_INS_DEP_A2_ADDSAT, HEX_INS_DEP_A2_SUBSAT, HEX_INS_DEP_S2_PACKHL, - HEX_INS_IMPORTED_RD_SS, - HEX_INS_IMPORTED_RD_MEMW_PHYS_RS_RT, - HEX_INS_IMPORTED_RDD_SSS, - HEX_INS_IMPORTED_SD_RS, - HEX_INS_IMPORTED_SDD_RSS, - HEX_INS_IMPORTED_L2GCLEAN_RTT, - HEX_INS_IMPORTED_L2GCLEANINV_RTT, - HEX_INS_SA1_ADDI, - HEX_INS_SA1_ADDRX, - HEX_INS_SA1_ADDSP, - HEX_INS_SA1_AND1, - HEX_INS_SA1_CLRF, - HEX_INS_SA1_CLRFNEW, - HEX_INS_SA1_CLRT, - HEX_INS_SA1_CLRTNEW, - HEX_INS_SA1_CMPEQI, - HEX_INS_SA1_COMBINE0I, - HEX_INS_SA1_COMBINE1I, - HEX_INS_SA1_COMBINE2I, - HEX_INS_SA1_COMBINE3I, - HEX_INS_SA1_COMBINERZ, - HEX_INS_SA1_COMBINEZR, - HEX_INS_SA1_DEC, - HEX_INS_SA1_INC, - HEX_INS_SA1_SETI, - HEX_INS_SA1_SETIN1, - HEX_INS_SA1_SXTB, - HEX_INS_SA1_SXTH, - HEX_INS_SA1_TFR, - HEX_INS_SA1_ZXTB, - HEX_INS_SA1_ZXTH, - HEX_INS_SL1_LOADRI_IO, - HEX_INS_SL1_LOADRUB_IO, - HEX_INS_SL2_DEALLOCFRAME, - HEX_INS_SL2_JUMPR31, - HEX_INS_SL2_JUMPR31_F, - HEX_INS_SL2_JUMPR31_FNEW, - HEX_INS_SL2_JUMPR31_T, - HEX_INS_SL2_JUMPR31_TNEW, - HEX_INS_SL2_LOADRB_IO, - HEX_INS_SL2_LOADRD_SP, - HEX_INS_SL2_LOADRH_IO, - HEX_INS_SL2_LOADRI_SP, - HEX_INS_SL2_LOADRUH_IO, - HEX_INS_SL2_RETURN, - HEX_INS_SL2_RETURN_F, - HEX_INS_SL2_RETURN_FNEW, - HEX_INS_SL2_RETURN_T, - HEX_INS_SL2_RETURN_TNEW, - HEX_INS_SS1_STOREB_IO, - HEX_INS_SS1_STOREW_IO, - HEX_INS_SS2_ALLOCFRAME, - 
HEX_INS_SS2_STOREBI0, - HEX_INS_SS2_STOREBI1, - HEX_INS_SS2_STORED_SP, - HEX_INS_SS2_STOREH_IO, - HEX_INS_SS2_STOREW_SP, - HEX_INS_SS2_STOREWI0, - HEX_INS_SS2_STOREWI1, - HEX_INS_UNDOCUMENTED_SA2_TFRSI, } HexInsnID; #endif \ No newline at end of file diff --git a/librz/arch/isa/hexagon/hexagon_reg_tables.h b/librz/arch/isa/hexagon/hexagon_reg_tables.h new file mode 100644 index 00000000000..c7cbd1b4fa4 --- /dev/null +++ b/librz/arch/isa/hexagon/hexagon_reg_tables.h @@ -0,0 +1,730 @@ +// SPDX-FileCopyrightText: 2021 Rot127 +// SPDX-License-Identifier: LGPL-3.0-only + +// LLVM commit: b6f51787f6c8e77143f0aef6b58ddc7c55741d5c +// LLVM commit date: 2023-11-15 07:10:59 -0800 (ISO 8601 format) +// Date of code generation: 2024-03-16 06:22:39-05:00 +//======================================== +// The following code is generated. +// Do not edit. Repository of code generator: +// https://github.com/rizinorg/rz-hexagon + +#ifndef HEXAGON_REG_TABLES_H +#define HEXAGON_REG_TABLES_H + +#include + +/** + * \brief Lookup table for register alias. 
+ * + */ +HexRegAliasMapping hex_alias_reg_lt_v69[] = { + { HEX_REG_CLASS_CTR_REGS, HEX_REG_CTR_REGS_C0 }, // HEX_REG_ALIAS_SA0 + { HEX_REG_CLASS_CTR_REGS, HEX_REG_CTR_REGS_C1 }, // HEX_REG_ALIAS_LC0 + { HEX_REG_CLASS_CTR_REGS, HEX_REG_CTR_REGS_C2 }, // HEX_REG_ALIAS_SA1 + { HEX_REG_CLASS_CTR_REGS, HEX_REG_CTR_REGS_C3 }, // HEX_REG_ALIAS_LC1 + { HEX_REG_CLASS_CTR_REGS, HEX_REG_CTR_REGS_C4 }, // HEX_REG_ALIAS_P3_0 + { HEX_REG_CLASS_CTR_REGS, HEX_REG_CTR_REGS_C5 }, // HEX_REG_ALIAS_C5 + { HEX_REG_CLASS_CTR_REGS, HEX_REG_CTR_REGS_C6 }, // HEX_REG_ALIAS_M0 + { HEX_REG_CLASS_CTR_REGS, HEX_REG_CTR_REGS_C7 }, // HEX_REG_ALIAS_M1 + { HEX_REG_CLASS_CTR_REGS, HEX_REG_CTR_REGS_C8 }, // HEX_REG_ALIAS_USR + { HEX_REG_CLASS_CTR_REGS, HEX_REG_CTR_REGS_C9 }, // HEX_REG_ALIAS_PC + { HEX_REG_CLASS_CTR_REGS, HEX_REG_CTR_REGS_C10 }, // HEX_REG_ALIAS_UGP + { HEX_REG_CLASS_CTR_REGS, HEX_REG_CTR_REGS_C11 }, // HEX_REG_ALIAS_GP + { HEX_REG_CLASS_CTR_REGS, HEX_REG_CTR_REGS_C12 }, // HEX_REG_ALIAS_CS0 + { HEX_REG_CLASS_CTR_REGS, HEX_REG_CTR_REGS_C13 }, // HEX_REG_ALIAS_CS1 + { HEX_REG_CLASS_CTR_REGS, HEX_REG_CTR_REGS_C14 }, // HEX_REG_ALIAS_UPCYCLELO + { HEX_REG_CLASS_CTR_REGS, HEX_REG_CTR_REGS_C15 }, // HEX_REG_ALIAS_UPCYCLEHI + { HEX_REG_CLASS_CTR_REGS, HEX_REG_CTR_REGS_C16 }, // HEX_REG_ALIAS_FRAMELIMIT + { HEX_REG_CLASS_CTR_REGS, HEX_REG_CTR_REGS_C17 }, // HEX_REG_ALIAS_FRAMEKEY + { HEX_REG_CLASS_CTR_REGS, HEX_REG_CTR_REGS_C18 }, // HEX_REG_ALIAS_PKTCOUNTLO + { HEX_REG_CLASS_CTR_REGS, HEX_REG_CTR_REGS_C19 }, // HEX_REG_ALIAS_PKTCOUNTHI + { HEX_REG_CLASS_CTR_REGS, HEX_REG_CTR_REGS_C20 }, // HEX_REG_ALIAS_C20 + { HEX_REG_CLASS_CTR_REGS, HEX_REG_CTR_REGS_C30 }, // HEX_REG_ALIAS_UTIMERLO + { HEX_REG_CLASS_CTR_REGS, HEX_REG_CTR_REGS_C31 }, // HEX_REG_ALIAS_UTIMERHI + { HEX_REG_CLASS_CTR_REGS64, HEX_REG_CTR_REGS64_C1_0 }, // HEX_REG_ALIAS_LC0_SA0 + { HEX_REG_CLASS_CTR_REGS64, HEX_REG_CTR_REGS64_C3_2 }, // HEX_REG_ALIAS_LC1_SA1 + { HEX_REG_CLASS_CTR_REGS64, HEX_REG_CTR_REGS64_C7_6 }, // 
HEX_REG_ALIAS_M1_0 + { HEX_REG_CLASS_CTR_REGS64, HEX_REG_CTR_REGS64_C13_12 }, // HEX_REG_ALIAS_CS1_0 + { HEX_REG_CLASS_CTR_REGS64, HEX_REG_CTR_REGS64_C15_14 }, // HEX_REG_ALIAS_UPCYCLE + { HEX_REG_CLASS_CTR_REGS64, HEX_REG_CTR_REGS64_C19_18 }, // HEX_REG_ALIAS_PKTCOUNT + { HEX_REG_CLASS_CTR_REGS64, HEX_REG_CTR_REGS64_C31_30 }, // HEX_REG_ALIAS_UTIMER + { HEX_REG_CLASS_DOUBLE_REGS, HEX_REG_DOUBLE_REGS_R31_30 }, // HEX_REG_ALIAS_LR_FP + { HEX_REG_CLASS_GUEST_REGS, HEX_REG_GUEST_REGS_G0 }, // HEX_REG_ALIAS_GELR + { HEX_REG_CLASS_GUEST_REGS, HEX_REG_GUEST_REGS_G1 }, // HEX_REG_ALIAS_GSR + { HEX_REG_CLASS_GUEST_REGS, HEX_REG_GUEST_REGS_G2 }, // HEX_REG_ALIAS_GOSP + { HEX_REG_CLASS_GUEST_REGS, HEX_REG_GUEST_REGS_G3 }, // HEX_REG_ALIAS_GBADVA + { HEX_REG_CLASS_GUEST_REGS, HEX_REG_GUEST_REGS_G16 }, // HEX_REG_ALIAS_GPMUCNT4 + { HEX_REG_CLASS_GUEST_REGS, HEX_REG_GUEST_REGS_G17 }, // HEX_REG_ALIAS_GPMUCNT5 + { HEX_REG_CLASS_GUEST_REGS, HEX_REG_GUEST_REGS_G18 }, // HEX_REG_ALIAS_GPMUCNT6 + { HEX_REG_CLASS_GUEST_REGS, HEX_REG_GUEST_REGS_G19 }, // HEX_REG_ALIAS_GPMUCNT7 + { HEX_REG_CLASS_GUEST_REGS, HEX_REG_GUEST_REGS_G24 }, // HEX_REG_ALIAS_GPCYCLELO + { HEX_REG_CLASS_GUEST_REGS, HEX_REG_GUEST_REGS_G25 }, // HEX_REG_ALIAS_GPCYCLEHI + { HEX_REG_CLASS_GUEST_REGS, HEX_REG_GUEST_REGS_G26 }, // HEX_REG_ALIAS_GPMUCNT0 + { HEX_REG_CLASS_GUEST_REGS, HEX_REG_GUEST_REGS_G27 }, // HEX_REG_ALIAS_GPMUCNT1 + { HEX_REG_CLASS_GUEST_REGS, HEX_REG_GUEST_REGS_G28 }, // HEX_REG_ALIAS_GPMUCNT2 + { HEX_REG_CLASS_GUEST_REGS, HEX_REG_GUEST_REGS_G29 }, // HEX_REG_ALIAS_GPMUCNT3 + { HEX_REG_CLASS_INT_REGS, HEX_REG_INT_REGS_R29 }, // HEX_REG_ALIAS_SP + { HEX_REG_CLASS_INT_REGS, HEX_REG_INT_REGS_R30 }, // HEX_REG_ALIAS_FP + { HEX_REG_CLASS_INT_REGS, HEX_REG_INT_REGS_R31 }, // HEX_REG_ALIAS_LR + { HEX_REG_CLASS_SYS_REGS, HEX_REG_SYS_REGS_S0 }, // HEX_REG_ALIAS_SGP0 + { HEX_REG_CLASS_SYS_REGS, HEX_REG_SYS_REGS_S1 }, // HEX_REG_ALIAS_SGP1 + { HEX_REG_CLASS_SYS_REGS, HEX_REG_SYS_REGS_S2 }, // 
HEX_REG_ALIAS_STID + { HEX_REG_CLASS_SYS_REGS, HEX_REG_SYS_REGS_S3 }, // HEX_REG_ALIAS_ELR + { HEX_REG_CLASS_SYS_REGS, HEX_REG_SYS_REGS_S4 }, // HEX_REG_ALIAS_BADVA0 + { HEX_REG_CLASS_SYS_REGS, HEX_REG_SYS_REGS_S5 }, // HEX_REG_ALIAS_BADVA1 + { HEX_REG_CLASS_SYS_REGS, HEX_REG_SYS_REGS_S6 }, // HEX_REG_ALIAS_SSR + { HEX_REG_CLASS_SYS_REGS, HEX_REG_SYS_REGS_S7 }, // HEX_REG_ALIAS_CCR + { HEX_REG_CLASS_SYS_REGS, HEX_REG_SYS_REGS_S8 }, // HEX_REG_ALIAS_HTID + { HEX_REG_CLASS_SYS_REGS, HEX_REG_SYS_REGS_S9 }, // HEX_REG_ALIAS_BADVA + { HEX_REG_CLASS_SYS_REGS, HEX_REG_SYS_REGS_S10 }, // HEX_REG_ALIAS_IMASK + { HEX_REG_CLASS_SYS_REGS, HEX_REG_SYS_REGS_S16 }, // HEX_REG_ALIAS_EVB + { HEX_REG_CLASS_SYS_REGS, HEX_REG_SYS_REGS_S17 }, // HEX_REG_ALIAS_MODECTL + { HEX_REG_CLASS_SYS_REGS, HEX_REG_SYS_REGS_S18 }, // HEX_REG_ALIAS_SYSCFG + { HEX_REG_CLASS_SYS_REGS, HEX_REG_SYS_REGS_S19 }, // HEX_REG_ALIAS_S19 + { HEX_REG_CLASS_SYS_REGS, HEX_REG_SYS_REGS_S20 }, // HEX_REG_ALIAS_S20 + { HEX_REG_CLASS_SYS_REGS, HEX_REG_SYS_REGS_S21 }, // HEX_REG_ALIAS_VID + { HEX_REG_CLASS_SYS_REGS, HEX_REG_SYS_REGS_S22 }, // HEX_REG_ALIAS_S22 + { HEX_REG_CLASS_SYS_REGS, HEX_REG_SYS_REGS_S27 }, // HEX_REG_ALIAS_CFGBASE + { HEX_REG_CLASS_SYS_REGS, HEX_REG_SYS_REGS_S28 }, // HEX_REG_ALIAS_DIAG + { HEX_REG_CLASS_SYS_REGS, HEX_REG_SYS_REGS_S29 }, // HEX_REG_ALIAS_REV + { HEX_REG_CLASS_SYS_REGS, HEX_REG_SYS_REGS_S30 }, // HEX_REG_ALIAS_PCYCLELO + { HEX_REG_CLASS_SYS_REGS, HEX_REG_SYS_REGS_S31 }, // HEX_REG_ALIAS_PCYCLEHI + { HEX_REG_CLASS_SYS_REGS, HEX_REG_SYS_REGS_S32 }, // HEX_REG_ALIAS_ISDBST + { HEX_REG_CLASS_SYS_REGS, HEX_REG_SYS_REGS_S33 }, // HEX_REG_ALIAS_ISDBCFG0 + { HEX_REG_CLASS_SYS_REGS, HEX_REG_SYS_REGS_S34 }, // HEX_REG_ALIAS_ISDBCFG1 + { HEX_REG_CLASS_SYS_REGS, HEX_REG_SYS_REGS_S36 }, // HEX_REG_ALIAS_BRKPTPC0 + { HEX_REG_CLASS_SYS_REGS, HEX_REG_SYS_REGS_S37 }, // HEX_REG_ALIAS_BRKPTCFG0 + { HEX_REG_CLASS_SYS_REGS, HEX_REG_SYS_REGS_S38 }, // HEX_REG_ALIAS_BRKPTPC1 + { HEX_REG_CLASS_SYS_REGS, 
HEX_REG_SYS_REGS_S39 }, // HEX_REG_ALIAS_BRKPTCFG1 + { HEX_REG_CLASS_SYS_REGS, HEX_REG_SYS_REGS_S40 }, // HEX_REG_ALIAS_ISDBMBXIN + { HEX_REG_CLASS_SYS_REGS, HEX_REG_SYS_REGS_S41 }, // HEX_REG_ALIAS_ISDBMBXOUT + { HEX_REG_CLASS_SYS_REGS, HEX_REG_SYS_REGS_S42 }, // HEX_REG_ALIAS_ISDBEN + { HEX_REG_CLASS_SYS_REGS, HEX_REG_SYS_REGS_S43 }, // HEX_REG_ALIAS_ISDBGPR + { HEX_REG_CLASS_SYS_REGS, HEX_REG_SYS_REGS_S48 }, // HEX_REG_ALIAS_PMUCNT0 + { HEX_REG_CLASS_SYS_REGS, HEX_REG_SYS_REGS_S49 }, // HEX_REG_ALIAS_PMUCNT1 + { HEX_REG_CLASS_SYS_REGS, HEX_REG_SYS_REGS_S50 }, // HEX_REG_ALIAS_PMUCNT2 + { HEX_REG_CLASS_SYS_REGS, HEX_REG_SYS_REGS_S51 }, // HEX_REG_ALIAS_PMUCNT3 + { HEX_REG_CLASS_SYS_REGS, HEX_REG_SYS_REGS_S52 }, // HEX_REG_ALIAS_PMUEVTCFG + { HEX_REG_CLASS_SYS_REGS, HEX_REG_SYS_REGS_S53 }, // HEX_REG_ALIAS_PMUCFG + { HEX_REG_CLASS_SYS_REGS64, HEX_REG_SYS_REGS64_S1_0 }, // HEX_REG_ALIAS_SGP1_0 + { HEX_REG_CLASS_SYS_REGS64, HEX_REG_SYS_REGS64_S5_4 }, // HEX_REG_ALIAS_BADVA1_0 + { HEX_REG_CLASS_SYS_REGS64, HEX_REG_SYS_REGS64_S7_6 }, // HEX_REG_ALIAS_CCR_SSR + { HEX_REG_CLASS_SYS_REGS64, HEX_REG_SYS_REGS64_S31_30 }, // HEX_REG_ALIAS_PCYCLE +}; + +/** + * \brief Lookup table for register names and alias of class CtrRegs. 
+ */ +HexRegNames hexagon_ctrregs_lt_v69[] = { + { "C0", "SA0", "C0_tmp", "sa0_tmp" }, // HEX_REG_CTR_REGS_C0 + { "C1", "LC0", "C1_tmp", "lc0_tmp" }, // HEX_REG_CTR_REGS_C1 + { "C2", "SA1", "C2_tmp", "sa1_tmp" }, // HEX_REG_CTR_REGS_C2 + { "C3", "LC1", "C3_tmp", "lc1_tmp" }, // HEX_REG_CTR_REGS_C3 + { "C4", "P3:0", "C4_tmp", "p3:0_tmp" }, // HEX_REG_CTR_REGS_C4 + { "C5", "C5", "C5_tmp", "c5_tmp" }, // HEX_REG_CTR_REGS_C5 + { "C6", "M0", "C6_tmp", "m0_tmp" }, // HEX_REG_CTR_REGS_C6 + { "C7", "M1", "C7_tmp", "m1_tmp" }, // HEX_REG_CTR_REGS_C7 + { "C8", "USR", "C8_tmp", "usr_tmp" }, // HEX_REG_CTR_REGS_C8 + { "C9", "PC", "C9_tmp", "pc_tmp" }, // HEX_REG_CTR_REGS_C9 + { "C10", "UGP", "C10_tmp", "ugp_tmp" }, // HEX_REG_CTR_REGS_C10 + { "C11", "GP", "C11_tmp", "gp_tmp" }, // HEX_REG_CTR_REGS_C11 + { "C12", "CS0", "C12_tmp", "cs0_tmp" }, // HEX_REG_CTR_REGS_C12 + { "C13", "CS1", "C13_tmp", "cs1_tmp" }, // HEX_REG_CTR_REGS_C13 + { "C14", "UPCYCLELO", "C14_tmp", "upcyclelo_tmp" }, // HEX_REG_CTR_REGS_C14 + { "C15", "UPCYCLEHI", "C15_tmp", "upcyclehi_tmp" }, // HEX_REG_CTR_REGS_C15 + { "C16", "FRAMELIMIT", "C16_tmp", "framelimit_tmp" }, // HEX_REG_CTR_REGS_C16 + { "C17", "FRAMEKEY", "C17_tmp", "framekey_tmp" }, // HEX_REG_CTR_REGS_C17 + { "C18", "PKTCOUNTLO", "C18_tmp", "pktcountlo_tmp" }, // HEX_REG_CTR_REGS_C18 + { "C19", "PKTCOUNTHI", "C19_tmp", "pktcounthi_tmp" }, // HEX_REG_CTR_REGS_C19 + { "C20", "C20", "C20_tmp", "C20_tmp" }, // HEX_REG_CTR_REGS_C20 + { "C21", "C21", "C21_tmp", "c21_tmp" }, // HEX_REG_CTR_REGS_C21 + { "C22", "C22", "C22_tmp", "c22_tmp" }, // HEX_REG_CTR_REGS_C22 + { "C23", "C23", "C23_tmp", "c23_tmp" }, // HEX_REG_CTR_REGS_C23 + { "C24", "C24", "C24_tmp", "c24_tmp" }, // HEX_REG_CTR_REGS_C24 + { "C25", "C25", "C25_tmp", "c25_tmp" }, // HEX_REG_CTR_REGS_C25 + { "C26", "C26", "C26_tmp", "c26_tmp" }, // HEX_REG_CTR_REGS_C26 + { "C27", "C27", "C27_tmp", "c27_tmp" }, // HEX_REG_CTR_REGS_C27 + { "C28", "C28", "C28_tmp", "c28_tmp" }, // HEX_REG_CTR_REGS_C28 
+ { "C29", "C29", "C29_tmp", "c29_tmp" }, // HEX_REG_CTR_REGS_C29 + { "C30", "UTIMERLO", "C30_tmp", "utimerlo_tmp" }, // HEX_REG_CTR_REGS_C30 + { "C31", "UTIMERHI", "C31_tmp", "utimerhi_tmp" }, // HEX_REG_CTR_REGS_C31 +}; + +/** + * \brief Lookup table for register names and alias of class CtrRegs64. + */ +HexRegNames hexagon_ctrregs64_lt_v69[] = { + { "C1:0", "LC0:SA0", "C1:0_tmp", "lc0:sa0_tmp" }, // HEX_REG_CTR_REGS64_C1_0 + { NULL, NULL, NULL, NULL }, // - + { "C3:2", "LC1:SA1", "C3:2_tmp", "lc1:sa1_tmp" }, // HEX_REG_CTR_REGS64_C3_2 + { NULL, NULL, NULL, NULL }, // - + { "C5:4", "C5:4", "C5:4_tmp", "c5:4_tmp" }, // HEX_REG_CTR_REGS64_C5_4 + { NULL, NULL, NULL, NULL }, // - + { "C7:6", "M1:0", "C7:6_tmp", "m1:0_tmp" }, // HEX_REG_CTR_REGS64_C7_6 + { NULL, NULL, NULL, NULL }, // - + { "C9:8", "C9:8", "C9:8_tmp", "c9:8_tmp" }, // HEX_REG_CTR_REGS64_C9_8 + { NULL, NULL, NULL, NULL }, // - + { "C11:10", "C11:10", "C11:10_tmp", "c11:10_tmp" }, // HEX_REG_CTR_REGS64_C11_10 + { NULL, NULL, NULL, NULL }, // - + { "C13:12", "CS1:0", "C13:12_tmp", "cs1:0_tmp" }, // HEX_REG_CTR_REGS64_C13_12 + { NULL, NULL, NULL, NULL }, // - + { "C15:14", "UPCYCLE", "C15:14_tmp", "upcycle_tmp" }, // HEX_REG_CTR_REGS64_C15_14 + { NULL, NULL, NULL, NULL }, // - + { "C17:16", "C17:16", "C17:16_tmp", "c17:16_tmp" }, // HEX_REG_CTR_REGS64_C17_16 + { NULL, NULL, NULL, NULL }, // - + { "C19:18", "PKTCOUNT", "C19:18_tmp", "pktcount_tmp" }, // HEX_REG_CTR_REGS64_C19_18 + { NULL, NULL, NULL, NULL }, // - + { "C21:20", "C21:20", "C21:20_tmp", "c21:20_tmp" }, // HEX_REG_CTR_REGS64_C21_20 + { NULL, NULL, NULL, NULL }, // - + { "C23:22", "C23:22", "C23:22_tmp", "c23:22_tmp" }, // HEX_REG_CTR_REGS64_C23_22 + { NULL, NULL, NULL, NULL }, // - + { "C25:24", "C25:24", "C25:24_tmp", "c25:24_tmp" }, // HEX_REG_CTR_REGS64_C25_24 + { NULL, NULL, NULL, NULL }, // - + { "C27:26", "C27:26", "C27:26_tmp", "c27:26_tmp" }, // HEX_REG_CTR_REGS64_C27_26 + { NULL, NULL, NULL, NULL }, // - + { "C29:28", "C29:28", 
"C29:28_tmp", "c29:28_tmp" }, // HEX_REG_CTR_REGS64_C29_28 + { NULL, NULL, NULL, NULL }, // - + { "C31:30", "UTIMER", "C31:30_tmp", "utimer_tmp" }, // HEX_REG_CTR_REGS64_C31_30 +}; + +/** + * \brief Lookup table for register names and alias of class DoubleRegs. + */ +HexRegNames hexagon_doubleregs_lt_v69[] = { + { "R1:0", "R1:0", "R1:0_tmp", "r1:0_tmp" }, // HEX_REG_DOUBLE_REGS_R1_0 + { NULL, NULL, NULL, NULL }, // - + { "R3:2", "R3:2", "R3:2_tmp", "r3:2_tmp" }, // HEX_REG_DOUBLE_REGS_R3_2 + { NULL, NULL, NULL, NULL }, // - + { "R5:4", "R5:4", "R5:4_tmp", "r5:4_tmp" }, // HEX_REG_DOUBLE_REGS_R5_4 + { NULL, NULL, NULL, NULL }, // - + { "R7:6", "R7:6", "R7:6_tmp", "r7:6_tmp" }, // HEX_REG_DOUBLE_REGS_R7_6 + { NULL, NULL, NULL, NULL }, // - + { "R9:8", "R9:8", "R9:8_tmp", "r9:8_tmp" }, // HEX_REG_DOUBLE_REGS_R9_8 + { NULL, NULL, NULL, NULL }, // - + { "R11:10", "R11:10", "R11:10_tmp", "r11:10_tmp" }, // HEX_REG_DOUBLE_REGS_R11_10 + { NULL, NULL, NULL, NULL }, // - + { "R13:12", "R13:12", "R13:12_tmp", "r13:12_tmp" }, // HEX_REG_DOUBLE_REGS_R13_12 + { NULL, NULL, NULL, NULL }, // - + { "R15:14", "R15:14", "R15:14_tmp", "r15:14_tmp" }, // HEX_REG_DOUBLE_REGS_R15_14 + { NULL, NULL, NULL, NULL }, // - + { "R17:16", "R17:16", "R17:16_tmp", "r17:16_tmp" }, // HEX_REG_DOUBLE_REGS_R17_16 + { NULL, NULL, NULL, NULL }, // - + { "R19:18", "R19:18", "R19:18_tmp", "r19:18_tmp" }, // HEX_REG_DOUBLE_REGS_R19_18 + { NULL, NULL, NULL, NULL }, // - + { "R21:20", "R21:20", "R21:20_tmp", "r21:20_tmp" }, // HEX_REG_DOUBLE_REGS_R21_20 + { NULL, NULL, NULL, NULL }, // - + { "R23:22", "R23:22", "R23:22_tmp", "r23:22_tmp" }, // HEX_REG_DOUBLE_REGS_R23_22 + { NULL, NULL, NULL, NULL }, // - + { "R25:24", "R25:24", "R25:24_tmp", "r25:24_tmp" }, // HEX_REG_DOUBLE_REGS_R25_24 + { NULL, NULL, NULL, NULL }, // - + { "R27:26", "R27:26", "R27:26_tmp", "r27:26_tmp" }, // HEX_REG_DOUBLE_REGS_R27_26 + { NULL, NULL, NULL, NULL }, // - + { "R29:28", "R29:28", "R29:28_tmp", "r29:28_tmp" }, // 
HEX_REG_DOUBLE_REGS_R29_28 + { NULL, NULL, NULL, NULL }, // - + { "R31:30", "LR:FP", "R31:30_tmp", "lr:fp_tmp" }, // HEX_REG_DOUBLE_REGS_R31_30 +}; + +/** + * \brief Lookup table for register names and alias of class GeneralDoubleLow8Regs. + */ +HexRegNames hexagon_generaldoublelow8regs_lt_v69[] = { + { "R1:0", "R1:0", "R1:0_tmp", "r1:0_tmp" }, // HEX_REG_GENERAL_DOUBLE_LOW8_REGS_R1_0 + { NULL, NULL, NULL, NULL }, // - + { "R3:2", "R3:2", "R3:2_tmp", "r3:2_tmp" }, // HEX_REG_GENERAL_DOUBLE_LOW8_REGS_R3_2 + { NULL, NULL, NULL, NULL }, // - + { "R5:4", "R5:4", "R5:4_tmp", "r5:4_tmp" }, // HEX_REG_GENERAL_DOUBLE_LOW8_REGS_R5_4 + { NULL, NULL, NULL, NULL }, // - + { "R7:6", "R7:6", "R7:6_tmp", "r7:6_tmp" }, // HEX_REG_GENERAL_DOUBLE_LOW8_REGS_R7_6 + { NULL, NULL, NULL, NULL }, // - + { NULL, NULL, NULL, NULL }, // - + { NULL, NULL, NULL, NULL }, // - + { NULL, NULL, NULL, NULL }, // - + { NULL, NULL, NULL, NULL }, // - + { NULL, NULL, NULL, NULL }, // - + { NULL, NULL, NULL, NULL }, // - + { NULL, NULL, NULL, NULL }, // - + { NULL, NULL, NULL, NULL }, // - + { "R17:16", "R17:16", "R17:16_tmp", "r17:16_tmp" }, // HEX_REG_GENERAL_DOUBLE_LOW8_REGS_R17_16 + { NULL, NULL, NULL, NULL }, // - + { "R19:18", "R19:18", "R19:18_tmp", "r19:18_tmp" }, // HEX_REG_GENERAL_DOUBLE_LOW8_REGS_R19_18 + { NULL, NULL, NULL, NULL }, // - + { "R21:20", "R21:20", "R21:20_tmp", "r21:20_tmp" }, // HEX_REG_GENERAL_DOUBLE_LOW8_REGS_R21_20 + { NULL, NULL, NULL, NULL }, // - + { "R23:22", "R23:22", "R23:22_tmp", "r23:22_tmp" }, // HEX_REG_GENERAL_DOUBLE_LOW8_REGS_R23_22 +}; + +/** + * \brief Lookup table for register names and alias of class GeneralSubRegs. 
+ */ +HexRegNames hexagon_generalsubregs_lt_v69[] = { + { "R0", "R0", "R0_tmp", "r0_tmp" }, // HEX_REG_GENERAL_SUB_REGS_R0 + { "R1", "R1", "R1_tmp", "r1_tmp" }, // HEX_REG_GENERAL_SUB_REGS_R1 + { "R2", "R2", "R2_tmp", "r2_tmp" }, // HEX_REG_GENERAL_SUB_REGS_R2 + { "R3", "R3", "R3_tmp", "r3_tmp" }, // HEX_REG_GENERAL_SUB_REGS_R3 + { "R4", "R4", "R4_tmp", "r4_tmp" }, // HEX_REG_GENERAL_SUB_REGS_R4 + { "R5", "R5", "R5_tmp", "r5_tmp" }, // HEX_REG_GENERAL_SUB_REGS_R5 + { "R6", "R6", "R6_tmp", "r6_tmp" }, // HEX_REG_GENERAL_SUB_REGS_R6 + { "R7", "R7", "R7_tmp", "r7_tmp" }, // HEX_REG_GENERAL_SUB_REGS_R7 + { NULL, NULL, NULL, NULL }, // - + { NULL, NULL, NULL, NULL }, // - + { NULL, NULL, NULL, NULL }, // - + { NULL, NULL, NULL, NULL }, // - + { NULL, NULL, NULL, NULL }, // - + { NULL, NULL, NULL, NULL }, // - + { NULL, NULL, NULL, NULL }, // - + { NULL, NULL, NULL, NULL }, // - + { "R16", "R16", "R16_tmp", "r16_tmp" }, // HEX_REG_GENERAL_SUB_REGS_R16 + { "R17", "R17", "R17_tmp", "r17_tmp" }, // HEX_REG_GENERAL_SUB_REGS_R17 + { "R18", "R18", "R18_tmp", "r18_tmp" }, // HEX_REG_GENERAL_SUB_REGS_R18 + { "R19", "R19", "R19_tmp", "r19_tmp" }, // HEX_REG_GENERAL_SUB_REGS_R19 + { "R20", "R20", "R20_tmp", "r20_tmp" }, // HEX_REG_GENERAL_SUB_REGS_R20 + { "R21", "R21", "R21_tmp", "r21_tmp" }, // HEX_REG_GENERAL_SUB_REGS_R21 + { "R22", "R22", "R22_tmp", "r22_tmp" }, // HEX_REG_GENERAL_SUB_REGS_R22 + { "R23", "R23", "R23_tmp", "r23_tmp" }, // HEX_REG_GENERAL_SUB_REGS_R23 +}; + +/** + * \brief Lookup table for register names and alias of class GuestRegs. 
+ */ +HexRegNames hexagon_guestregs_lt_v69[] = { + { "G0", "GELR", "G0_tmp", "gelr_tmp" }, // HEX_REG_GUEST_REGS_G0 + { "G1", "GSR", "G1_tmp", "gsr_tmp" }, // HEX_REG_GUEST_REGS_G1 + { "G2", "GOSP", "G2_tmp", "gosp_tmp" }, // HEX_REG_GUEST_REGS_G2 + { "G3", "GBADVA", "G3_tmp", "gbadva_tmp" }, // HEX_REG_GUEST_REGS_G3 + { "G4", "G4", "G4_tmp", "g4_tmp" }, // HEX_REG_GUEST_REGS_G4 + { "G5", "G5", "G5_tmp", "g5_tmp" }, // HEX_REG_GUEST_REGS_G5 + { "G6", "G6", "G6_tmp", "g6_tmp" }, // HEX_REG_GUEST_REGS_G6 + { "G7", "G7", "G7_tmp", "g7_tmp" }, // HEX_REG_GUEST_REGS_G7 + { "G8", "G8", "G8_tmp", "g8_tmp" }, // HEX_REG_GUEST_REGS_G8 + { "G9", "G9", "G9_tmp", "g9_tmp" }, // HEX_REG_GUEST_REGS_G9 + { "G10", "G10", "G10_tmp", "g10_tmp" }, // HEX_REG_GUEST_REGS_G10 + { "G11", "G11", "G11_tmp", "g11_tmp" }, // HEX_REG_GUEST_REGS_G11 + { "G12", "G12", "G12_tmp", "g12_tmp" }, // HEX_REG_GUEST_REGS_G12 + { "G13", "G13", "G13_tmp", "g13_tmp" }, // HEX_REG_GUEST_REGS_G13 + { "G14", "G14", "G14_tmp", "g14_tmp" }, // HEX_REG_GUEST_REGS_G14 + { "G15", "G15", "G15_tmp", "g15_tmp" }, // HEX_REG_GUEST_REGS_G15 + { "G16", "GPMUCNT4", "G16_tmp", "gpmucnt4_tmp" }, // HEX_REG_GUEST_REGS_G16 + { "G17", "GPMUCNT5", "G17_tmp", "gpmucnt5_tmp" }, // HEX_REG_GUEST_REGS_G17 + { "G18", "GPMUCNT6", "G18_tmp", "gpmucnt6_tmp" }, // HEX_REG_GUEST_REGS_G18 + { "G19", "GPMUCNT7", "G19_tmp", "gpmucnt7_tmp" }, // HEX_REG_GUEST_REGS_G19 + { "G20", "G20", "G20_tmp", "g20_tmp" }, // HEX_REG_GUEST_REGS_G20 + { "G21", "G21", "G21_tmp", "g21_tmp" }, // HEX_REG_GUEST_REGS_G21 + { "G22", "G22", "G22_tmp", "g22_tmp" }, // HEX_REG_GUEST_REGS_G22 + { "G23", "G23", "G23_tmp", "g23_tmp" }, // HEX_REG_GUEST_REGS_G23 + { "G24", "GPCYCLELO", "G24_tmp", "gpcyclelo_tmp" }, // HEX_REG_GUEST_REGS_G24 + { "G25", "GPCYCLEHI", "G25_tmp", "gpcyclehi_tmp" }, // HEX_REG_GUEST_REGS_G25 + { "G26", "GPMUCNT0", "G26_tmp", "gpmucnt0_tmp" }, // HEX_REG_GUEST_REGS_G26 + { "G27", "GPMUCNT1", "G27_tmp", "gpmucnt1_tmp" }, // 
HEX_REG_GUEST_REGS_G27 + { "G28", "GPMUCNT2", "G28_tmp", "gpmucnt2_tmp" }, // HEX_REG_GUEST_REGS_G28 + { "G29", "GPMUCNT3", "G29_tmp", "gpmucnt3_tmp" }, // HEX_REG_GUEST_REGS_G29 + { "G30", "G30", "G30_tmp", "g30_tmp" }, // HEX_REG_GUEST_REGS_G30 + { "G31", "G31", "G31_tmp", "g31_tmp" }, // HEX_REG_GUEST_REGS_G31 +}; + +/** + * \brief Lookup table for register names and alias of class GuestRegs64. + */ +HexRegNames hexagon_guestregs64_lt_v69[] = { + { "G1:0", "G1:0", "G1:0_tmp", "g1:0_tmp" }, // HEX_REG_GUEST_REGS64_G1_0 + { NULL, NULL, NULL, NULL }, // - + { "G3:2", "G3:2", "G3:2_tmp", "g3:2_tmp" }, // HEX_REG_GUEST_REGS64_G3_2 + { NULL, NULL, NULL, NULL }, // - + { "G5:4", "G5:4", "G5:4_tmp", "g5:4_tmp" }, // HEX_REG_GUEST_REGS64_G5_4 + { NULL, NULL, NULL, NULL }, // - + { "G7:6", "G7:6", "G7:6_tmp", "g7:6_tmp" }, // HEX_REG_GUEST_REGS64_G7_6 + { NULL, NULL, NULL, NULL }, // - + { "G9:8", "G9:8", "G9:8_tmp", "g9:8_tmp" }, // HEX_REG_GUEST_REGS64_G9_8 + { NULL, NULL, NULL, NULL }, // - + { "G11:10", "G11:10", "G11:10_tmp", "g11:10_tmp" }, // HEX_REG_GUEST_REGS64_G11_10 + { NULL, NULL, NULL, NULL }, // - + { "G13:12", "G13:12", "G13:12_tmp", "g13:12_tmp" }, // HEX_REG_GUEST_REGS64_G13_12 + { NULL, NULL, NULL, NULL }, // - + { "G15:14", "G15:14", "G15:14_tmp", "g15:14_tmp" }, // HEX_REG_GUEST_REGS64_G15_14 + { NULL, NULL, NULL, NULL }, // - + { "G17:16", "G17:16", "G17:16_tmp", "g17:16_tmp" }, // HEX_REG_GUEST_REGS64_G17_16 + { NULL, NULL, NULL, NULL }, // - + { "G19:18", "G19:18", "G19:18_tmp", "g19:18_tmp" }, // HEX_REG_GUEST_REGS64_G19_18 + { NULL, NULL, NULL, NULL }, // - + { "G21:20", "G21:20", "G21:20_tmp", "g21:20_tmp" }, // HEX_REG_GUEST_REGS64_G21_20 + { NULL, NULL, NULL, NULL }, // - + { "G23:22", "G23:22", "G23:22_tmp", "g23:22_tmp" }, // HEX_REG_GUEST_REGS64_G23_22 + { NULL, NULL, NULL, NULL }, // - + { "G25:24", "G25:24", "G25:24_tmp", "g25:24_tmp" }, // HEX_REG_GUEST_REGS64_G25_24 + { NULL, NULL, NULL, NULL }, // - + { "G27:26", "G27:26", "G27:26_tmp", 
"g27:26_tmp" }, // HEX_REG_GUEST_REGS64_G27_26 + { NULL, NULL, NULL, NULL }, // - + { "G29:28", "G29:28", "G29:28_tmp", "g29:28_tmp" }, // HEX_REG_GUEST_REGS64_G29_28 + { NULL, NULL, NULL, NULL }, // - + { "G31:30", "G31:30", "G31:30_tmp", "g31:30_tmp" }, // HEX_REG_GUEST_REGS64_G31_30 +}; + +/** + * \brief Lookup table for register names and alias of class HvxQR. + */ +HexRegNames hexagon_hvxqr_lt_v69[] = { + { "Q0", "Q0", "Q0_tmp", "q0_tmp" }, // HEX_REG_HVX_QR_Q0 + { "Q1", "Q1", "Q1_tmp", "q1_tmp" }, // HEX_REG_HVX_QR_Q1 + { "Q2", "Q2", "Q2_tmp", "q2_tmp" }, // HEX_REG_HVX_QR_Q2 + { "Q3", "Q3", "Q3_tmp", "q3_tmp" }, // HEX_REG_HVX_QR_Q3 +}; + +/** + * \brief Lookup table for register names and alias of class HvxVQR. + */ +HexRegNames hexagon_hvxvqr_lt_v69[] = { + { "V3:0", "V3:0", "V3:0_tmp", "v3:0_tmp" }, // HEX_REG_HVX_VQR_V3_0 + { NULL, NULL, NULL, NULL }, // - + { NULL, NULL, NULL, NULL }, // - + { NULL, NULL, NULL, NULL }, // - + { "V7:4", "V7:4", "V7:4_tmp", "v7:4_tmp" }, // HEX_REG_HVX_VQR_V7_4 + { NULL, NULL, NULL, NULL }, // - + { NULL, NULL, NULL, NULL }, // - + { NULL, NULL, NULL, NULL }, // - + { "V11:8", "V11:8", "V11:8_tmp", "v11:8_tmp" }, // HEX_REG_HVX_VQR_V11_8 + { NULL, NULL, NULL, NULL }, // - + { NULL, NULL, NULL, NULL }, // - + { NULL, NULL, NULL, NULL }, // - + { "V15:12", "V15:12", "V15:12_tmp", "v15:12_tmp" }, // HEX_REG_HVX_VQR_V15_12 + { NULL, NULL, NULL, NULL }, // - + { NULL, NULL, NULL, NULL }, // - + { NULL, NULL, NULL, NULL }, // - + { "V19:16", "V19:16", "V19:16_tmp", "v19:16_tmp" }, // HEX_REG_HVX_VQR_V19_16 + { NULL, NULL, NULL, NULL }, // - + { NULL, NULL, NULL, NULL }, // - + { NULL, NULL, NULL, NULL }, // - + { "V23:20", "V23:20", "V23:20_tmp", "v23:20_tmp" }, // HEX_REG_HVX_VQR_V23_20 + { NULL, NULL, NULL, NULL }, // - + { NULL, NULL, NULL, NULL }, // - + { NULL, NULL, NULL, NULL }, // - + { "V27:24", "V27:24", "V27:24_tmp", "v27:24_tmp" }, // HEX_REG_HVX_VQR_V27_24 + { NULL, NULL, NULL, NULL }, // - + { NULL, NULL, NULL, 
NULL }, // - + { NULL, NULL, NULL, NULL }, // - + { "V31:28", "V31:28", "V31:28_tmp", "v31:28_tmp" }, // HEX_REG_HVX_VQR_V31_28 +}; + +/** + * \brief Lookup table for register names and alias of class HvxVR. + */ +HexRegNames hexagon_hvxvr_lt_v69[] = { + { "V0", "V0", "V0_tmp", "v0_tmp" }, // HEX_REG_HVX_VR_V0 + { "V1", "V1", "V1_tmp", "v1_tmp" }, // HEX_REG_HVX_VR_V1 + { "V2", "V2", "V2_tmp", "v2_tmp" }, // HEX_REG_HVX_VR_V2 + { "V3", "V3", "V3_tmp", "v3_tmp" }, // HEX_REG_HVX_VR_V3 + { "V4", "V4", "V4_tmp", "v4_tmp" }, // HEX_REG_HVX_VR_V4 + { "V5", "V5", "V5_tmp", "v5_tmp" }, // HEX_REG_HVX_VR_V5 + { "V6", "V6", "V6_tmp", "v6_tmp" }, // HEX_REG_HVX_VR_V6 + { "V7", "V7", "V7_tmp", "v7_tmp" }, // HEX_REG_HVX_VR_V7 + { "V8", "V8", "V8_tmp", "v8_tmp" }, // HEX_REG_HVX_VR_V8 + { "V9", "V9", "V9_tmp", "v9_tmp" }, // HEX_REG_HVX_VR_V9 + { "V10", "V10", "V10_tmp", "v10_tmp" }, // HEX_REG_HVX_VR_V10 + { "V11", "V11", "V11_tmp", "v11_tmp" }, // HEX_REG_HVX_VR_V11 + { "V12", "V12", "V12_tmp", "v12_tmp" }, // HEX_REG_HVX_VR_V12 + { "V13", "V13", "V13_tmp", "v13_tmp" }, // HEX_REG_HVX_VR_V13 + { "V14", "V14", "V14_tmp", "v14_tmp" }, // HEX_REG_HVX_VR_V14 + { "V15", "V15", "V15_tmp", "v15_tmp" }, // HEX_REG_HVX_VR_V15 + { "V16", "V16", "V16_tmp", "v16_tmp" }, // HEX_REG_HVX_VR_V16 + { "V17", "V17", "V17_tmp", "v17_tmp" }, // HEX_REG_HVX_VR_V17 + { "V18", "V18", "V18_tmp", "v18_tmp" }, // HEX_REG_HVX_VR_V18 + { "V19", "V19", "V19_tmp", "v19_tmp" }, // HEX_REG_HVX_VR_V19 + { "V20", "V20", "V20_tmp", "v20_tmp" }, // HEX_REG_HVX_VR_V20 + { "V21", "V21", "V21_tmp", "v21_tmp" }, // HEX_REG_HVX_VR_V21 + { "V22", "V22", "V22_tmp", "v22_tmp" }, // HEX_REG_HVX_VR_V22 + { "V23", "V23", "V23_tmp", "v23_tmp" }, // HEX_REG_HVX_VR_V23 + { "V24", "V24", "V24_tmp", "v24_tmp" }, // HEX_REG_HVX_VR_V24 + { "V25", "V25", "V25_tmp", "v25_tmp" }, // HEX_REG_HVX_VR_V25 + { "V26", "V26", "V26_tmp", "v26_tmp" }, // HEX_REG_HVX_VR_V26 + { "V27", "V27", "V27_tmp", "v27_tmp" }, // HEX_REG_HVX_VR_V27 + { 
"V28", "V28", "V28_tmp", "v28_tmp" }, // HEX_REG_HVX_VR_V28 + { "V29", "V29", "V29_tmp", "v29_tmp" }, // HEX_REG_HVX_VR_V29 + { "V30", "V30", "V30_tmp", "v30_tmp" }, // HEX_REG_HVX_VR_V30 + { "V31", "V31", "V31_tmp", "v31_tmp" }, // HEX_REG_HVX_VR_V31 +}; + +/** + * \brief Lookup table for register names and alias of class HvxWR. + */ +HexRegNames hexagon_hvxwr_lt_v69[] = { + { "V1:0", "V1:0", "V1:0_tmp", "v1:0_tmp" }, // HEX_REG_HVX_WR_V1_0 + { NULL, NULL, NULL, NULL }, // - + { "V3:2", "V3:2", "V3:2_tmp", "v3:2_tmp" }, // HEX_REG_HVX_WR_V3_2 + { NULL, NULL, NULL, NULL }, // - + { "V5:4", "V5:4", "V5:4_tmp", "v5:4_tmp" }, // HEX_REG_HVX_WR_V5_4 + { NULL, NULL, NULL, NULL }, // - + { "V7:6", "V7:6", "V7:6_tmp", "v7:6_tmp" }, // HEX_REG_HVX_WR_V7_6 + { NULL, NULL, NULL, NULL }, // - + { "V9:8", "V9:8", "V9:8_tmp", "v9:8_tmp" }, // HEX_REG_HVX_WR_V9_8 + { NULL, NULL, NULL, NULL }, // - + { "V11:10", "V11:10", "V11:10_tmp", "v11:10_tmp" }, // HEX_REG_HVX_WR_V11_10 + { NULL, NULL, NULL, NULL }, // - + { "V13:12", "V13:12", "V13:12_tmp", "v13:12_tmp" }, // HEX_REG_HVX_WR_V13_12 + { NULL, NULL, NULL, NULL }, // - + { "V15:14", "V15:14", "V15:14_tmp", "v15:14_tmp" }, // HEX_REG_HVX_WR_V15_14 + { NULL, NULL, NULL, NULL }, // - + { "V17:16", "V17:16", "V17:16_tmp", "v17:16_tmp" }, // HEX_REG_HVX_WR_V17_16 + { NULL, NULL, NULL, NULL }, // - + { "V19:18", "V19:18", "V19:18_tmp", "v19:18_tmp" }, // HEX_REG_HVX_WR_V19_18 + { NULL, NULL, NULL, NULL }, // - + { "V21:20", "V21:20", "V21:20_tmp", "v21:20_tmp" }, // HEX_REG_HVX_WR_V21_20 + { NULL, NULL, NULL, NULL }, // - + { "V23:22", "V23:22", "V23:22_tmp", "v23:22_tmp" }, // HEX_REG_HVX_WR_V23_22 + { NULL, NULL, NULL, NULL }, // - + { "V25:24", "V25:24", "V25:24_tmp", "v25:24_tmp" }, // HEX_REG_HVX_WR_V25_24 + { NULL, NULL, NULL, NULL }, // - + { "V27:26", "V27:26", "V27:26_tmp", "v27:26_tmp" }, // HEX_REG_HVX_WR_V27_26 + { NULL, NULL, NULL, NULL }, // - + { "V29:28", "V29:28", "V29:28_tmp", "v29:28_tmp" }, // 
HEX_REG_HVX_WR_V29_28 + { NULL, NULL, NULL, NULL }, // - + { "V31:30", "V31:30", "V31:30_tmp", "v31:30_tmp" }, // HEX_REG_HVX_WR_V31_30 +}; + +/** + * \brief Lookup table for register names and alias of class IntRegs. + */ +HexRegNames hexagon_intregs_lt_v69[] = { + { "R0", "R0", "R0_tmp", "r0_tmp" }, // HEX_REG_INT_REGS_R0 + { "R1", "R1", "R1_tmp", "r1_tmp" }, // HEX_REG_INT_REGS_R1 + { "R2", "R2", "R2_tmp", "r2_tmp" }, // HEX_REG_INT_REGS_R2 + { "R3", "R3", "R3_tmp", "r3_tmp" }, // HEX_REG_INT_REGS_R3 + { "R4", "R4", "R4_tmp", "r4_tmp" }, // HEX_REG_INT_REGS_R4 + { "R5", "R5", "R5_tmp", "r5_tmp" }, // HEX_REG_INT_REGS_R5 + { "R6", "R6", "R6_tmp", "r6_tmp" }, // HEX_REG_INT_REGS_R6 + { "R7", "R7", "R7_tmp", "r7_tmp" }, // HEX_REG_INT_REGS_R7 + { "R8", "R8", "R8_tmp", "r8_tmp" }, // HEX_REG_INT_REGS_R8 + { "R9", "R9", "R9_tmp", "r9_tmp" }, // HEX_REG_INT_REGS_R9 + { "R10", "R10", "R10_tmp", "r10_tmp" }, // HEX_REG_INT_REGS_R10 + { "R11", "R11", "R11_tmp", "r11_tmp" }, // HEX_REG_INT_REGS_R11 + { "R12", "R12", "R12_tmp", "r12_tmp" }, // HEX_REG_INT_REGS_R12 + { "R13", "R13", "R13_tmp", "r13_tmp" }, // HEX_REG_INT_REGS_R13 + { "R14", "R14", "R14_tmp", "r14_tmp" }, // HEX_REG_INT_REGS_R14 + { "R15", "R15", "R15_tmp", "r15_tmp" }, // HEX_REG_INT_REGS_R15 + { "R16", "R16", "R16_tmp", "r16_tmp" }, // HEX_REG_INT_REGS_R16 + { "R17", "R17", "R17_tmp", "r17_tmp" }, // HEX_REG_INT_REGS_R17 + { "R18", "R18", "R18_tmp", "r18_tmp" }, // HEX_REG_INT_REGS_R18 + { "R19", "R19", "R19_tmp", "r19_tmp" }, // HEX_REG_INT_REGS_R19 + { "R20", "R20", "R20_tmp", "r20_tmp" }, // HEX_REG_INT_REGS_R20 + { "R21", "R21", "R21_tmp", "r21_tmp" }, // HEX_REG_INT_REGS_R21 + { "R22", "R22", "R22_tmp", "r22_tmp" }, // HEX_REG_INT_REGS_R22 + { "R23", "R23", "R23_tmp", "r23_tmp" }, // HEX_REG_INT_REGS_R23 + { "R24", "R24", "R24_tmp", "r24_tmp" }, // HEX_REG_INT_REGS_R24 + { "R25", "R25", "R25_tmp", "r25_tmp" }, // HEX_REG_INT_REGS_R25 + { "R26", "R26", "R26_tmp", "r26_tmp" }, // HEX_REG_INT_REGS_R26 + 
{ "R27", "R27", "R27_tmp", "r27_tmp" }, // HEX_REG_INT_REGS_R27 + { "R28", "R28", "R28_tmp", "r28_tmp" }, // HEX_REG_INT_REGS_R28 + { "R29", "SP", "R29_tmp", "sp_tmp" }, // HEX_REG_INT_REGS_R29 + { "R30", "FP", "R30_tmp", "fp_tmp" }, // HEX_REG_INT_REGS_R30 + { "R31", "LR", "R31_tmp", "lr_tmp" }, // HEX_REG_INT_REGS_R31 +}; + +/** + * \brief Lookup table for register names and alias of class IntRegsLow8. + */ +HexRegNames hexagon_intregslow8_lt_v69[] = { + { "R0", "R0", "R0_tmp", "r0_tmp" }, // HEX_REG_INT_REGS_LOW8_R0 + { "R1", "R1", "R1_tmp", "r1_tmp" }, // HEX_REG_INT_REGS_LOW8_R1 + { "R2", "R2", "R2_tmp", "r2_tmp" }, // HEX_REG_INT_REGS_LOW8_R2 + { "R3", "R3", "R3_tmp", "r3_tmp" }, // HEX_REG_INT_REGS_LOW8_R3 + { "R4", "R4", "R4_tmp", "r4_tmp" }, // HEX_REG_INT_REGS_LOW8_R4 + { "R5", "R5", "R5_tmp", "r5_tmp" }, // HEX_REG_INT_REGS_LOW8_R5 + { "R6", "R6", "R6_tmp", "r6_tmp" }, // HEX_REG_INT_REGS_LOW8_R6 + { "R7", "R7", "R7_tmp", "r7_tmp" }, // HEX_REG_INT_REGS_LOW8_R7 +}; + +/** + * \brief Lookup table for register names and alias of class ModRegs. + */ +HexRegNames hexagon_modregs_lt_v69[] = { + { NULL, NULL, NULL, NULL }, // - + { NULL, NULL, NULL, NULL }, // - + { NULL, NULL, NULL, NULL }, // - + { NULL, NULL, NULL, NULL }, // - + { NULL, NULL, NULL, NULL }, // - + { NULL, NULL, NULL, NULL }, // - + { "C6", "M0", "C6_tmp", "m0_tmp" }, // HEX_REG_MOD_REGS_C6 + { "C7", "M1", "C7_tmp", "m1_tmp" }, // HEX_REG_MOD_REGS_C7 +}; + +/** + * \brief Lookup table for register names and alias of class PredRegs. + */ +HexRegNames hexagon_predregs_lt_v69[] = { + { "P0", "P0", "P0_tmp", "p0_tmp" }, // HEX_REG_PRED_REGS_P0 + { "P1", "P1", "P1_tmp", "p1_tmp" }, // HEX_REG_PRED_REGS_P1 + { "P2", "P2", "P2_tmp", "p2_tmp" }, // HEX_REG_PRED_REGS_P2 + { "P3", "P3", "P3_tmp", "p3_tmp" }, // HEX_REG_PRED_REGS_P3 +}; + +/** + * \brief Lookup table for register names and alias of class SysRegs. 
+ */ +HexRegNames hexagon_sysregs_lt_v69[] = { + { "S0", "SGP0", "S0_tmp", "sgp0_tmp" }, // HEX_REG_SYS_REGS_S0 + { "S1", "SGP1", "S1_tmp", "sgp1_tmp" }, // HEX_REG_SYS_REGS_S1 + { "S2", "STID", "S2_tmp", "stid_tmp" }, // HEX_REG_SYS_REGS_S2 + { "S3", "ELR", "S3_tmp", "elr_tmp" }, // HEX_REG_SYS_REGS_S3 + { "S4", "BADVA0", "S4_tmp", "badva0_tmp" }, // HEX_REG_SYS_REGS_S4 + { "S5", "BADVA1", "S5_tmp", "badva1_tmp" }, // HEX_REG_SYS_REGS_S5 + { "S6", "SSR", "S6_tmp", "ssr_tmp" }, // HEX_REG_SYS_REGS_S6 + { "S7", "CCR", "S7_tmp", "ccr_tmp" }, // HEX_REG_SYS_REGS_S7 + { "S8", "HTID", "S8_tmp", "htid_tmp" }, // HEX_REG_SYS_REGS_S8 + { "S9", "BADVA", "S9_tmp", "badva_tmp" }, // HEX_REG_SYS_REGS_S9 + { "S10", "IMASK", "S10_tmp", "imask_tmp" }, // HEX_REG_SYS_REGS_S10 + { "S11", "S11", "S11_tmp", "s11_tmp" }, // HEX_REG_SYS_REGS_S11 + { "S12", "S12", "S12_tmp", "s12_tmp" }, // HEX_REG_SYS_REGS_S12 + { "S13", "S13", "S13_tmp", "s13_tmp" }, // HEX_REG_SYS_REGS_S13 + { "S14", "S14", "S14_tmp", "s14_tmp" }, // HEX_REG_SYS_REGS_S14 + { "S15", "S15", "S15_tmp", "s15_tmp" }, // HEX_REG_SYS_REGS_S15 + { "S16", "EVB", "S16_tmp", "evb_tmp" }, // HEX_REG_SYS_REGS_S16 + { "S17", "MODECTL", "S17_tmp", "modectl_tmp" }, // HEX_REG_SYS_REGS_S17 + { "S18", "SYSCFG", "S18_tmp", "syscfg_tmp" }, // HEX_REG_SYS_REGS_S18 + { "S19", "S19", "S19_tmp", "s19_tmp" }, // HEX_REG_SYS_REGS_S19 + { "S20", "S20", "S20_tmp", "s20_tmp" }, // HEX_REG_SYS_REGS_S20 + { "S21", "VID", "S21_tmp", "vid_tmp" }, // HEX_REG_SYS_REGS_S21 + { "S22", "S22", "S22_tmp", "s22_tmp" }, // HEX_REG_SYS_REGS_S22 + { "S23", "S23", "S23_tmp", "s23_tmp" }, // HEX_REG_SYS_REGS_S23 + { "S24", "S24", "S24_tmp", "s24_tmp" }, // HEX_REG_SYS_REGS_S24 + { "S25", "S25", "S25_tmp", "s25_tmp" }, // HEX_REG_SYS_REGS_S25 + { "S26", "S26", "S26_tmp", "s26_tmp" }, // HEX_REG_SYS_REGS_S26 + { "S27", "CFGBASE", "S27_tmp", "cfgbase_tmp" }, // HEX_REG_SYS_REGS_S27 + { "S28", "DIAG", "S28_tmp", "diag_tmp" }, // HEX_REG_SYS_REGS_S28 + { "S29", 
"REV", "S29_tmp", "rev_tmp" }, // HEX_REG_SYS_REGS_S29 + { "S30", "PCYCLELO", "S30_tmp", "pcyclelo_tmp" }, // HEX_REG_SYS_REGS_S30 + { "S31", "PCYCLEHI", "S31_tmp", "pcyclehi_tmp" }, // HEX_REG_SYS_REGS_S31 + { "S32", "ISDBST", "S32_tmp", "isdbst_tmp" }, // HEX_REG_SYS_REGS_S32 + { "S33", "ISDBCFG0", "S33_tmp", "isdbcfg0_tmp" }, // HEX_REG_SYS_REGS_S33 + { "S34", "ISDBCFG1", "S34_tmp", "isdbcfg1_tmp" }, // HEX_REG_SYS_REGS_S34 + { "S35", "S35", "S35_tmp", "s35_tmp" }, // HEX_REG_SYS_REGS_S35 + { "S36", "BRKPTPC0", "S36_tmp", "brkptpc0_tmp" }, // HEX_REG_SYS_REGS_S36 + { "S37", "BRKPTCFG0", "S37_tmp", "brkptcfg0_tmp" }, // HEX_REG_SYS_REGS_S37 + { "S38", "BRKPTPC1", "S38_tmp", "brkptpc1_tmp" }, // HEX_REG_SYS_REGS_S38 + { "S39", "BRKPTCFG1", "S39_tmp", "brkptcfg1_tmp" }, // HEX_REG_SYS_REGS_S39 + { "S40", "ISDBMBXIN", "S40_tmp", "isdbmbxin_tmp" }, // HEX_REG_SYS_REGS_S40 + { "S41", "ISDBMBXOUT", "S41_tmp", "isdbmbxout_tmp" }, // HEX_REG_SYS_REGS_S41 + { "S42", "ISDBEN", "S42_tmp", "isdben_tmp" }, // HEX_REG_SYS_REGS_S42 + { "S43", "ISDBGPR", "S43_tmp", "isdbgpr_tmp" }, // HEX_REG_SYS_REGS_S43 + { "S44", "S44", "S44_tmp", "s44_tmp" }, // HEX_REG_SYS_REGS_S44 + { "S45", "S45", "S45_tmp", "s45_tmp" }, // HEX_REG_SYS_REGS_S45 + { "S46", "S46", "S46_tmp", "s46_tmp" }, // HEX_REG_SYS_REGS_S46 + { "S47", "S47", "S47_tmp", "s47_tmp" }, // HEX_REG_SYS_REGS_S47 + { "S48", "PMUCNT0", "S48_tmp", "pmucnt0_tmp" }, // HEX_REG_SYS_REGS_S48 + { "S49", "PMUCNT1", "S49_tmp", "pmucnt1_tmp" }, // HEX_REG_SYS_REGS_S49 + { "S50", "PMUCNT2", "S50_tmp", "pmucnt2_tmp" }, // HEX_REG_SYS_REGS_S50 + { "S51", "PMUCNT3", "S51_tmp", "pmucnt3_tmp" }, // HEX_REG_SYS_REGS_S51 + { "S52", "PMUEVTCFG", "S52_tmp", "pmuevtcfg_tmp" }, // HEX_REG_SYS_REGS_S52 + { "S53", "PMUCFG", "S53_tmp", "pmucfg_tmp" }, // HEX_REG_SYS_REGS_S53 + { "S54", "S54", "S54_tmp", "s54_tmp" }, // HEX_REG_SYS_REGS_S54 + { "S55", "S55", "S55_tmp", "s55_tmp" }, // HEX_REG_SYS_REGS_S55 + { "S56", "S56", "S56_tmp", "s56_tmp" }, // 
HEX_REG_SYS_REGS_S56 + { "S57", "S57", "S57_tmp", "s57_tmp" }, // HEX_REG_SYS_REGS_S57 + { "S58", "S58", "S58_tmp", "s58_tmp" }, // HEX_REG_SYS_REGS_S58 + { "S59", "S59", "S59_tmp", "s59_tmp" }, // HEX_REG_SYS_REGS_S59 + { "S60", "S60", "S60_tmp", "s60_tmp" }, // HEX_REG_SYS_REGS_S60 + { "S61", "S61", "S61_tmp", "s61_tmp" }, // HEX_REG_SYS_REGS_S61 + { "S62", "S62", "S62_tmp", "s62_tmp" }, // HEX_REG_SYS_REGS_S62 + { "S63", "S63", "S63_tmp", "s63_tmp" }, // HEX_REG_SYS_REGS_S63 + { "S64", "S64", "S64_tmp", "s64_tmp" }, // HEX_REG_SYS_REGS_S64 + { "S65", "S65", "S65_tmp", "s65_tmp" }, // HEX_REG_SYS_REGS_S65 + { "S66", "S66", "S66_tmp", "s66_tmp" }, // HEX_REG_SYS_REGS_S66 + { "S67", "S67", "S67_tmp", "s67_tmp" }, // HEX_REG_SYS_REGS_S67 + { "S68", "S68", "S68_tmp", "s68_tmp" }, // HEX_REG_SYS_REGS_S68 + { "S69", "S69", "S69_tmp", "s69_tmp" }, // HEX_REG_SYS_REGS_S69 + { "S70", "S70", "S70_tmp", "s70_tmp" }, // HEX_REG_SYS_REGS_S70 + { "S71", "S71", "S71_tmp", "s71_tmp" }, // HEX_REG_SYS_REGS_S71 + { "S72", "S72", "S72_tmp", "s72_tmp" }, // HEX_REG_SYS_REGS_S72 + { "S73", "S73", "S73_tmp", "s73_tmp" }, // HEX_REG_SYS_REGS_S73 + { "S74", "S74", "S74_tmp", "s74_tmp" }, // HEX_REG_SYS_REGS_S74 + { "S75", "S75", "S75_tmp", "s75_tmp" }, // HEX_REG_SYS_REGS_S75 + { "S76", "S76", "S76_tmp", "s76_tmp" }, // HEX_REG_SYS_REGS_S76 + { "S77", "S77", "S77_tmp", "s77_tmp" }, // HEX_REG_SYS_REGS_S77 + { "S78", "S78", "S78_tmp", "s78_tmp" }, // HEX_REG_SYS_REGS_S78 + { "S79", "S79", "S79_tmp", "s79_tmp" }, // HEX_REG_SYS_REGS_S79 + { "S80", "S80", "S80_tmp", "s80_tmp" }, // HEX_REG_SYS_REGS_S80 +}; + +/** + * \brief Lookup table for register names and alias of class SysRegs64. 
+ */ +HexRegNames hexagon_sysregs64_lt_v69[] = { + { "S1:0", "SGP1:0", "S1:0_tmp", "sgp1:0_tmp" }, // HEX_REG_SYS_REGS64_S1_0 + { NULL, NULL, NULL, NULL }, // - + { "S3:2", "S3:2", "S3:2_tmp", "s3:2_tmp" }, // HEX_REG_SYS_REGS64_S3_2 + { NULL, NULL, NULL, NULL }, // - + { "S5:4", "BADVA1:0", "S5:4_tmp", "badva1:0_tmp" }, // HEX_REG_SYS_REGS64_S5_4 + { NULL, NULL, NULL, NULL }, // - + { "S7:6", "CCR:SSR", "S7:6_tmp", "ccr:ssr_tmp" }, // HEX_REG_SYS_REGS64_S7_6 + { NULL, NULL, NULL, NULL }, // - + { "S9:8", "S9:8", "S9:8_tmp", "s9:8_tmp" }, // HEX_REG_SYS_REGS64_S9_8 + { NULL, NULL, NULL, NULL }, // - + { "S11:10", "S11:10", "S11:10_tmp", "s11:10_tmp" }, // HEX_REG_SYS_REGS64_S11_10 + { NULL, NULL, NULL, NULL }, // - + { "S13:12", "S13:12", "S13:12_tmp", "s13:12_tmp" }, // HEX_REG_SYS_REGS64_S13_12 + { NULL, NULL, NULL, NULL }, // - + { "S15:14", "S15:14", "S15:14_tmp", "s15:14_tmp" }, // HEX_REG_SYS_REGS64_S15_14 + { NULL, NULL, NULL, NULL }, // - + { "S17:16", "S17:16", "S17:16_tmp", "s17:16_tmp" }, // HEX_REG_SYS_REGS64_S17_16 + { NULL, NULL, NULL, NULL }, // - + { "S19:18", "S19:18", "S19:18_tmp", "s19:18_tmp" }, // HEX_REG_SYS_REGS64_S19_18 + { NULL, NULL, NULL, NULL }, // - + { "S21:20", "S21:20", "S21:20_tmp", "s21:20_tmp" }, // HEX_REG_SYS_REGS64_S21_20 + { NULL, NULL, NULL, NULL }, // - + { "S23:22", "S23:22", "S23:22_tmp", "s23:22_tmp" }, // HEX_REG_SYS_REGS64_S23_22 + { NULL, NULL, NULL, NULL }, // - + { "S25:24", "S25:24", "S25:24_tmp", "s25:24_tmp" }, // HEX_REG_SYS_REGS64_S25_24 + { NULL, NULL, NULL, NULL }, // - + { "S27:26", "S27:26", "S27:26_tmp", "s27:26_tmp" }, // HEX_REG_SYS_REGS64_S27_26 + { NULL, NULL, NULL, NULL }, // - + { "S29:28", "S29:28", "S29:28_tmp", "s29:28_tmp" }, // HEX_REG_SYS_REGS64_S29_28 + { NULL, NULL, NULL, NULL }, // - + { "S31:30", "PCYCLE", "S31:30_tmp", "pcycle_tmp" }, // HEX_REG_SYS_REGS64_S31_30 + { NULL, NULL, NULL, NULL }, // - + { "S33:32", "S33:32", "S33:32_tmp", "s33:32_tmp" }, // HEX_REG_SYS_REGS64_S33_32 + { NULL, 
NULL, NULL, NULL }, // - + { "S35:34", "S35:34", "S35:34_tmp", "s35:34_tmp" }, // HEX_REG_SYS_REGS64_S35_34 + { NULL, NULL, NULL, NULL }, // - + { "S37:36", "S37:36", "S37:36_tmp", "s37:36_tmp" }, // HEX_REG_SYS_REGS64_S37_36 + { NULL, NULL, NULL, NULL }, // - + { "S39:38", "S39:38", "S39:38_tmp", "s39:38_tmp" }, // HEX_REG_SYS_REGS64_S39_38 + { NULL, NULL, NULL, NULL }, // - + { "S41:40", "S41:40", "S41:40_tmp", "s41:40_tmp" }, // HEX_REG_SYS_REGS64_S41_40 + { NULL, NULL, NULL, NULL }, // - + { "S43:42", "S43:42", "S43:42_tmp", "s43:42_tmp" }, // HEX_REG_SYS_REGS64_S43_42 + { NULL, NULL, NULL, NULL }, // - + { "S45:44", "S45:44", "S45:44_tmp", "s45:44_tmp" }, // HEX_REG_SYS_REGS64_S45_44 + { NULL, NULL, NULL, NULL }, // - + { "S47:46", "S47:46", "S47:46_tmp", "s47:46_tmp" }, // HEX_REG_SYS_REGS64_S47_46 + { NULL, NULL, NULL, NULL }, // - + { "S49:48", "S49:48", "S49:48_tmp", "s49:48_tmp" }, // HEX_REG_SYS_REGS64_S49_48 + { NULL, NULL, NULL, NULL }, // - + { "S51:50", "S51:50", "S51:50_tmp", "s51:50_tmp" }, // HEX_REG_SYS_REGS64_S51_50 + { NULL, NULL, NULL, NULL }, // - + { "S53:52", "S53:52", "S53:52_tmp", "s53:52_tmp" }, // HEX_REG_SYS_REGS64_S53_52 + { NULL, NULL, NULL, NULL }, // - + { "S55:54", "S55:54", "S55:54_tmp", "s55:54_tmp" }, // HEX_REG_SYS_REGS64_S55_54 + { NULL, NULL, NULL, NULL }, // - + { "S57:56", "S57:56", "S57:56_tmp", "s57:56_tmp" }, // HEX_REG_SYS_REGS64_S57_56 + { NULL, NULL, NULL, NULL }, // - + { "S59:58", "S59:58", "S59:58_tmp", "s59:58_tmp" }, // HEX_REG_SYS_REGS64_S59_58 + { NULL, NULL, NULL, NULL }, // - + { "S61:60", "S61:60", "S61:60_tmp", "s61:60_tmp" }, // HEX_REG_SYS_REGS64_S61_60 + { NULL, NULL, NULL, NULL }, // - + { "S63:62", "S63:62", "S63:62_tmp", "s63:62_tmp" }, // HEX_REG_SYS_REGS64_S63_62 + { NULL, NULL, NULL, NULL }, // - + { "S65:64", "S65:64", "S65:64_tmp", "s65:64_tmp" }, // HEX_REG_SYS_REGS64_S65_64 + { NULL, NULL, NULL, NULL }, // - + { "S67:66", "S67:66", "S67:66_tmp", "s67:66_tmp" }, // HEX_REG_SYS_REGS64_S67_66 + { 
NULL, NULL, NULL, NULL }, // - + { "S69:68", "S69:68", "S69:68_tmp", "s69:68_tmp" }, // HEX_REG_SYS_REGS64_S69_68 + { NULL, NULL, NULL, NULL }, // - + { "S71:70", "S71:70", "S71:70_tmp", "s71:70_tmp" }, // HEX_REG_SYS_REGS64_S71_70 + { NULL, NULL, NULL, NULL }, // - + { "S73:72", "S73:72", "S73:72_tmp", "s73:72_tmp" }, // HEX_REG_SYS_REGS64_S73_72 + { NULL, NULL, NULL, NULL }, // - + { "S75:74", "S75:74", "S75:74_tmp", "s75:74_tmp" }, // HEX_REG_SYS_REGS64_S75_74 + { NULL, NULL, NULL, NULL }, // - + { "S77:76", "S77:76", "S77:76_tmp", "s77:76_tmp" }, // HEX_REG_SYS_REGS64_S77_76 + { NULL, NULL, NULL, NULL }, // - + { "S79:78", "S79:78", "S79:78_tmp", "s79:78_tmp" }, // HEX_REG_SYS_REGS64_S79_78 +}; + +#endif \ No newline at end of file diff --git a/librz/arch/isa/hexagon/il_ops/hexagon_il_A2_ops.c b/librz/arch/isa/hexagon/il_ops/hexagon_il_A2_ops.c new file mode 100644 index 00000000000..78a42258837 --- /dev/null +++ b/librz/arch/isa/hexagon/il_ops/hexagon_il_A2_ops.c @@ -0,0 +1,9164 @@ +// SPDX-FileCopyrightText: 2021 Rot127 +// SPDX-License-Identifier: LGPL-3.0-only + +// LLVM commit: b6f51787f6c8e77143f0aef6b58ddc7c55741d5c +// LLVM commit date: 2023-11-15 07:10:59 -0800 (ISO 8601 format) +// Date of code generation: 2024-03-16 06:22:39-05:00 +//======================================== +// The following code is generated. +// Do not edit. Repository of code generator: +// https://github.com/rizinorg/rz-hexagon + +#include +#include "../hexagon_il.h" +#include +#include + +// Rd = abs(Rs) +RzILOpEffect *hex_il_op_a2_abs(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // Rd = ((Rs < 0x0) ? 
(-Rs) : Rs); + RzILOpPure *op_LT_3 = SLT(Rs, SN(32, 0)); + RzILOpPure *op_NEG_4 = NEG(DUP(Rs)); + RzILOpPure *cond_5 = ITE(op_LT_3, op_NEG_4, DUP(Rs)); + RzILOpEffect *op_ASSIGN_6 = WRITE_REG(bundle, Rd_op, cond_5); + + RzILOpEffect *instruction_sequence = op_ASSIGN_6; + return instruction_sequence; +} + +// Rdd = abs(Rss) +RzILOpEffect *hex_il_op_a2_absp(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + + // Rdd = ((Rss < ((st64) 0x0)) ? (-Rss) : Rss); + RzILOpPure *op_LT_4 = SLT(Rss, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_NEG_5 = NEG(DUP(Rss)); + RzILOpPure *cond_6 = ITE(op_LT_4, op_NEG_5, DUP(Rss)); + RzILOpEffect *op_ASSIGN_7 = WRITE_REG(bundle, Rdd_op, cond_6); + + RzILOpEffect *instruction_sequence = op_ASSIGN_7; + return instruction_sequence; +} + +// Rd = abs(Rs):sat +RzILOpEffect *hex_il_op_a2_abssat(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_37 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) Rs) < ((st64) 0x0)) ? (-((st64) Rs)) : ((st64) Rs))), 0x0, 0x20) == ((((st64) Rs) < ((st64) 0x0)) ? (-((st64) Rs)) : ((st64) Rs)))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((((st64) Rs) < ((st64) 0x0)) ? (-((st64) Rs)) : ((st64) Rs)) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_LT_8 = SLT(CAST(64, MSB(Rs), DUP(Rs)), CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_NEG_10 = NEG(CAST(64, MSB(DUP(Rs)), DUP(Rs))); + RzILOpPure *cond_12 = ITE(op_LT_8, op_NEG_10, CAST(64, MSB(DUP(Rs)), DUP(Rs))); + RzILOpPure *op_LT_21 = SLT(CAST(64, MSB(DUP(Rs)), DUP(Rs)), CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_NEG_23 = NEG(CAST(64, MSB(DUP(Rs)), DUP(Rs))); + RzILOpPure *cond_25 = ITE(op_LT_21, op_NEG_23, CAST(64, MSB(DUP(Rs)), DUP(Rs))); + RzILOpPure *op_EQ_26 = EQ(SEXTRACT64(CAST(64, IL_FALSE, cond_12), SN(32, 0), SN(32, 0x20)), cond_25); + RzILOpPure *op_LT_41 = SLT(CAST(64, MSB(DUP(Rs)), DUP(Rs)), CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_NEG_43 = NEG(CAST(64, MSB(DUP(Rs)), DUP(Rs))); + RzILOpPure *cond_45 = ITE(op_LT_41, op_NEG_43, CAST(64, MSB(DUP(Rs)), DUP(Rs))); + RzILOpPure *op_LT_48 = SLT(cond_45, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_53 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_54 = NEG(op_LSHIFT_53); + RzILOpPure *op_LSHIFT_59 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_62 = SUB(op_LSHIFT_59, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_63 = ITE(op_LT_48, op_NEG_54, op_SUB_62); + RzILOpEffect *gcc_expr_64 = BRANCH(op_EQ_26, EMPTY(), set_usr_field_call_37); + + // h_tmp0 = HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) Rs) < ((st64) 0x0)) ? (-((st64) Rs)) : ((st64) Rs))), 0x0, 0x20) == ((((st64) Rs) < ((st64) 0x0)) ? (-((st64) Rs)) : ((st64) Rs)))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((((st64) Rs) < ((st64) 0x0)) ? (-((st64) Rs)) : ((st64) Rs)) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_66 = SETL("h_tmp0", cond_63); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) Rs) < ((st64) ...; + RzILOpEffect *seq_67 = SEQN(2, gcc_expr_64, op_ASSIGN_hybrid_tmp_66); + + // Rd = ((st32) ((sextract64(((ut64) ((((st64) Rs) < ((st64) 0x0)) ? (-((st64) Rs)) : ((st64) Rs))), 0x0, 0x20) == ((((st64) Rs) < ((st64) 0x0)) ? (-((st64) Rs)) : ((st64) Rs))) ? ((((st64) Rs) < ((st64) 0x0)) ? (-((st64) Rs)) : ((st64) Rs)) : h_tmp0)); + RzILOpPure *op_LT_30 = SLT(CAST(64, MSB(DUP(Rs)), DUP(Rs)), CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_NEG_32 = NEG(CAST(64, MSB(DUP(Rs)), DUP(Rs))); + RzILOpPure *cond_34 = ITE(op_LT_30, op_NEG_32, CAST(64, MSB(DUP(Rs)), DUP(Rs))); + RzILOpPure *cond_68 = ITE(DUP(op_EQ_26), cond_34, VARL("h_tmp0")); + RzILOpEffect *op_ASSIGN_70 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(cond_68), DUP(cond_68))); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) Rs) < ((s ...; + RzILOpEffect *seq_71 = SEQN(2, seq_67, op_ASSIGN_70); + + RzILOpEffect *instruction_sequence = seq_71; + return instruction_sequence; +} + +// Rd = add(Rs,Rt) +RzILOpEffect *hex_il_op_a2_add(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rd = Rs + Rt; + RzILOpPure *op_ADD_3 = ADD(Rs, Rt); + RzILOpEffect *op_ASSIGN_4 = WRITE_REG(bundle, Rd_op, op_ADD_3); + + RzILOpEffect *instruction_sequence = op_ASSIGN_4; + return instruction_sequence; +} + +// Rd = add(Rt.h,Rs.h):<<16 +RzILOpEffect *hex_il_op_a2_addh_h16_hh(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp 
*Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // Rd = (((st32) ((st16) ((Rt >> 0x10) & 0xffff))) + ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) << 0x10); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rt, SN(32, 16)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_13 = SHIFTRA(Rs, SN(32, 16)); + RzILOpPure *op_AND_15 = LOGAND(op_RSHIFT_13, SN(32, 0xffff)); + RzILOpPure *op_ADD_19 = ADD(CAST(32, MSB(CAST(16, MSB(op_AND_7), DUP(op_AND_7))), CAST(16, MSB(DUP(op_AND_7)), DUP(op_AND_7))), CAST(32, MSB(CAST(16, MSB(op_AND_15), DUP(op_AND_15))), CAST(16, MSB(DUP(op_AND_15)), DUP(op_AND_15)))); + RzILOpPure *op_LSHIFT_21 = SHIFTL0(op_ADD_19, SN(32, 16)); + RzILOpEffect *op_ASSIGN_22 = WRITE_REG(bundle, Rd_op, op_LSHIFT_21); + + RzILOpEffect *instruction_sequence = op_ASSIGN_22; + return instruction_sequence; +} + +// Rd = add(Rt.h,Rs.l):<<16 +RzILOpEffect *hex_il_op_a2_addh_h16_hl(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // Rd = (((st32) ((st16) ((Rt >> 0x10) & 0xffff))) + ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) << 0x10); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rt, SN(32, 16)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_13 = SHIFTRA(Rs, SN(32, 0)); + RzILOpPure *op_AND_15 = LOGAND(op_RSHIFT_13, SN(32, 0xffff)); + RzILOpPure *op_ADD_19 = ADD(CAST(32, MSB(CAST(16, MSB(op_AND_7), DUP(op_AND_7))), CAST(16, MSB(DUP(op_AND_7)), DUP(op_AND_7))), CAST(32, MSB(CAST(16, MSB(op_AND_15), DUP(op_AND_15))), CAST(16, MSB(DUP(op_AND_15)), DUP(op_AND_15)))); + RzILOpPure *op_LSHIFT_21 = 
SHIFTL0(op_ADD_19, SN(32, 16)); + RzILOpEffect *op_ASSIGN_22 = WRITE_REG(bundle, Rd_op, op_LSHIFT_21); + + RzILOpEffect *instruction_sequence = op_ASSIGN_22; + return instruction_sequence; +} + +// Rd = add(Rt.l,Rs.h):<<16 +RzILOpEffect *hex_il_op_a2_addh_h16_lh(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // Rd = (((st32) ((st16) ((Rt >> 0x0) & 0xffff))) + ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) << 0x10); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_13 = SHIFTRA(Rs, SN(32, 16)); + RzILOpPure *op_AND_15 = LOGAND(op_RSHIFT_13, SN(32, 0xffff)); + RzILOpPure *op_ADD_19 = ADD(CAST(32, MSB(CAST(16, MSB(op_AND_7), DUP(op_AND_7))), CAST(16, MSB(DUP(op_AND_7)), DUP(op_AND_7))), CAST(32, MSB(CAST(16, MSB(op_AND_15), DUP(op_AND_15))), CAST(16, MSB(DUP(op_AND_15)), DUP(op_AND_15)))); + RzILOpPure *op_LSHIFT_21 = SHIFTL0(op_ADD_19, SN(32, 16)); + RzILOpEffect *op_ASSIGN_22 = WRITE_REG(bundle, Rd_op, op_LSHIFT_21); + + RzILOpEffect *instruction_sequence = op_ASSIGN_22; + return instruction_sequence; +} + +// Rd = add(Rt.l,Rs.l):<<16 +RzILOpEffect *hex_il_op_a2_addh_h16_ll(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // Rd = (((st32) ((st16) ((Rt >> 0x0) & 0xffff))) + ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) << 0x10); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_7 = 
LOGAND(op_RSHIFT_5, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_13 = SHIFTRA(Rs, SN(32, 0)); + RzILOpPure *op_AND_15 = LOGAND(op_RSHIFT_13, SN(32, 0xffff)); + RzILOpPure *op_ADD_19 = ADD(CAST(32, MSB(CAST(16, MSB(op_AND_7), DUP(op_AND_7))), CAST(16, MSB(DUP(op_AND_7)), DUP(op_AND_7))), CAST(32, MSB(CAST(16, MSB(op_AND_15), DUP(op_AND_15))), CAST(16, MSB(DUP(op_AND_15)), DUP(op_AND_15)))); + RzILOpPure *op_LSHIFT_21 = SHIFTL0(op_ADD_19, SN(32, 16)); + RzILOpEffect *op_ASSIGN_22 = WRITE_REG(bundle, Rd_op, op_LSHIFT_21); + + RzILOpEffect *instruction_sequence = op_ASSIGN_22; + return instruction_sequence; +} + +// Rd = add(Rt.h,Rs.h):sat:<<16 +RzILOpEffect *hex_il_op_a2_addh_h16_sat_hh(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_66 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((st32) ((st16) ((Rt >> 0x10) & 0xffff))) + ((st32) ((st16) ((Rs >> 0x10) & 0xffff)))), 0x0, 0x10) == ((st64) ((st32) ((st16) ((Rt >> 0x10) & 0xffff))) + ((st32) ((st16) ((Rs >> 0x10) & 0xffff)))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st32) ((st16) ((Rt >> 0x10) & 0xffff))) + ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) < 0x0) ? 
(-(0x1 << 0xf)) : (0x1 << 0xf) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_8 = SHIFTRA(Rt, SN(32, 16)); + RzILOpPure *op_AND_10 = LOGAND(op_RSHIFT_8, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_16 = SHIFTRA(Rs, SN(32, 16)); + RzILOpPure *op_AND_18 = LOGAND(op_RSHIFT_16, SN(32, 0xffff)); + RzILOpPure *op_ADD_22 = ADD(CAST(32, MSB(CAST(16, MSB(op_AND_10), DUP(op_AND_10))), CAST(16, MSB(DUP(op_AND_10)), DUP(op_AND_10))), CAST(32, MSB(CAST(16, MSB(op_AND_18), DUP(op_AND_18))), CAST(16, MSB(DUP(op_AND_18)), DUP(op_AND_18)))); + RzILOpPure *op_RSHIFT_31 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_33 = LOGAND(op_RSHIFT_31, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_38 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_40 = LOGAND(op_RSHIFT_38, SN(32, 0xffff)); + RzILOpPure *op_ADD_44 = ADD(CAST(32, MSB(CAST(16, MSB(op_AND_33), DUP(op_AND_33))), CAST(16, MSB(DUP(op_AND_33)), DUP(op_AND_33))), CAST(32, MSB(CAST(16, MSB(op_AND_40), DUP(op_AND_40))), CAST(16, MSB(DUP(op_AND_40)), DUP(op_AND_40)))); + RzILOpPure *op_EQ_46 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_ADD_22), SN(32, 0), SN(32, 16)), CAST(64, MSB(op_ADD_44), DUP(op_ADD_44))); + RzILOpPure *op_RSHIFT_70 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_72 = LOGAND(op_RSHIFT_70, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_77 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_79 = LOGAND(op_RSHIFT_77, SN(32, 0xffff)); + RzILOpPure *op_ADD_83 = ADD(CAST(32, MSB(CAST(16, MSB(op_AND_72), DUP(op_AND_72))), CAST(16, MSB(DUP(op_AND_72)), DUP(op_AND_72))), CAST(32, MSB(CAST(16, MSB(op_AND_79), DUP(op_AND_79))), CAST(16, MSB(DUP(op_AND_79)), DUP(op_AND_79)))); + RzILOpPure *op_LT_85 = SLT(op_ADD_83, SN(32, 0)); + RzILOpPure *op_LSHIFT_90 = SHIFTL0(SN(64, 1), SN(32, 15)); + RzILOpPure *op_NEG_91 = NEG(op_LSHIFT_90); + RzILOpPure *op_LSHIFT_96 = SHIFTL0(SN(64, 1), SN(32, 15)); + RzILOpPure *op_SUB_99 = SUB(op_LSHIFT_96, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_100 = ITE(op_LT_85, op_NEG_91, op_SUB_99); + 
RzILOpEffect *gcc_expr_101 = BRANCH(op_EQ_46, EMPTY(), set_usr_field_call_66); + + // h_tmp1 = HYB(gcc_expr_if ((sextract64(((ut64) ((st32) ((st16) ((Rt >> 0x10) & 0xffff))) + ((st32) ((st16) ((Rs >> 0x10) & 0xffff)))), 0x0, 0x10) == ((st64) ((st32) ((st16) ((Rt >> 0x10) & 0xffff))) + ((st32) ((st16) ((Rs >> 0x10) & 0xffff)))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st32) ((st16) ((Rt >> 0x10) & 0xffff))) + ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) < 0x0) ? (-(0x1 << 0xf)) : (0x1 << 0xf) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_103 = SETL("h_tmp1", cond_100); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st32) ((st16) ((Rt >> ...; + RzILOpEffect *seq_104 = SEQN(2, gcc_expr_101, op_ASSIGN_hybrid_tmp_103); + + // Rd = ((st32) (((sextract64(((ut64) ((st32) ((st16) ((Rt >> 0x10) & 0xffff))) + ((st32) ((st16) ((Rs >> 0x10) & 0xffff)))), 0x0, 0x10) == ((st64) ((st32) ((st16) ((Rt >> 0x10) & 0xffff))) + ((st32) ((st16) ((Rs >> 0x10) & 0xffff))))) ? 
((st64) ((st32) ((st16) ((Rt >> 0x10) & 0xffff))) + ((st32) ((st16) ((Rs >> 0x10) & 0xffff)))) : h_tmp1) << 0x10)); + RzILOpPure *op_RSHIFT_50 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_52 = LOGAND(op_RSHIFT_50, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_57 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_59 = LOGAND(op_RSHIFT_57, SN(32, 0xffff)); + RzILOpPure *op_ADD_63 = ADD(CAST(32, MSB(CAST(16, MSB(op_AND_52), DUP(op_AND_52))), CAST(16, MSB(DUP(op_AND_52)), DUP(op_AND_52))), CAST(32, MSB(CAST(16, MSB(op_AND_59), DUP(op_AND_59))), CAST(16, MSB(DUP(op_AND_59)), DUP(op_AND_59)))); + RzILOpPure *cond_106 = ITE(DUP(op_EQ_46), CAST(64, MSB(op_ADD_63), DUP(op_ADD_63)), VARL("h_tmp1")); + RzILOpPure *op_LSHIFT_108 = SHIFTL0(cond_106, SN(32, 16)); + RzILOpEffect *op_ASSIGN_110 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(op_LSHIFT_108), DUP(op_LSHIFT_108))); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st32) ((st16) ((R ...; + RzILOpEffect *seq_111 = SEQN(2, seq_104, op_ASSIGN_110); + + RzILOpEffect *instruction_sequence = seq_111; + return instruction_sequence; +} + +// Rd = add(Rt.h,Rs.l):sat:<<16 +RzILOpEffect *hex_il_op_a2_addh_h16_sat_hl(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_66 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((st32) ((st16) ((Rt >> 0x10) & 0xffff))) + ((st32) ((st16) ((Rs >> 0x0) & 0xffff)))), 0x0, 0x10) == ((st64) ((st32) ((st16) ((Rt >> 0x10) & 0xffff))) + ((st32) ((st16) ((Rs >> 0x0) & 0xffff)))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, 
((ut32) 0x1))}, ((((st32) ((st16) ((Rt >> 0x10) & 0xffff))) + ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) < 0x0) ? (-(0x1 << 0xf)) : (0x1 << 0xf) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_8 = SHIFTRA(Rt, SN(32, 16)); + RzILOpPure *op_AND_10 = LOGAND(op_RSHIFT_8, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_16 = SHIFTRA(Rs, SN(32, 0)); + RzILOpPure *op_AND_18 = LOGAND(op_RSHIFT_16, SN(32, 0xffff)); + RzILOpPure *op_ADD_22 = ADD(CAST(32, MSB(CAST(16, MSB(op_AND_10), DUP(op_AND_10))), CAST(16, MSB(DUP(op_AND_10)), DUP(op_AND_10))), CAST(32, MSB(CAST(16, MSB(op_AND_18), DUP(op_AND_18))), CAST(16, MSB(DUP(op_AND_18)), DUP(op_AND_18)))); + RzILOpPure *op_RSHIFT_31 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_33 = LOGAND(op_RSHIFT_31, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_38 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_40 = LOGAND(op_RSHIFT_38, SN(32, 0xffff)); + RzILOpPure *op_ADD_44 = ADD(CAST(32, MSB(CAST(16, MSB(op_AND_33), DUP(op_AND_33))), CAST(16, MSB(DUP(op_AND_33)), DUP(op_AND_33))), CAST(32, MSB(CAST(16, MSB(op_AND_40), DUP(op_AND_40))), CAST(16, MSB(DUP(op_AND_40)), DUP(op_AND_40)))); + RzILOpPure *op_EQ_46 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_ADD_22), SN(32, 0), SN(32, 16)), CAST(64, MSB(op_ADD_44), DUP(op_ADD_44))); + RzILOpPure *op_RSHIFT_70 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_72 = LOGAND(op_RSHIFT_70, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_77 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_79 = LOGAND(op_RSHIFT_77, SN(32, 0xffff)); + RzILOpPure *op_ADD_83 = ADD(CAST(32, MSB(CAST(16, MSB(op_AND_72), DUP(op_AND_72))), CAST(16, MSB(DUP(op_AND_72)), DUP(op_AND_72))), CAST(32, MSB(CAST(16, MSB(op_AND_79), DUP(op_AND_79))), CAST(16, MSB(DUP(op_AND_79)), DUP(op_AND_79)))); + RzILOpPure *op_LT_85 = SLT(op_ADD_83, SN(32, 0)); + RzILOpPure *op_LSHIFT_90 = SHIFTL0(SN(64, 1), SN(32, 15)); + RzILOpPure *op_NEG_91 = NEG(op_LSHIFT_90); + RzILOpPure *op_LSHIFT_96 = SHIFTL0(SN(64, 1), SN(32, 15)); + RzILOpPure *op_SUB_99 = 
SUB(op_LSHIFT_96, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_100 = ITE(op_LT_85, op_NEG_91, op_SUB_99); + RzILOpEffect *gcc_expr_101 = BRANCH(op_EQ_46, EMPTY(), set_usr_field_call_66); + + // h_tmp2 = HYB(gcc_expr_if ((sextract64(((ut64) ((st32) ((st16) ((Rt >> 0x10) & 0xffff))) + ((st32) ((st16) ((Rs >> 0x0) & 0xffff)))), 0x0, 0x10) == ((st64) ((st32) ((st16) ((Rt >> 0x10) & 0xffff))) + ((st32) ((st16) ((Rs >> 0x0) & 0xffff)))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st32) ((st16) ((Rt >> 0x10) & 0xffff))) + ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) < 0x0) ? (-(0x1 << 0xf)) : (0x1 << 0xf) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_103 = SETL("h_tmp2", cond_100); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st32) ((st16) ((Rt >> ...; + RzILOpEffect *seq_104 = SEQN(2, gcc_expr_101, op_ASSIGN_hybrid_tmp_103); + + // Rd = ((st32) (((sextract64(((ut64) ((st32) ((st16) ((Rt >> 0x10) & 0xffff))) + ((st32) ((st16) ((Rs >> 0x0) & 0xffff)))), 0x0, 0x10) == ((st64) ((st32) ((st16) ((Rt >> 0x10) & 0xffff))) + ((st32) ((st16) ((Rs >> 0x0) & 0xffff))))) ? 
((st64) ((st32) ((st16) ((Rt >> 0x10) & 0xffff))) + ((st32) ((st16) ((Rs >> 0x0) & 0xffff)))) : h_tmp2) << 0x10)); + RzILOpPure *op_RSHIFT_50 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_52 = LOGAND(op_RSHIFT_50, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_57 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_59 = LOGAND(op_RSHIFT_57, SN(32, 0xffff)); + RzILOpPure *op_ADD_63 = ADD(CAST(32, MSB(CAST(16, MSB(op_AND_52), DUP(op_AND_52))), CAST(16, MSB(DUP(op_AND_52)), DUP(op_AND_52))), CAST(32, MSB(CAST(16, MSB(op_AND_59), DUP(op_AND_59))), CAST(16, MSB(DUP(op_AND_59)), DUP(op_AND_59)))); + RzILOpPure *cond_106 = ITE(DUP(op_EQ_46), CAST(64, MSB(op_ADD_63), DUP(op_ADD_63)), VARL("h_tmp2")); + RzILOpPure *op_LSHIFT_108 = SHIFTL0(cond_106, SN(32, 16)); + RzILOpEffect *op_ASSIGN_110 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(op_LSHIFT_108), DUP(op_LSHIFT_108))); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st32) ((st16) ((R ...; + RzILOpEffect *seq_111 = SEQN(2, seq_104, op_ASSIGN_110); + + RzILOpEffect *instruction_sequence = seq_111; + return instruction_sequence; +} + +// Rd = add(Rt.l,Rs.h):sat:<<16 +RzILOpEffect *hex_il_op_a2_addh_h16_sat_lh(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_66 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((st32) ((st16) ((Rt >> 0x0) & 0xffff))) + ((st32) ((st16) ((Rs >> 0x10) & 0xffff)))), 0x0, 0x10) == ((st64) ((st32) ((st16) ((Rt >> 0x0) & 0xffff))) + ((st32) ((st16) ((Rs >> 0x10) & 0xffff)))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, 
((ut32) 0x1))}, ((((st32) ((st16) ((Rt >> 0x0) & 0xffff))) + ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) < 0x0) ? (-(0x1 << 0xf)) : (0x1 << 0xf) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_8 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_10 = LOGAND(op_RSHIFT_8, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_16 = SHIFTRA(Rs, SN(32, 16)); + RzILOpPure *op_AND_18 = LOGAND(op_RSHIFT_16, SN(32, 0xffff)); + RzILOpPure *op_ADD_22 = ADD(CAST(32, MSB(CAST(16, MSB(op_AND_10), DUP(op_AND_10))), CAST(16, MSB(DUP(op_AND_10)), DUP(op_AND_10))), CAST(32, MSB(CAST(16, MSB(op_AND_18), DUP(op_AND_18))), CAST(16, MSB(DUP(op_AND_18)), DUP(op_AND_18)))); + RzILOpPure *op_RSHIFT_31 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_33 = LOGAND(op_RSHIFT_31, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_38 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_40 = LOGAND(op_RSHIFT_38, SN(32, 0xffff)); + RzILOpPure *op_ADD_44 = ADD(CAST(32, MSB(CAST(16, MSB(op_AND_33), DUP(op_AND_33))), CAST(16, MSB(DUP(op_AND_33)), DUP(op_AND_33))), CAST(32, MSB(CAST(16, MSB(op_AND_40), DUP(op_AND_40))), CAST(16, MSB(DUP(op_AND_40)), DUP(op_AND_40)))); + RzILOpPure *op_EQ_46 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_ADD_22), SN(32, 0), SN(32, 16)), CAST(64, MSB(op_ADD_44), DUP(op_ADD_44))); + RzILOpPure *op_RSHIFT_70 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_72 = LOGAND(op_RSHIFT_70, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_77 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_79 = LOGAND(op_RSHIFT_77, SN(32, 0xffff)); + RzILOpPure *op_ADD_83 = ADD(CAST(32, MSB(CAST(16, MSB(op_AND_72), DUP(op_AND_72))), CAST(16, MSB(DUP(op_AND_72)), DUP(op_AND_72))), CAST(32, MSB(CAST(16, MSB(op_AND_79), DUP(op_AND_79))), CAST(16, MSB(DUP(op_AND_79)), DUP(op_AND_79)))); + RzILOpPure *op_LT_85 = SLT(op_ADD_83, SN(32, 0)); + RzILOpPure *op_LSHIFT_90 = SHIFTL0(SN(64, 1), SN(32, 15)); + RzILOpPure *op_NEG_91 = NEG(op_LSHIFT_90); + RzILOpPure *op_LSHIFT_96 = SHIFTL0(SN(64, 1), SN(32, 15)); + RzILOpPure *op_SUB_99 = 
SUB(op_LSHIFT_96, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_100 = ITE(op_LT_85, op_NEG_91, op_SUB_99); + RzILOpEffect *gcc_expr_101 = BRANCH(op_EQ_46, EMPTY(), set_usr_field_call_66); + + // h_tmp3 = HYB(gcc_expr_if ((sextract64(((ut64) ((st32) ((st16) ((Rt >> 0x0) & 0xffff))) + ((st32) ((st16) ((Rs >> 0x10) & 0xffff)))), 0x0, 0x10) == ((st64) ((st32) ((st16) ((Rt >> 0x0) & 0xffff))) + ((st32) ((st16) ((Rs >> 0x10) & 0xffff)))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st32) ((st16) ((Rt >> 0x0) & 0xffff))) + ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) < 0x0) ? (-(0x1 << 0xf)) : (0x1 << 0xf) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_103 = SETL("h_tmp3", cond_100); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st32) ((st16) ((Rt >> ...; + RzILOpEffect *seq_104 = SEQN(2, gcc_expr_101, op_ASSIGN_hybrid_tmp_103); + + // Rd = ((st32) (((sextract64(((ut64) ((st32) ((st16) ((Rt >> 0x0) & 0xffff))) + ((st32) ((st16) ((Rs >> 0x10) & 0xffff)))), 0x0, 0x10) == ((st64) ((st32) ((st16) ((Rt >> 0x0) & 0xffff))) + ((st32) ((st16) ((Rs >> 0x10) & 0xffff))))) ? 
((st64) ((st32) ((st16) ((Rt >> 0x0) & 0xffff))) + ((st32) ((st16) ((Rs >> 0x10) & 0xffff)))) : h_tmp3) << 0x10)); + RzILOpPure *op_RSHIFT_50 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_52 = LOGAND(op_RSHIFT_50, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_57 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_59 = LOGAND(op_RSHIFT_57, SN(32, 0xffff)); + RzILOpPure *op_ADD_63 = ADD(CAST(32, MSB(CAST(16, MSB(op_AND_52), DUP(op_AND_52))), CAST(16, MSB(DUP(op_AND_52)), DUP(op_AND_52))), CAST(32, MSB(CAST(16, MSB(op_AND_59), DUP(op_AND_59))), CAST(16, MSB(DUP(op_AND_59)), DUP(op_AND_59)))); + RzILOpPure *cond_106 = ITE(DUP(op_EQ_46), CAST(64, MSB(op_ADD_63), DUP(op_ADD_63)), VARL("h_tmp3")); + RzILOpPure *op_LSHIFT_108 = SHIFTL0(cond_106, SN(32, 16)); + RzILOpEffect *op_ASSIGN_110 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(op_LSHIFT_108), DUP(op_LSHIFT_108))); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st32) ((st16) ((R ...; + RzILOpEffect *seq_111 = SEQN(2, seq_104, op_ASSIGN_110); + + RzILOpEffect *instruction_sequence = seq_111; + return instruction_sequence; +} + +// Rd = add(Rt.l,Rs.l):sat:<<16 +RzILOpEffect *hex_il_op_a2_addh_h16_sat_ll(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_66 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((st32) ((st16) ((Rt >> 0x0) & 0xffff))) + ((st32) ((st16) ((Rs >> 0x0) & 0xffff)))), 0x0, 0x10) == ((st64) ((st32) ((st16) ((Rt >> 0x0) & 0xffff))) + ((st32) ((st16) ((Rs >> 0x0) & 0xffff)))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, 
((ut32) 0x1))}, ((((st32) ((st16) ((Rt >> 0x0) & 0xffff))) + ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) < 0x0) ? (-(0x1 << 0xf)) : (0x1 << 0xf) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_8 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_10 = LOGAND(op_RSHIFT_8, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_16 = SHIFTRA(Rs, SN(32, 0)); + RzILOpPure *op_AND_18 = LOGAND(op_RSHIFT_16, SN(32, 0xffff)); + RzILOpPure *op_ADD_22 = ADD(CAST(32, MSB(CAST(16, MSB(op_AND_10), DUP(op_AND_10))), CAST(16, MSB(DUP(op_AND_10)), DUP(op_AND_10))), CAST(32, MSB(CAST(16, MSB(op_AND_18), DUP(op_AND_18))), CAST(16, MSB(DUP(op_AND_18)), DUP(op_AND_18)))); + RzILOpPure *op_RSHIFT_31 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_33 = LOGAND(op_RSHIFT_31, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_38 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_40 = LOGAND(op_RSHIFT_38, SN(32, 0xffff)); + RzILOpPure *op_ADD_44 = ADD(CAST(32, MSB(CAST(16, MSB(op_AND_33), DUP(op_AND_33))), CAST(16, MSB(DUP(op_AND_33)), DUP(op_AND_33))), CAST(32, MSB(CAST(16, MSB(op_AND_40), DUP(op_AND_40))), CAST(16, MSB(DUP(op_AND_40)), DUP(op_AND_40)))); + RzILOpPure *op_EQ_46 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_ADD_22), SN(32, 0), SN(32, 16)), CAST(64, MSB(op_ADD_44), DUP(op_ADD_44))); + RzILOpPure *op_RSHIFT_70 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_72 = LOGAND(op_RSHIFT_70, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_77 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_79 = LOGAND(op_RSHIFT_77, SN(32, 0xffff)); + RzILOpPure *op_ADD_83 = ADD(CAST(32, MSB(CAST(16, MSB(op_AND_72), DUP(op_AND_72))), CAST(16, MSB(DUP(op_AND_72)), DUP(op_AND_72))), CAST(32, MSB(CAST(16, MSB(op_AND_79), DUP(op_AND_79))), CAST(16, MSB(DUP(op_AND_79)), DUP(op_AND_79)))); + RzILOpPure *op_LT_85 = SLT(op_ADD_83, SN(32, 0)); + RzILOpPure *op_LSHIFT_90 = SHIFTL0(SN(64, 1), SN(32, 15)); + RzILOpPure *op_NEG_91 = NEG(op_LSHIFT_90); + RzILOpPure *op_LSHIFT_96 = SHIFTL0(SN(64, 1), SN(32, 15)); + RzILOpPure *op_SUB_99 = SUB(op_LSHIFT_96, 
CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_100 = ITE(op_LT_85, op_NEG_91, op_SUB_99); + RzILOpEffect *gcc_expr_101 = BRANCH(op_EQ_46, EMPTY(), set_usr_field_call_66); + + // h_tmp4 = HYB(gcc_expr_if ((sextract64(((ut64) ((st32) ((st16) ((Rt >> 0x0) & 0xffff))) + ((st32) ((st16) ((Rs >> 0x0) & 0xffff)))), 0x0, 0x10) == ((st64) ((st32) ((st16) ((Rt >> 0x0) & 0xffff))) + ((st32) ((st16) ((Rs >> 0x0) & 0xffff)))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st32) ((st16) ((Rt >> 0x0) & 0xffff))) + ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) < 0x0) ? (-(0x1 << 0xf)) : (0x1 << 0xf) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_103 = SETL("h_tmp4", cond_100); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st32) ((st16) ((Rt >> ...; + RzILOpEffect *seq_104 = SEQN(2, gcc_expr_101, op_ASSIGN_hybrid_tmp_103); + + // Rd = ((st32) (((sextract64(((ut64) ((st32) ((st16) ((Rt >> 0x0) & 0xffff))) + ((st32) ((st16) ((Rs >> 0x0) & 0xffff)))), 0x0, 0x10) == ((st64) ((st32) ((st16) ((Rt >> 0x0) & 0xffff))) + ((st32) ((st16) ((Rs >> 0x0) & 0xffff))))) ? 
((st64) ((st32) ((st16) ((Rt >> 0x0) & 0xffff))) + ((st32) ((st16) ((Rs >> 0x0) & 0xffff)))) : h_tmp4) << 0x10)); + RzILOpPure *op_RSHIFT_50 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_52 = LOGAND(op_RSHIFT_50, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_57 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_59 = LOGAND(op_RSHIFT_57, SN(32, 0xffff)); + RzILOpPure *op_ADD_63 = ADD(CAST(32, MSB(CAST(16, MSB(op_AND_52), DUP(op_AND_52))), CAST(16, MSB(DUP(op_AND_52)), DUP(op_AND_52))), CAST(32, MSB(CAST(16, MSB(op_AND_59), DUP(op_AND_59))), CAST(16, MSB(DUP(op_AND_59)), DUP(op_AND_59)))); + RzILOpPure *cond_106 = ITE(DUP(op_EQ_46), CAST(64, MSB(op_ADD_63), DUP(op_ADD_63)), VARL("h_tmp4")); + RzILOpPure *op_LSHIFT_108 = SHIFTL0(cond_106, SN(32, 16)); + RzILOpEffect *op_ASSIGN_110 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(op_LSHIFT_108), DUP(op_LSHIFT_108))); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st32) ((st16) ((R ...; + RzILOpEffect *seq_111 = SEQN(2, seq_104, op_ASSIGN_110); + + RzILOpEffect *instruction_sequence = seq_111; + return instruction_sequence; +} + +// Rd = add(Rt.l,Rs.h) +RzILOpEffect *hex_il_op_a2_addh_l16_hl(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // Rd = ((st32) sextract64(((ut64) ((st32) ((st16) ((Rt >> 0x0) & 0xffff))) + ((st32) ((st16) ((Rs >> 0x10) & 0xffff)))), 0x0, 0x10)); + RzILOpPure *op_RSHIFT_8 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_10 = LOGAND(op_RSHIFT_8, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_16 = SHIFTRA(Rs, SN(32, 16)); + RzILOpPure *op_AND_18 = LOGAND(op_RSHIFT_16, SN(32, 0xffff)); + RzILOpPure *op_ADD_22 = ADD(CAST(32, MSB(CAST(16, MSB(op_AND_10), DUP(op_AND_10))), CAST(16, MSB(DUP(op_AND_10)), 
DUP(op_AND_10))), CAST(32, MSB(CAST(16, MSB(op_AND_18), DUP(op_AND_18))), CAST(16, MSB(DUP(op_AND_18)), DUP(op_AND_18)))); + RzILOpEffect *op_ASSIGN_29 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(SEXTRACT64(CAST(64, IL_FALSE, op_ADD_22), SN(32, 0), SN(32, 16))), SEXTRACT64(CAST(64, IL_FALSE, DUP(op_ADD_22)), SN(32, 0), SN(32, 16)))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_29; + return instruction_sequence; +} + +// Rd = add(Rt.l,Rs.l) +RzILOpEffect *hex_il_op_a2_addh_l16_ll(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // Rd = ((st32) sextract64(((ut64) ((st32) ((st16) ((Rt >> 0x0) & 0xffff))) + ((st32) ((st16) ((Rs >> 0x0) & 0xffff)))), 0x0, 0x10)); + RzILOpPure *op_RSHIFT_8 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_10 = LOGAND(op_RSHIFT_8, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_16 = SHIFTRA(Rs, SN(32, 0)); + RzILOpPure *op_AND_18 = LOGAND(op_RSHIFT_16, SN(32, 0xffff)); + RzILOpPure *op_ADD_22 = ADD(CAST(32, MSB(CAST(16, MSB(op_AND_10), DUP(op_AND_10))), CAST(16, MSB(DUP(op_AND_10)), DUP(op_AND_10))), CAST(32, MSB(CAST(16, MSB(op_AND_18), DUP(op_AND_18))), CAST(16, MSB(DUP(op_AND_18)), DUP(op_AND_18)))); + RzILOpEffect *op_ASSIGN_29 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(SEXTRACT64(CAST(64, IL_FALSE, op_ADD_22), SN(32, 0), SN(32, 16))), SEXTRACT64(CAST(64, IL_FALSE, DUP(op_ADD_22)), SN(32, 0), SN(32, 16)))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_29; + return instruction_sequence; +} + +// Rd = add(Rt.l,Rs.h):sat +RzILOpEffect *hex_il_op_a2_addh_l16_sat_hl(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rt_op = ISA2REG(hi, 
't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_66 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((st32) ((st16) ((Rt >> 0x0) & 0xffff))) + ((st32) ((st16) ((Rs >> 0x10) & 0xffff)))), 0x0, 0x10) == ((st64) ((st32) ((st16) ((Rt >> 0x0) & 0xffff))) + ((st32) ((st16) ((Rs >> 0x10) & 0xffff)))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st32) ((st16) ((Rt >> 0x0) & 0xffff))) + ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) < 0x0) ? (-(0x1 << 0xf)) : (0x1 << 0xf) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_8 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_10 = LOGAND(op_RSHIFT_8, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_16 = SHIFTRA(Rs, SN(32, 16)); + RzILOpPure *op_AND_18 = LOGAND(op_RSHIFT_16, SN(32, 0xffff)); + RzILOpPure *op_ADD_22 = ADD(CAST(32, MSB(CAST(16, MSB(op_AND_10), DUP(op_AND_10))), CAST(16, MSB(DUP(op_AND_10)), DUP(op_AND_10))), CAST(32, MSB(CAST(16, MSB(op_AND_18), DUP(op_AND_18))), CAST(16, MSB(DUP(op_AND_18)), DUP(op_AND_18)))); + RzILOpPure *op_RSHIFT_31 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_33 = LOGAND(op_RSHIFT_31, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_38 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_40 = LOGAND(op_RSHIFT_38, SN(32, 0xffff)); + RzILOpPure *op_ADD_44 = ADD(CAST(32, MSB(CAST(16, MSB(op_AND_33), DUP(op_AND_33))), CAST(16, MSB(DUP(op_AND_33)), DUP(op_AND_33))), CAST(32, MSB(CAST(16, MSB(op_AND_40), DUP(op_AND_40))), CAST(16, MSB(DUP(op_AND_40)), DUP(op_AND_40)))); + RzILOpPure *op_EQ_46 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_ADD_22), SN(32, 0), SN(32, 16)), CAST(64, MSB(op_ADD_44), DUP(op_ADD_44))); + RzILOpPure *op_RSHIFT_70 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_72 = 
LOGAND(op_RSHIFT_70, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_77 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_79 = LOGAND(op_RSHIFT_77, SN(32, 0xffff)); + RzILOpPure *op_ADD_83 = ADD(CAST(32, MSB(CAST(16, MSB(op_AND_72), DUP(op_AND_72))), CAST(16, MSB(DUP(op_AND_72)), DUP(op_AND_72))), CAST(32, MSB(CAST(16, MSB(op_AND_79), DUP(op_AND_79))), CAST(16, MSB(DUP(op_AND_79)), DUP(op_AND_79)))); + RzILOpPure *op_LT_85 = SLT(op_ADD_83, SN(32, 0)); + RzILOpPure *op_LSHIFT_90 = SHIFTL0(SN(64, 1), SN(32, 15)); + RzILOpPure *op_NEG_91 = NEG(op_LSHIFT_90); + RzILOpPure *op_LSHIFT_96 = SHIFTL0(SN(64, 1), SN(32, 15)); + RzILOpPure *op_SUB_99 = SUB(op_LSHIFT_96, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_100 = ITE(op_LT_85, op_NEG_91, op_SUB_99); + RzILOpEffect *gcc_expr_101 = BRANCH(op_EQ_46, EMPTY(), set_usr_field_call_66); + + // h_tmp5 = HYB(gcc_expr_if ((sextract64(((ut64) ((st32) ((st16) ((Rt >> 0x0) & 0xffff))) + ((st32) ((st16) ((Rs >> 0x10) & 0xffff)))), 0x0, 0x10) == ((st64) ((st32) ((st16) ((Rt >> 0x0) & 0xffff))) + ((st32) ((st16) ((Rs >> 0x10) & 0xffff)))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st32) ((st16) ((Rt >> 0x0) & 0xffff))) + ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) < 0x0) ? (-(0x1 << 0xf)) : (0x1 << 0xf) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_103 = SETL("h_tmp5", cond_100); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st32) ((st16) ((Rt >> ...; + RzILOpEffect *seq_104 = SEQN(2, gcc_expr_101, op_ASSIGN_hybrid_tmp_103); + + // Rd = ((st32) ((sextract64(((ut64) ((st32) ((st16) ((Rt >> 0x0) & 0xffff))) + ((st32) ((st16) ((Rs >> 0x10) & 0xffff)))), 0x0, 0x10) == ((st64) ((st32) ((st16) ((Rt >> 0x0) & 0xffff))) + ((st32) ((st16) ((Rs >> 0x10) & 0xffff))))) ? 
((st64) ((st32) ((st16) ((Rt >> 0x0) & 0xffff))) + ((st32) ((st16) ((Rs >> 0x10) & 0xffff)))) : h_tmp5)); + RzILOpPure *op_RSHIFT_50 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_52 = LOGAND(op_RSHIFT_50, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_57 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_59 = LOGAND(op_RSHIFT_57, SN(32, 0xffff)); + RzILOpPure *op_ADD_63 = ADD(CAST(32, MSB(CAST(16, MSB(op_AND_52), DUP(op_AND_52))), CAST(16, MSB(DUP(op_AND_52)), DUP(op_AND_52))), CAST(32, MSB(CAST(16, MSB(op_AND_59), DUP(op_AND_59))), CAST(16, MSB(DUP(op_AND_59)), DUP(op_AND_59)))); + RzILOpPure *cond_106 = ITE(DUP(op_EQ_46), CAST(64, MSB(op_ADD_63), DUP(op_ADD_63)), VARL("h_tmp5")); + RzILOpEffect *op_ASSIGN_108 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(cond_106), DUP(cond_106))); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st32) ((st16) ((R ...; + RzILOpEffect *seq_109 = SEQN(2, seq_104, op_ASSIGN_108); + + RzILOpEffect *instruction_sequence = seq_109; + return instruction_sequence; +} + +// Rd = add(Rt.l,Rs.l):sat +RzILOpEffect *hex_il_op_a2_addh_l16_sat_ll(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_66 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((st32) ((st16) ((Rt >> 0x0) & 0xffff))) + ((st32) ((st16) ((Rs >> 0x0) & 0xffff)))), 0x0, 0x10) == ((st64) ((st32) ((st16) ((Rt >> 0x0) & 0xffff))) + ((st32) ((st16) ((Rs >> 0x0) & 0xffff)))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st32) ((st16) ((Rt >> 0x0) & 0xffff))) + ((st32) ((st16) ((Rs >> 
0x0) & 0xffff))) < 0x0) ? (-(0x1 << 0xf)) : (0x1 << 0xf) - ((st64) 0x1)));
+	RzILOpPure *op_RSHIFT_8 = SHIFTRA(Rt, SN(32, 0));
+	RzILOpPure *op_AND_10 = LOGAND(op_RSHIFT_8, SN(32, 0xffff));
+	RzILOpPure *op_RSHIFT_16 = SHIFTRA(Rs, SN(32, 0));
+	RzILOpPure *op_AND_18 = LOGAND(op_RSHIFT_16, SN(32, 0xffff));
+	RzILOpPure *op_ADD_22 = ADD(CAST(32, MSB(CAST(16, MSB(op_AND_10), DUP(op_AND_10))), CAST(16, MSB(DUP(op_AND_10)), DUP(op_AND_10))), CAST(32, MSB(CAST(16, MSB(op_AND_18), DUP(op_AND_18))), CAST(16, MSB(DUP(op_AND_18)), DUP(op_AND_18))));
+	RzILOpPure *op_RSHIFT_31 = SHIFTRA(DUP(Rt), SN(32, 0));
+	RzILOpPure *op_AND_33 = LOGAND(op_RSHIFT_31, SN(32, 0xffff));
+	RzILOpPure *op_RSHIFT_38 = SHIFTRA(DUP(Rs), SN(32, 0));
+	RzILOpPure *op_AND_40 = LOGAND(op_RSHIFT_38, SN(32, 0xffff));
+	RzILOpPure *op_ADD_44 = ADD(CAST(32, MSB(CAST(16, MSB(op_AND_33), DUP(op_AND_33))), CAST(16, MSB(DUP(op_AND_33)), DUP(op_AND_33))), CAST(32, MSB(CAST(16, MSB(op_AND_40), DUP(op_AND_40))), CAST(16, MSB(DUP(op_AND_40)), DUP(op_AND_40))));
+	RzILOpPure *op_EQ_46 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_ADD_22), SN(32, 0), SN(32, 16)), CAST(64, MSB(op_ADD_44), DUP(op_ADD_44)));
+	RzILOpPure *op_RSHIFT_70 = SHIFTRA(DUP(Rt), SN(32, 0));
+	RzILOpPure *op_AND_72 = LOGAND(op_RSHIFT_70, SN(32, 0xffff));
+	RzILOpPure *op_RSHIFT_77 = SHIFTRA(DUP(Rs), SN(32, 0));
+	RzILOpPure *op_AND_79 = LOGAND(op_RSHIFT_77, SN(32, 0xffff));
+	RzILOpPure *op_ADD_83 = ADD(CAST(32, MSB(CAST(16, MSB(op_AND_72), DUP(op_AND_72))), CAST(16, MSB(DUP(op_AND_72)), DUP(op_AND_72))), CAST(32, MSB(CAST(16, MSB(op_AND_79), DUP(op_AND_79))), CAST(16, MSB(DUP(op_AND_79)), DUP(op_AND_79))));
+	RzILOpPure *op_LT_85 = SLT(op_ADD_83, SN(32, 0));
+	RzILOpPure *op_LSHIFT_90 = SHIFTL0(SN(64, 1), SN(32, 15));
+	RzILOpPure *op_NEG_91 = NEG(op_LSHIFT_90);
+	RzILOpPure *op_LSHIFT_96 = SHIFTL0(SN(64, 1), SN(32, 15));
+	RzILOpPure *op_SUB_99 = SUB(op_LSHIFT_96, CAST(64, MSB(SN(32, 1)), SN(32, 1)));
+	RzILOpPure *cond_100 = ITE(op_LT_85, op_NEG_91, op_SUB_99);
+	RzILOpEffect *gcc_expr_101 = BRANCH(op_EQ_46, EMPTY(), set_usr_field_call_66);
+
+	// h_tmp6 = HYB(gcc_expr_if ((sextract64(((ut64) ((st32) ((st16) ((Rt >> 0x0) & 0xffff))) + ((st32) ((st16) ((Rs >> 0x0) & 0xffff)))), 0x0, 0x10) == ((st64) ((st32) ((st16) ((Rt >> 0x0) & 0xffff))) + ((st32) ((st16) ((Rs >> 0x0) & 0xffff)))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st32) ((st16) ((Rt >> 0x0) & 0xffff))) + ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) < 0x0) ? (-(0x1 << 0xf)) : (0x1 << 0xf) - ((st64) 0x1)));
+	RzILOpEffect *op_ASSIGN_hybrid_tmp_103 = SETL("h_tmp6", cond_100);
+
+	// seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st32) ((st16) ((Rt >> ...;
+	RzILOpEffect *seq_104 = SEQN(2, gcc_expr_101, op_ASSIGN_hybrid_tmp_103);
+
+	// Rd = ((st32) ((sextract64(((ut64) ((st32) ((st16) ((Rt >> 0x0) & 0xffff))) + ((st32) ((st16) ((Rs >> 0x0) & 0xffff)))), 0x0, 0x10) == ((st64) ((st32) ((st16) ((Rt >> 0x0) & 0xffff))) + ((st32) ((st16) ((Rs >> 0x0) & 0xffff))))) ? ((st64) ((st32) ((st16) ((Rt >> 0x0) & 0xffff))) + ((st32) ((st16) ((Rs >> 0x0) & 0xffff)))) : h_tmp6));
+	RzILOpPure *op_RSHIFT_50 = SHIFTRA(DUP(Rt), SN(32, 0));
+	RzILOpPure *op_AND_52 = LOGAND(op_RSHIFT_50, SN(32, 0xffff));
+	RzILOpPure *op_RSHIFT_57 = SHIFTRA(DUP(Rs), SN(32, 0));
+	RzILOpPure *op_AND_59 = LOGAND(op_RSHIFT_57, SN(32, 0xffff));
+	RzILOpPure *op_ADD_63 = ADD(CAST(32, MSB(CAST(16, MSB(op_AND_52), DUP(op_AND_52))), CAST(16, MSB(DUP(op_AND_52)), DUP(op_AND_52))), CAST(32, MSB(CAST(16, MSB(op_AND_59), DUP(op_AND_59))), CAST(16, MSB(DUP(op_AND_59)), DUP(op_AND_59))));
+	RzILOpPure *cond_106 = ITE(DUP(op_EQ_46), CAST(64, MSB(op_ADD_63), DUP(op_ADD_63)), VARL("h_tmp6"));
+	RzILOpEffect *op_ASSIGN_108 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(cond_106), DUP(cond_106)));
+
+	// seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st32) ((st16) ((R ...;
+	RzILOpEffect *seq_109 = SEQN(2, seq_104, op_ASSIGN_108);
+
+	RzILOpEffect *instruction_sequence = seq_109;
+	return instruction_sequence;
+}
+
+// Rd = add(Rs,Ii)
+RzILOpEffect *hex_il_op_a2_addi(HexInsnPktBundle *bundle) {
+	const HexInsn *hi = bundle->insn;
+	HexPkt *pkt = bundle->pkt;
+	// READ
+	RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's'));
+	const HexOp *Rd_op = ISA2REG(hi, 'd', false);
+	const HexOp *Rs_op = ISA2REG(hi, 's', false);
+	RzILOpPure *Rs = READ_REG(pkt, Rs_op, false);
+
+	// s = s;
+	RzILOpEffect *imm_assign_0 = SETL("s", s);
+
+	// Rd = Rs + s;
+	RzILOpPure *op_ADD_4 = ADD(Rs, VARL("s"));
+	RzILOpEffect *op_ASSIGN_5 = WRITE_REG(bundle, Rd_op, op_ADD_4);
+
+	RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_0, op_ASSIGN_5);
+	return instruction_sequence;
+}
+
+// Rdd = add(Rss,Rtt)
+RzILOpEffect *hex_il_op_a2_addp(HexInsnPktBundle *bundle) {
+	const HexInsn *hi = bundle->insn;
+	HexPkt *pkt = bundle->pkt;
+	// READ
+	const HexOp *Rdd_op = ISA2REG(hi, 'd', false);
+	const HexOp *Rss_op = ISA2REG(hi, 's', false);
+	RzILOpPure *Rss = READ_REG(pkt, Rss_op, false);
+	const HexOp *Rtt_op = ISA2REG(hi, 't', false);
+	RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false);
+
+	// Rdd = Rss + Rtt;
+	RzILOpPure *op_ADD_3 = ADD(Rss, Rtt);
+	RzILOpEffect *op_ASSIGN_4 = WRITE_REG(bundle, Rdd_op, op_ADD_3);
+
+	RzILOpEffect *instruction_sequence = op_ASSIGN_4;
+	return instruction_sequence;
+}
+
+// Rdd = add(Rss,Rtt):sat
+RzILOpEffect *hex_il_op_a2_addpsat(HexInsnPktBundle *bundle) {
+	const HexInsn *hi = bundle->insn;
+	HexPkt *pkt = bundle->pkt;
+	// READ
+	const HexOp *Rss_op = ISA2REG(hi, 's', false);
+	RzILOpPure *Rss = READ_REG(pkt, Rss_op, false);
+	// Declare: ut64 __a;
+	const HexOp *Rtt_op = ISA2REG(hi, 't', false);
+	RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false);
+	// Declare: ut64 __b;
+	// Declare: ut64 __sum;
+	// Declare: ut64 __xor;
+	// Declare: ut64 __mask;
+	const HexOp *Rdd_op = ISA2REG(hi, 'd', false);
+
+	// __a = ((ut64) Rss);
+	RzILOpEffect *op_ASSIGN_3 = SETL("__a", CAST(64, IL_FALSE, Rss));
+
+	// __b = ((ut64) Rtt);
+	RzILOpEffect *op_ASSIGN_7 = SETL("__b", CAST(64, IL_FALSE, Rtt));
+
+	// __sum = __a + __b;
+	RzILOpPure *op_ADD_8 = ADD(VARL("__a"), VARL("__b"));
+	RzILOpEffect *op_ASSIGN_10 = SETL("__sum", op_ADD_8);
+
+	// __xor = (__a ^ __b);
+	RzILOpPure *op_XOR_11 = LOGXOR(VARL("__a"), VARL("__b"));
+	RzILOpEffect *op_ASSIGN_13 = SETL("__xor", op_XOR_11);
+
+	// __mask = 0x8000000000000000;
+	RzILOpEffect *op_ASSIGN_16 = SETL("__mask", UN(64, 0x8000000000000000));
+
+	// Rdd = ((st64) __sum);
+	RzILOpEffect *op_ASSIGN_20 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, VARL("__sum")));
+
+	// Rdd = 0x7fffffffffffffff;
+	RzILOpEffect *op_ASSIGN_25 = WRITE_REG(bundle, Rdd_op, SN(64, 0x7fffffffffffffff));
+
+	// set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1));
+	RzILOpEffect *set_usr_field_call_28 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1)));
+
+	// Rdd = 0x8000000000000000;
+	RzILOpEffect *op_ASSIGN_30 = WRITE_REG(bundle, Rdd_op, SN(64, 0x8000000000000000));
+
+	// set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1));
+	RzILOpEffect *set_usr_field_call_33 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1)));
+
+	// seq(Rdd = 0x7fffffffffffffff; set_usr_field(bundle, HEX_REG_FIEL ...;
+	RzILOpEffect *seq_then_34 = SEQN(2, op_ASSIGN_25, set_usr_field_call_28);
+
+	// seq(Rdd = 0x8000000000000000; set_usr_field(bundle, HEX_REG_FIEL ...;
+	RzILOpEffect *seq_else_35 = SEQN(2, op_ASSIGN_30, set_usr_field_call_33);
+
+	// if ((__sum & __mask)) {seq(Rdd = 0x7fffffffffffffff; set_usr_field(bundle, HEX_REG_FIEL ...} else {seq(Rdd = 0x8000000000000000; set_usr_field(bundle, HEX_REG_FIEL ...};
+	RzILOpPure *op_AND_23 = LOGAND(VARL("__sum"), VARL("__mask"));
+	RzILOpEffect *branch_36 = BRANCH(NON_ZERO(op_AND_23), seq_then_34, seq_else_35);
+
+	// Rdd = ((st64) __sum);
+	RzILOpEffect *op_ASSIGN_38 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, VARL("__sum")));
+
+	// seq(if ((__sum & __mask)) {seq(Rdd = 0x7fffffffffffffff; set_usr ...;
+	RzILOpEffect *seq_then_39 = branch_36;
+
+	// seq(Rdd = ((st64) __sum));
+	RzILOpEffect *seq_else_40 = op_ASSIGN_38;
+
+	// if (((__a ^ __sum) & __mask)) {seq(if ((__sum & __mask)) {seq(Rdd = 0x7fffffffffffffff; set_usr ...} else {seq(Rdd = ((st64) __sum))};
+	RzILOpPure *op_XOR_21 = LOGXOR(VARL("__a"), VARL("__sum"));
+	RzILOpPure *op_AND_22 = LOGAND(op_XOR_21, VARL("__mask"));
+	RzILOpEffect *branch_41 = BRANCH(NON_ZERO(op_AND_22), seq_then_39, seq_else_40);
+
+	// seq(Rdd = ((st64) __sum));
+	RzILOpEffect *seq_then_42 = op_ASSIGN_20;
+
+	// seq(if (((__a ^ __sum) & __mask)) {seq(if ((__sum & __mask)) {se ...;
+	RzILOpEffect *seq_else_43 = branch_41;
+
+	// if ((__xor & __mask)) {seq(Rdd = ((st64) __sum))} else {seq(if (((__a ^ __sum) & __mask)) {seq(if ((__sum & __mask)) {se ...};
+	RzILOpPure *op_AND_17 = LOGAND(VARL("__xor"), VARL("__mask"));
+	RzILOpEffect *branch_44 = BRANCH(NON_ZERO(op_AND_17), seq_then_42, seq_else_43);
+
+	RzILOpEffect *instruction_sequence = SEQN(6, op_ASSIGN_3, op_ASSIGN_7, op_ASSIGN_10, op_ASSIGN_13, op_ASSIGN_16, branch_44);
+	return instruction_sequence;
+}
+
+// Rd = add(Rs,Rt):sat
+RzILOpEffect *hex_il_op_a2_addsat(HexInsnPktBundle *bundle) {
+	const HexInsn *hi = bundle->insn;
+	HexPkt *pkt = bundle->pkt;
+	// READ
+	const HexOp *Rd_op = ISA2REG(hi, 'd', false);
+	const HexOp *Rs_op = ISA2REG(hi, 's', false);
+	RzILOpPure *Rs = READ_REG(pkt, Rs_op, false);
+	const HexOp *Rt_op = ISA2REG(hi, 't', false);
+	RzILOpPure *Rt = READ_REG(pkt, Rt_op, false);
+
+	// set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1));
+	RzILOpEffect *set_usr_field_call_23 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1)));
+
+	// HYB(gcc_expr_if ((sextract64(((ut64) ((st64) Rs) + ((st64) Rt)), 0x0, 0x20) == ((st64) Rs) + ((st64) Rt))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) Rs) + ((st64) Rt) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1)));
+	RzILOpPure *op_ADD_8 = ADD(CAST(64, MSB(Rs), DUP(Rs)), CAST(64, MSB(Rt), DUP(Rt)));
+	RzILOpPure *op_ADD_16 = ADD(CAST(64, MSB(DUP(Rs)), DUP(Rs)), CAST(64, MSB(DUP(Rt)), DUP(Rt)));
+	RzILOpPure *op_EQ_17 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_ADD_8), SN(32, 0), SN(32, 0x20)), op_ADD_16);
+	RzILOpPure *op_ADD_26 = ADD(CAST(64, MSB(DUP(Rs)), DUP(Rs)), CAST(64, MSB(DUP(Rt)), DUP(Rt)));
+	RzILOpPure *op_LT_29 = SLT(op_ADD_26, CAST(64, MSB(SN(32, 0)), SN(32, 0)));
+	RzILOpPure *op_LSHIFT_34 = SHIFTL0(SN(64, 1), SN(32, 31));
+	RzILOpPure *op_NEG_35 = NEG(op_LSHIFT_34);
+	RzILOpPure *op_LSHIFT_40 = SHIFTL0(SN(64, 1), SN(32, 31));
+	RzILOpPure *op_SUB_43 = SUB(op_LSHIFT_40, CAST(64, MSB(SN(32, 1)), SN(32, 1)));
+	RzILOpPure *cond_44 = ITE(op_LT_29, op_NEG_35, op_SUB_43);
+	RzILOpEffect *gcc_expr_45 = BRANCH(op_EQ_17, EMPTY(), set_usr_field_call_23);
+
+	// h_tmp7 = HYB(gcc_expr_if ((sextract64(((ut64) ((st64) Rs) + ((st64) Rt)), 0x0, 0x20) == ((st64) Rs) + ((st64) Rt))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) Rs) + ((st64) Rt) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1)));
+	RzILOpEffect *op_ASSIGN_hybrid_tmp_47 = SETL("h_tmp7", cond_44);
+
+	// seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) Rs) + ((st64) R ...;
+	RzILOpEffect *seq_48 = SEQN(2, gcc_expr_45, op_ASSIGN_hybrid_tmp_47);
+
+	// Rd = ((st32) ((sextract64(((ut64) ((st64) Rs) + ((st64) Rt)), 0x0, 0x20) == ((st64) Rs) + ((st64) Rt)) ? ((st64) Rs) + ((st64) Rt) : h_tmp7));
+	RzILOpPure *op_ADD_20 = ADD(CAST(64, MSB(DUP(Rs)), DUP(Rs)), CAST(64, MSB(DUP(Rt)), DUP(Rt)));
+	RzILOpPure *cond_49 = ITE(DUP(op_EQ_17), op_ADD_20, VARL("h_tmp7"));
+	RzILOpEffect *op_ASSIGN_51 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(cond_49), DUP(cond_49)));
+
+	// seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) Rs) + ((st6 ...;
+	RzILOpEffect *seq_52 = SEQN(2, seq_48, op_ASSIGN_51);
+
+	RzILOpEffect *instruction_sequence = seq_52;
+	return instruction_sequence;
+}
+
+// Rdd = add(Rss,Rtt):raw:hi
+RzILOpEffect *hex_il_op_a2_addsph(HexInsnPktBundle *bundle) {
+	const HexInsn *hi = bundle->insn;
+	HexPkt *pkt = bundle->pkt;
+	// READ
+	const HexOp *Rdd_op = ISA2REG(hi, 'd', false);
+	const HexOp *Rtt_op = ISA2REG(hi, 't', false);
+	RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false);
+	const HexOp *Rss_op = ISA2REG(hi, 's', false);
+	RzILOpPure *Rss = READ_REG(pkt, Rss_op, false);
+
+	// Rdd = Rtt + sextract64(((ut64) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))), 0x0, 0x20);
+	RzILOpPure *op_RSHIFT_9 = SHIFTRA(Rss, SN(32, 0x20));
+	RzILOpPure *op_AND_11 = LOGAND(op_RSHIFT_9, SN(64, 0xffffffff));
+	RzILOpPure *op_ADD_19 = ADD(Rtt, SEXTRACT64(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(op_AND_11), DUP(op_AND_11))), CAST(32, MSB(DUP(op_AND_11)), DUP(op_AND_11)))), SN(32, 0), SN(32, 0x20)));
+	RzILOpEffect *op_ASSIGN_20 = WRITE_REG(bundle, Rdd_op, op_ADD_19);
+
+	RzILOpEffect *instruction_sequence = op_ASSIGN_20;
+	return instruction_sequence;
+}
+
+// Rdd = add(Rss,Rtt):raw:lo
+RzILOpEffect *hex_il_op_a2_addspl(HexInsnPktBundle *bundle) {
+	const HexInsn *hi = bundle->insn;
+	HexPkt *pkt = bundle->pkt;
+	// READ
+	const HexOp *Rdd_op = ISA2REG(hi, 'd', false);
+	const HexOp *Rtt_op = ISA2REG(hi, 't', false);
+	RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false);
+	const HexOp *Rss_op = ISA2REG(hi, 's', false);
+	RzILOpPure *Rss = READ_REG(pkt, Rss_op, false);
+
+	// Rdd = Rtt + sextract64(((ut64) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))), 0x0, 0x20);
+	RzILOpPure *op_RSHIFT_9 = SHIFTRA(Rss, SN(32, 0));
+	RzILOpPure *op_AND_11 = LOGAND(op_RSHIFT_9, SN(64, 0xffffffff));
+	RzILOpPure *op_ADD_19 = ADD(Rtt, SEXTRACT64(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(op_AND_11), DUP(op_AND_11))), CAST(32, MSB(DUP(op_AND_11)), DUP(op_AND_11)))), SN(32, 0), SN(32, 0x20)));
+	RzILOpEffect *op_ASSIGN_20 = WRITE_REG(bundle, Rdd_op, op_ADD_19);
+
+	RzILOpEffect *instruction_sequence = op_ASSIGN_20;
+	return instruction_sequence;
+}
+
+// Rd = and(Rs,Rt)
+RzILOpEffect *hex_il_op_a2_and(HexInsnPktBundle *bundle) {
+	const HexInsn *hi = bundle->insn;
+	HexPkt *pkt = bundle->pkt;
+	// READ
+	const HexOp *Rd_op = ISA2REG(hi, 'd', false);
+	const HexOp *Rs_op = ISA2REG(hi, 's', false);
+	RzILOpPure *Rs = READ_REG(pkt, Rs_op, false);
+	const HexOp *Rt_op = ISA2REG(hi, 't', false);
+	RzILOpPure *Rt = READ_REG(pkt, Rt_op, false);
+
+	// Rd = (Rs & Rt);
+	RzILOpPure *op_AND_3 = LOGAND(Rs, Rt);
+	RzILOpEffect *op_ASSIGN_4 = WRITE_REG(bundle, Rd_op, op_AND_3);
+
+	RzILOpEffect *instruction_sequence = op_ASSIGN_4;
+	return instruction_sequence;
+}
+
+// Rd = and(Rs,Ii)
+RzILOpEffect *hex_il_op_a2_andir(HexInsnPktBundle *bundle) {
+	const HexInsn *hi = bundle->insn;
+	HexPkt *pkt = bundle->pkt;
+	// READ
+	RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's'));
+	const HexOp *Rd_op = ISA2REG(hi, 'd', false);
+	const HexOp *Rs_op = ISA2REG(hi, 's', false);
+	RzILOpPure *Rs = READ_REG(pkt, Rs_op, false);
+
+	// s = s;
+	RzILOpEffect *imm_assign_0 = SETL("s", s);
+
+	// Rd = (Rs & s);
+	RzILOpPure *op_AND_4 = LOGAND(Rs, VARL("s"));
+	RzILOpEffect *op_ASSIGN_5 = WRITE_REG(bundle, Rd_op, op_AND_4);
+
+	RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_0, op_ASSIGN_5);
+	return instruction_sequence;
+}
+
+// Rdd = and(Rss,Rtt)
+RzILOpEffect *hex_il_op_a2_andp(HexInsnPktBundle *bundle) {
+	const HexInsn *hi = bundle->insn;
+	HexPkt *pkt = bundle->pkt;
+	// READ
+	const HexOp *Rdd_op = ISA2REG(hi, 'd', false);
+	const HexOp *Rss_op = ISA2REG(hi, 's', false);
+	RzILOpPure *Rss = READ_REG(pkt, Rss_op, false);
+	const HexOp *Rtt_op = ISA2REG(hi, 't', false);
+	RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false);
+
+	// Rdd = (Rss & Rtt);
+	RzILOpPure *op_AND_3 = LOGAND(Rss, Rtt);
+	RzILOpEffect *op_ASSIGN_4 = WRITE_REG(bundle, Rdd_op, op_AND_3);
+
+	RzILOpEffect *instruction_sequence = op_ASSIGN_4;
+	return instruction_sequence;
+}
+
+// Rd = aslh(Rs)
+RzILOpEffect *hex_il_op_a2_aslh(HexInsnPktBundle *bundle) {
+	const HexInsn *hi = bundle->insn;
+	HexPkt *pkt = bundle->pkt;
+	// READ
+	const HexOp *Rd_op = ISA2REG(hi, 'd', false);
+	const HexOp *Rs_op = ISA2REG(hi, 's', false);
+	RzILOpPure *Rs = READ_REG(pkt, Rs_op, false);
+
+	// Rd = (Rs << 0x10);
+	RzILOpPure *op_LSHIFT_3 = SHIFTL0(Rs, SN(32, 16));
+	RzILOpEffect *op_ASSIGN_4 = WRITE_REG(bundle, Rd_op, op_LSHIFT_3);
+
+	RzILOpEffect *instruction_sequence = op_ASSIGN_4;
+	return instruction_sequence;
+}
+
+// Rd = asrh(Rs)
+RzILOpEffect *hex_il_op_a2_asrh(HexInsnPktBundle *bundle) {
+	const HexInsn *hi = bundle->insn;
+	HexPkt *pkt = bundle->pkt;
+	// READ
+	const HexOp *Rd_op = ISA2REG(hi, 'd', false);
+	const HexOp *Rs_op = ISA2REG(hi, 's', false);
+	RzILOpPure *Rs = READ_REG(pkt, Rs_op, false);
+
+	// Rd = (Rs >> 0x10);
+	RzILOpPure *op_RSHIFT_3 = SHIFTRA(Rs, SN(32, 16));
+	RzILOpEffect *op_ASSIGN_4 = WRITE_REG(bundle, Rd_op, op_RSHIFT_3);
+
+	RzILOpEffect *instruction_sequence = op_ASSIGN_4;
+	return instruction_sequence;
+}
+
+// Rd = combine(Rt.h,Rs.h)
+RzILOpEffect *hex_il_op_a2_combine_hh(HexInsnPktBundle *bundle) {
+	const HexInsn *hi = bundle->insn;
+	HexPkt *pkt = bundle->pkt;
+	// READ
+	const HexOp *Rd_op = ISA2REG(hi, 'd', false);
+	const HexOp *Rt_op = ISA2REG(hi, 't', false);
+	RzILOpPure *Rt = READ_REG(pkt, Rt_op, false);
+	const HexOp *Rs_op = ISA2REG(hi, 's', false);
+	RzILOpPure *Rs = READ_REG(pkt, Rs_op, false);
+
+	// Rd = (((st32) (((ut16) ((Rt >> 0x10) & 0xffff)) << 0x10)) | ((st32) ((ut16) ((Rs >> 0x10) & 0xffff))));
+	RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rt, SN(32, 16));
+	RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(32, 0xffff));
+	RzILOpPure *op_LSHIFT_10 = SHIFTL0(CAST(16, IL_FALSE, op_AND_7), SN(32, 16));
+	RzILOpPure *op_RSHIFT_15 = SHIFTRA(Rs, SN(32, 16));
+	RzILOpPure *op_AND_17 = LOGAND(op_RSHIFT_15, SN(32, 0xffff));
+	RzILOpPure *op_OR_21 = LOGOR(CAST(32, IL_FALSE, op_LSHIFT_10), CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_17)));
+	RzILOpEffect *op_ASSIGN_22 = WRITE_REG(bundle, Rd_op, op_OR_21);
+
+	RzILOpEffect *instruction_sequence = op_ASSIGN_22;
+	return instruction_sequence;
+}
+
+// Rd = combine(Rt.h,Rs.l)
+RzILOpEffect *hex_il_op_a2_combine_hl(HexInsnPktBundle *bundle) {
+	const HexInsn *hi = bundle->insn;
+	HexPkt *pkt = bundle->pkt;
+	// READ
+	const HexOp *Rd_op = ISA2REG(hi, 'd', false);
+	const HexOp *Rt_op = ISA2REG(hi, 't', false);
+	RzILOpPure *Rt = READ_REG(pkt, Rt_op, false);
+	const HexOp *Rs_op = ISA2REG(hi, 's', false);
+	RzILOpPure *Rs = READ_REG(pkt, Rs_op, false);
+
+	// Rd = (((st32) (((ut16) ((Rt >> 0x10) & 0xffff)) << 0x10)) | ((st32) ((ut16) ((Rs >> 0x0) & 0xffff))));
+	RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rt, SN(32, 16));
+	RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(32, 0xffff));
+	RzILOpPure *op_LSHIFT_10 = SHIFTL0(CAST(16, IL_FALSE, op_AND_7), SN(32, 16));
+	RzILOpPure *op_RSHIFT_15 = SHIFTRA(Rs, SN(32, 0));
+	RzILOpPure *op_AND_17 = LOGAND(op_RSHIFT_15, SN(32, 0xffff));
+	RzILOpPure *op_OR_21 = LOGOR(CAST(32, IL_FALSE, op_LSHIFT_10), CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_17)));
+	RzILOpEffect *op_ASSIGN_22 = WRITE_REG(bundle, Rd_op, op_OR_21);
+
+	RzILOpEffect *instruction_sequence = op_ASSIGN_22;
+	return instruction_sequence;
+}
+
+// Rd = combine(Rt.l,Rs.h)
+RzILOpEffect *hex_il_op_a2_combine_lh(HexInsnPktBundle *bundle) {
+	const HexInsn *hi = bundle->insn;
+	HexPkt *pkt = bundle->pkt;
+	// READ
+	const HexOp *Rd_op = ISA2REG(hi, 'd', false);
+	const HexOp *Rt_op = ISA2REG(hi, 't', false);
+	RzILOpPure *Rt = READ_REG(pkt, Rt_op, false);
+	const HexOp *Rs_op = ISA2REG(hi, 's', false);
+	RzILOpPure *Rs = READ_REG(pkt, Rs_op, false);
+
+	// Rd = (((st32) (((ut16) ((Rt >> 0x0) & 0xffff)) << 0x10)) | ((st32) ((ut16) ((Rs >> 0x10) & 0xffff))));
+	RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rt, SN(32, 0));
+	RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(32, 0xffff));
+	RzILOpPure *op_LSHIFT_10 = SHIFTL0(CAST(16, IL_FALSE, op_AND_7), SN(32, 16));
+	RzILOpPure *op_RSHIFT_15 = SHIFTRA(Rs, SN(32, 16));
+	RzILOpPure *op_AND_17 = LOGAND(op_RSHIFT_15, SN(32, 0xffff));
+	RzILOpPure *op_OR_21 = LOGOR(CAST(32, IL_FALSE, op_LSHIFT_10), CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_17)));
+	RzILOpEffect *op_ASSIGN_22 = WRITE_REG(bundle, Rd_op, op_OR_21);
+
+	RzILOpEffect *instruction_sequence = op_ASSIGN_22;
+	return instruction_sequence;
+}
+
+// Rd = combine(Rt.l,Rs.l)
+RzILOpEffect *hex_il_op_a2_combine_ll(HexInsnPktBundle *bundle) {
+	const HexInsn *hi = bundle->insn;
+	HexPkt *pkt = bundle->pkt;
+	// READ
+	const HexOp *Rd_op = ISA2REG(hi, 'd', false);
+	const HexOp *Rt_op = ISA2REG(hi, 't', false);
+	RzILOpPure *Rt = READ_REG(pkt, Rt_op, false);
+	const HexOp *Rs_op = ISA2REG(hi, 's', false);
+	RzILOpPure *Rs = READ_REG(pkt, Rs_op, false);
+
+	// Rd = (((st32) (((ut16) ((Rt >> 0x0) & 0xffff)) << 0x10)) | ((st32) ((ut16) ((Rs >> 0x0) & 0xffff))));
+	RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rt, SN(32, 0));
+	RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(32, 0xffff));
+	RzILOpPure *op_LSHIFT_10 = SHIFTL0(CAST(16, IL_FALSE, op_AND_7), SN(32, 16));
+	RzILOpPure *op_RSHIFT_15 = SHIFTRA(Rs, SN(32, 0));
+	RzILOpPure *op_AND_17 = LOGAND(op_RSHIFT_15, SN(32, 0xffff));
+	RzILOpPure *op_OR_21 = LOGOR(CAST(32, IL_FALSE, op_LSHIFT_10), CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_17)));
+	RzILOpEffect *op_ASSIGN_22 = WRITE_REG(bundle, Rd_op, op_OR_21);
+
+	RzILOpEffect *instruction_sequence = op_ASSIGN_22;
+	return instruction_sequence;
+}
+
+// Rdd = combine(Ii,II)
+RzILOpEffect *hex_il_op_a2_combineii(HexInsnPktBundle *bundle) {
+	const HexInsn *hi = bundle->insn;
+	HexPkt *pkt = bundle->pkt;
+	// READ
+	RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's'));
+	const HexOp *Rdd_op = ISA2REG(hi, 'd', false);
+	RzILOpPure *S = SN(32, (st32)ISA2IMM(hi, 'S'));
+
+	// s = s;
+	RzILOpEffect *imm_assign_0 = SETL("s", s);
+
+	// S = S;
+	RzILOpEffect *imm_assign_10 = SETL("S", S);
+
+	// Rdd = ((Rdd & (~(0xffffffff << 0x0))) | ((((st64) S) & 0xffffffff) << 0x0));
+	RzILOpPure *op_LSHIFT_7 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0));
+	RzILOpPure *op_NOT_8 = LOGNOT(op_LSHIFT_7);
+	RzILOpPure *op_AND_9 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_8);
+	RzILOpPure *op_AND_14 = LOGAND(CAST(64, MSB(VARL("S")), VARL("S")), SN(64, 0xffffffff));
+	RzILOpPure *op_LSHIFT_18 = SHIFTL0(op_AND_14, SN(32, 0));
+	RzILOpPure *op_OR_19 = LOGOR(op_AND_9, op_LSHIFT_18);
+	RzILOpEffect *op_ASSIGN_20 = WRITE_REG(bundle, Rdd_op, op_OR_19);
+
+	// Rdd = ((Rdd & (~(0xffffffff << 0x20))) | ((((st64) s) & 0xffffffff) << 0x20));
+	RzILOpPure *op_LSHIFT_26 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0x20));
+	RzILOpPure *op_NOT_27 = LOGNOT(op_LSHIFT_26);
+	RzILOpPure *op_AND_28 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_27);
+	RzILOpPure *op_AND_31 = LOGAND(CAST(64, MSB(VARL("s")), VARL("s")), SN(64, 0xffffffff));
+	RzILOpPure *op_LSHIFT_35 = SHIFTL0(op_AND_31, SN(32, 0x20));
+	RzILOpPure *op_OR_36 = LOGOR(op_AND_28, op_LSHIFT_35);
+	RzILOpEffect *op_ASSIGN_37 = WRITE_REG(bundle, Rdd_op, op_OR_36);
+
+	RzILOpEffect *instruction_sequence = SEQN(4, imm_assign_0, imm_assign_10, op_ASSIGN_20, op_ASSIGN_37);
+	return instruction_sequence;
+}
+
+// Rdd = combine(Rs,Rt)
+RzILOpEffect *hex_il_op_a2_combinew(HexInsnPktBundle *bundle) {
+	const HexInsn *hi = bundle->insn;
+	HexPkt *pkt = bundle->pkt;
+	// READ
+	const HexOp *Rdd_op = ISA2REG(hi, 'd', false);
+	const HexOp *Rt_op = ISA2REG(hi, 't', false);
+	RzILOpPure *Rt = READ_REG(pkt, Rt_op, false);
+	const HexOp *Rs_op = ISA2REG(hi, 's', false);
+	RzILOpPure *Rs = READ_REG(pkt, Rs_op, false);
+
+	// Rdd = ((Rdd & (~(0xffffffff << 0x0))) | ((((st64) Rt) & 0xffffffff) << 0x0));
+	RzILOpPure *op_LSHIFT_5 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0));
+	RzILOpPure *op_NOT_6 = LOGNOT(op_LSHIFT_5);
+	RzILOpPure *op_AND_7 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_6);
+	RzILOpPure *op_AND_11 = LOGAND(CAST(64, MSB(Rt), DUP(Rt)), SN(64, 0xffffffff));
+	RzILOpPure *op_LSHIFT_15 = SHIFTL0(op_AND_11, SN(32, 0));
+	RzILOpPure *op_OR_16 = LOGOR(op_AND_7, op_LSHIFT_15);
+	RzILOpEffect *op_ASSIGN_17 = WRITE_REG(bundle, Rdd_op, op_OR_16);
+
+	// Rdd = ((Rdd & (~(0xffffffff << 0x20))) | ((((st64) Rs) & 0xffffffff) << 0x20));
+	RzILOpPure *op_LSHIFT_23 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0x20));
+	RzILOpPure *op_NOT_24 = LOGNOT(op_LSHIFT_23);
+	RzILOpPure *op_AND_25 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_24);
+	RzILOpPure *op_AND_29 = LOGAND(CAST(64, MSB(Rs), DUP(Rs)), SN(64, 0xffffffff));
+	RzILOpPure *op_LSHIFT_33 = SHIFTL0(op_AND_29, SN(32, 0x20));
+	RzILOpPure *op_OR_34 = LOGOR(op_AND_25, op_LSHIFT_33);
+	RzILOpEffect *op_ASSIGN_35 = WRITE_REG(bundle, Rdd_op, op_OR_34);
+
+	RzILOpEffect *instruction_sequence = SEQN(2, op_ASSIGN_17, op_ASSIGN_35);
+	return instruction_sequence;
+}
+
+// Rd = max(Rs,Rt)
+RzILOpEffect *hex_il_op_a2_max(HexInsnPktBundle *bundle) {
+	const HexInsn *hi = bundle->insn;
+	HexPkt *pkt = bundle->pkt;
+	// READ
+	const HexOp *Rd_op = ISA2REG(hi, 'd', false);
+	const HexOp *Rs_op = ISA2REG(hi, 's', false);
+	RzILOpPure *Rs = READ_REG(pkt, Rs_op, false);
+	const HexOp *Rt_op = ISA2REG(hi, 't', false);
+	RzILOpPure *Rt = READ_REG(pkt, Rt_op, false);
+
+	// Rd = ((Rs > Rt) ? Rs : Rt);
+	RzILOpPure *op_GT_3 = SGT(Rs, Rt);
+	RzILOpPure *cond_4 = ITE(op_GT_3, DUP(Rs), DUP(Rt));
+	RzILOpEffect *op_ASSIGN_5 = WRITE_REG(bundle, Rd_op, cond_4);
+
+	RzILOpEffect *instruction_sequence = op_ASSIGN_5;
+	return instruction_sequence;
+}
+
+// Rdd = max(Rss,Rtt)
+RzILOpEffect *hex_il_op_a2_maxp(HexInsnPktBundle *bundle) {
+	const HexInsn *hi = bundle->insn;
+	HexPkt *pkt = bundle->pkt;
+	// READ
+	const HexOp *Rdd_op = ISA2REG(hi, 'd', false);
+	const HexOp *Rss_op = ISA2REG(hi, 's', false);
+	RzILOpPure *Rss = READ_REG(pkt, Rss_op, false);
+	const HexOp *Rtt_op = ISA2REG(hi, 't', false);
+	RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false);
+
+	// Rdd = ((Rss > Rtt) ? Rss : Rtt);
+	RzILOpPure *op_GT_3 = SGT(Rss, Rtt);
+	RzILOpPure *cond_4 = ITE(op_GT_3, DUP(Rss), DUP(Rtt));
+	RzILOpEffect *op_ASSIGN_5 = WRITE_REG(bundle, Rdd_op, cond_4);
+
+	RzILOpEffect *instruction_sequence = op_ASSIGN_5;
+	return instruction_sequence;
+}
+
+// Rd = maxu(Rs,Rt)
+RzILOpEffect *hex_il_op_a2_maxu(HexInsnPktBundle *bundle) {
+	const HexInsn *hi = bundle->insn;
+	HexPkt *pkt = bundle->pkt;
+	// READ
+	const HexOp *Rd_op = ISA2REG(hi, 'd', false);
+	const HexOp *Rs_op = ISA2REG(hi, 's', false);
+	RzILOpPure *Rs = READ_REG(pkt, Rs_op, false);
+	const HexOp *Rt_op = ISA2REG(hi, 't', false);
+	RzILOpPure *Rt = READ_REG(pkt, Rt_op, false);
+
+	// Rd = ((st32) ((((ut32) Rs) > ((ut32) Rt)) ? ((ut32) Rs) : ((ut32) Rt)));
+	RzILOpPure *op_GT_5 = UGT(CAST(32, IL_FALSE, Rs), CAST(32, IL_FALSE, Rt));
+	RzILOpPure *cond_8 = ITE(op_GT_5, CAST(32, IL_FALSE, DUP(Rs)), CAST(32, IL_FALSE, DUP(Rt)));
+	RzILOpEffect *op_ASSIGN_10 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, cond_8));
+
+	RzILOpEffect *instruction_sequence = op_ASSIGN_10;
+	return instruction_sequence;
+}
+
+// Rdd = maxu(Rss,Rtt)
+RzILOpEffect *hex_il_op_a2_maxup(HexInsnPktBundle *bundle) {
+	const HexInsn *hi = bundle->insn;
+	HexPkt *pkt = bundle->pkt;
+	// READ
+	const HexOp *Rdd_op = ISA2REG(hi, 'd', false);
+	const HexOp *Rss_op = ISA2REG(hi, 's', false);
+	RzILOpPure *Rss = READ_REG(pkt, Rss_op, false);
+	const HexOp *Rtt_op = ISA2REG(hi, 't', false);
+	RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false);
+
+	// Rdd = ((st64) ((((ut64) Rss) > ((ut64) Rtt)) ? ((ut64) Rss) : ((ut64) Rtt)));
+	RzILOpPure *op_GT_5 = UGT(CAST(64, IL_FALSE, Rss), CAST(64, IL_FALSE, Rtt));
+	RzILOpPure *cond_8 = ITE(op_GT_5, CAST(64, IL_FALSE, DUP(Rss)), CAST(64, IL_FALSE, DUP(Rtt)));
+	RzILOpEffect *op_ASSIGN_10 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, cond_8));
+
+	RzILOpEffect *instruction_sequence = op_ASSIGN_10;
+	return instruction_sequence;
+}
+
+// Rd = min(Rt,Rs)
+RzILOpEffect *hex_il_op_a2_min(HexInsnPktBundle *bundle) {
+	const HexInsn *hi = bundle->insn;
+	HexPkt *pkt = bundle->pkt;
+	// READ
+	const HexOp *Rd_op = ISA2REG(hi, 'd', false);
+	const HexOp *Rt_op = ISA2REG(hi, 't', false);
+	RzILOpPure *Rt = READ_REG(pkt, Rt_op, false);
+	const HexOp *Rs_op = ISA2REG(hi, 's', false);
+	RzILOpPure *Rs = READ_REG(pkt, Rs_op, false);
+
+	// Rd = ((Rt < Rs) ? Rt : Rs);
+	RzILOpPure *op_LT_3 = SLT(Rt, Rs);
+	RzILOpPure *cond_4 = ITE(op_LT_3, DUP(Rt), DUP(Rs));
+	RzILOpEffect *op_ASSIGN_5 = WRITE_REG(bundle, Rd_op, cond_4);
+
+	RzILOpEffect *instruction_sequence = op_ASSIGN_5;
+	return instruction_sequence;
+}
+
+// Rdd = min(Rtt,Rss)
+RzILOpEffect *hex_il_op_a2_minp(HexInsnPktBundle *bundle) {
+	const HexInsn *hi = bundle->insn;
+	HexPkt *pkt = bundle->pkt;
+	// READ
+	const HexOp *Rdd_op = ISA2REG(hi, 'd', false);
+	const HexOp *Rtt_op = ISA2REG(hi, 't', false);
+	RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false);
+	const HexOp *Rss_op = ISA2REG(hi, 's', false);
+	RzILOpPure *Rss = READ_REG(pkt, Rss_op, false);
+
+	// Rdd = ((Rtt < Rss) ? Rtt : Rss);
+	RzILOpPure *op_LT_3 = SLT(Rtt, Rss);
+	RzILOpPure *cond_4 = ITE(op_LT_3, DUP(Rtt), DUP(Rss));
+	RzILOpEffect *op_ASSIGN_5 = WRITE_REG(bundle, Rdd_op, cond_4);
+
+	RzILOpEffect *instruction_sequence = op_ASSIGN_5;
+	return instruction_sequence;
+}
+
+// Rd = minu(Rt,Rs)
+RzILOpEffect *hex_il_op_a2_minu(HexInsnPktBundle *bundle) {
+	const HexInsn *hi = bundle->insn;
+	HexPkt *pkt = bundle->pkt;
+	// READ
+	const HexOp *Rd_op = ISA2REG(hi, 'd', false);
+	const HexOp *Rt_op = ISA2REG(hi, 't', false);
+	RzILOpPure *Rt = READ_REG(pkt, Rt_op, false);
+	const HexOp *Rs_op = ISA2REG(hi, 's', false);
+	RzILOpPure *Rs = READ_REG(pkt, Rs_op, false);
+
+	// Rd = ((st32) ((((ut32) Rt) < ((ut32) Rs)) ? ((ut32) Rt) : ((ut32) Rs)));
+	RzILOpPure *op_LT_5 = ULT(CAST(32, IL_FALSE, Rt), CAST(32, IL_FALSE, Rs));
+	RzILOpPure *cond_8 = ITE(op_LT_5, CAST(32, IL_FALSE, DUP(Rt)), CAST(32, IL_FALSE, DUP(Rs)));
+	RzILOpEffect *op_ASSIGN_10 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, cond_8));
+
+	RzILOpEffect *instruction_sequence = op_ASSIGN_10;
+	return instruction_sequence;
+}
+
+// Rdd = minu(Rtt,Rss)
+RzILOpEffect *hex_il_op_a2_minup(HexInsnPktBundle *bundle) {
+	const HexInsn *hi = bundle->insn;
+	HexPkt *pkt = bundle->pkt;
+	// READ
+	const HexOp *Rdd_op = ISA2REG(hi, 'd', false);
+	const HexOp *Rtt_op = ISA2REG(hi, 't', false);
+	RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false);
+	const HexOp *Rss_op = ISA2REG(hi, 's', false);
+	RzILOpPure *Rss = READ_REG(pkt, Rss_op, false);
+
+	// Rdd = ((st64) ((((ut64) Rtt) < ((ut64) Rss)) ? ((ut64) Rtt) : ((ut64) Rss)));
+	RzILOpPure *op_LT_5 = ULT(CAST(64, IL_FALSE, Rtt), CAST(64, IL_FALSE, Rss));
+	RzILOpPure *cond_8 = ITE(op_LT_5, CAST(64, IL_FALSE, DUP(Rtt)), CAST(64, IL_FALSE, DUP(Rss)));
+	RzILOpEffect *op_ASSIGN_10 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, cond_8));
+
+	RzILOpEffect *instruction_sequence = op_ASSIGN_10;
+	return instruction_sequence;
+}
+
+// Rdd = neg(Rss)
+RzILOpEffect *hex_il_op_a2_negp(HexInsnPktBundle *bundle) {
+	const HexInsn *hi = bundle->insn;
+	HexPkt *pkt = bundle->pkt;
+	// READ
+	const HexOp *Rdd_op = ISA2REG(hi, 'd', false);
+	const HexOp *Rss_op = ISA2REG(hi, 's', false);
+	RzILOpPure *Rss = READ_REG(pkt, Rss_op, false);
+
+	// Rdd = (-Rss);
+	RzILOpPure *op_NEG_2 = NEG(Rss);
+	RzILOpEffect *op_ASSIGN_3 = WRITE_REG(bundle, Rdd_op, op_NEG_2);
+
+	RzILOpEffect *instruction_sequence = op_ASSIGN_3;
+	return instruction_sequence;
+}
+
+// Rd = neg(Rs):sat
+RzILOpEffect *hex_il_op_a2_negsat(HexInsnPktBundle *bundle) {
+	const HexInsn *hi = bundle->insn;
+	HexPkt *pkt = bundle->pkt;
+	// READ
+	const HexOp *Rd_op = ISA2REG(hi, 'd', false);
+	const HexOp *Rs_op = ISA2REG(hi, 's', false);
+	RzILOpPure *Rs = READ_REG(pkt, Rs_op, false);
+
+	// set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1));
+	RzILOpEffect *set_usr_field_call_19 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1)));
+
+	// HYB(gcc_expr_if ((sextract64(((ut64) (-((st64) Rs))), 0x0, 0x20) == (-((st64) Rs)))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((-((st64) Rs)) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1)));
+	RzILOpPure *op_NEG_6 = NEG(CAST(64, MSB(Rs), DUP(Rs)));
+	RzILOpPure *op_NEG_13 = NEG(CAST(64, MSB(DUP(Rs)), DUP(Rs)));
+	RzILOpPure *op_EQ_14 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_NEG_6), SN(32, 0), SN(32, 0x20)), op_NEG_13);
+	RzILOpPure *op_NEG_21 = NEG(CAST(64, MSB(DUP(Rs)), DUP(Rs)));
+	RzILOpPure *op_LT_24 = SLT(op_NEG_21, CAST(64, MSB(SN(32, 0)), SN(32, 0)));
+	RzILOpPure *op_LSHIFT_29 = SHIFTL0(SN(64, 1), SN(32, 31));
+	RzILOpPure *op_NEG_30 = NEG(op_LSHIFT_29);
+	RzILOpPure *op_LSHIFT_35 = SHIFTL0(SN(64, 1), SN(32, 31));
+	RzILOpPure *op_SUB_38 = SUB(op_LSHIFT_35, CAST(64, MSB(SN(32, 1)), SN(32, 1)));
+	RzILOpPure *cond_39 = ITE(op_LT_24, op_NEG_30, op_SUB_38);
+	RzILOpEffect *gcc_expr_40 = BRANCH(op_EQ_14, EMPTY(), set_usr_field_call_19);
+
+	// h_tmp8 = HYB(gcc_expr_if ((sextract64(((ut64) (-((st64) Rs))), 0x0, 0x20) == (-((st64) Rs)))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((-((st64) Rs)) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1)));
+	RzILOpEffect *op_ASSIGN_hybrid_tmp_42 = SETL("h_tmp8", cond_39);
+
+	// seq(HYB(gcc_expr_if ((sextract64(((ut64) (-((st64) Rs))), 0x0, 0 ...;
+	RzILOpEffect *seq_43 = SEQN(2, gcc_expr_40, op_ASSIGN_hybrid_tmp_42);
+
+	// Rd = ((st32) ((sextract64(((ut64) (-((st64) Rs))), 0x0, 0x20) == (-((st64) Rs))) ? (-((st64) Rs)) : h_tmp8));
+	RzILOpPure *op_NEG_16 = NEG(CAST(64, MSB(DUP(Rs)), DUP(Rs)));
+	RzILOpPure *cond_44 = ITE(DUP(op_EQ_14), op_NEG_16, VARL("h_tmp8"));
+	RzILOpEffect *op_ASSIGN_46 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(cond_44), DUP(cond_44)));
+
+	// seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) (-((st64) Rs))), 0x ...;
+	RzILOpEffect *seq_47 = SEQN(2, seq_43, op_ASSIGN_46);
+
+	RzILOpEffect *instruction_sequence = seq_47;
+	return instruction_sequence;
+}
+
+// nop
+RzILOpEffect *hex_il_op_a2_nop(HexInsnPktBundle *bundle) {
+	// READ
+
+	RzILOpEffect *instruction_sequence = EMPTY();
+	return instruction_sequence;
+}
+
+// Rdd = not(Rss)
+RzILOpEffect *hex_il_op_a2_notp(HexInsnPktBundle *bundle) {
+	const HexInsn *hi = bundle->insn;
+	HexPkt *pkt = bundle->pkt;
+	// READ
+	const HexOp *Rdd_op = ISA2REG(hi, 'd', false);
+	const HexOp *Rss_op = ISA2REG(hi, 's', false);
+	RzILOpPure *Rss = READ_REG(pkt, Rss_op, false);
+
+	// Rdd = (~Rss);
+	RzILOpPure *op_NOT_2 = LOGNOT(Rss);
+	RzILOpEffect *op_ASSIGN_3 = WRITE_REG(bundle, Rdd_op, op_NOT_2);
+
+	RzILOpEffect *instruction_sequence = op_ASSIGN_3;
+	return instruction_sequence;
+}
+
+// Rd = or(Rs,Rt)
+RzILOpEffect *hex_il_op_a2_or(HexInsnPktBundle *bundle) {
+	const HexInsn *hi = bundle->insn;
+	HexPkt *pkt = bundle->pkt;
+	// READ
+	const HexOp *Rd_op = ISA2REG(hi, 'd', false);
+	const HexOp *Rs_op = ISA2REG(hi, 's', false);
+	RzILOpPure *Rs = READ_REG(pkt, Rs_op, false);
+	const HexOp *Rt_op = ISA2REG(hi, 't', false);
+	RzILOpPure *Rt = READ_REG(pkt, Rt_op, false);
+
+	// Rd = (Rs | Rt);
+	RzILOpPure *op_OR_3 = LOGOR(Rs, Rt);
+	RzILOpEffect *op_ASSIGN_4 = WRITE_REG(bundle, Rd_op, op_OR_3);
+
+	RzILOpEffect *instruction_sequence = op_ASSIGN_4;
+	return instruction_sequence;
+}
+
+// Rd = or(Rs,Ii)
+RzILOpEffect *hex_il_op_a2_orir(HexInsnPktBundle *bundle) {
+	const HexInsn *hi = bundle->insn;
+	HexPkt *pkt = bundle->pkt;
+	// READ
+	RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's'));
+	const HexOp *Rd_op = ISA2REG(hi, 'd', false);
+	const HexOp *Rs_op = ISA2REG(hi, 's', false);
+	RzILOpPure *Rs = READ_REG(pkt, Rs_op, false);
+
+	// s = s;
+	RzILOpEffect *imm_assign_0 = SETL("s", s);
+
+	// Rd = (Rs | s);
+	RzILOpPure *op_OR_4 = LOGOR(Rs, VARL("s"));
+	RzILOpEffect *op_ASSIGN_5 = WRITE_REG(bundle, Rd_op, op_OR_4);
+
+	RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_0, op_ASSIGN_5);
+	return instruction_sequence;
+}
+
+// Rdd = or(Rss,Rtt)
+RzILOpEffect *hex_il_op_a2_orp(HexInsnPktBundle *bundle) {
+	const HexInsn *hi = bundle->insn;
+	HexPkt *pkt = bundle->pkt;
+	// READ
+	const HexOp *Rdd_op = ISA2REG(hi, 'd', false);
+	const HexOp *Rss_op = ISA2REG(hi, 's', false);
+	RzILOpPure *Rss = READ_REG(pkt, Rss_op, false);
+	const HexOp *Rtt_op = ISA2REG(hi, 't', false);
+	RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false);
+
+	// Rdd = (Rss | Rtt);
+	RzILOpPure *op_OR_3 = LOGOR(Rss, Rtt);
+	RzILOpEffect *op_ASSIGN_4 = WRITE_REG(bundle, Rdd_op, op_OR_3);
+
+	RzILOpEffect *instruction_sequence = op_ASSIGN_4;
+	return instruction_sequence;
+}
+
+// if (!Pu) Rd = add(Rs,Rt)
+RzILOpEffect *hex_il_op_a2_paddf(HexInsnPktBundle *bundle) {
+	const HexInsn *hi = bundle->insn;
+	HexPkt *pkt = bundle->pkt;
+	// READ
+	const HexOp *Pu_op = ISA2REG(hi, 'u', false);
+	RzILOpPure *Pu = READ_REG(pkt, Pu_op, false);
+	const HexOp *Rd_op = ISA2REG(hi, 'd', false);
+	const HexOp *Rs_op = ISA2REG(hi, 's', false);
+	RzILOpPure *Rs = READ_REG(pkt, Rs_op, false);
+	const HexOp *Rt_op = ISA2REG(hi, 't', false);
+	RzILOpPure *Rt = READ_REG(pkt, Rt_op, false);
+
+	// Rd = Rs + Rt;
+	RzILOpPure *op_ADD_8 = ADD(Rs, Rt);
+	RzILOpEffect *op_ASSIGN_9 = WRITE_REG(bundle, Rd_op, op_ADD_8);
+
+	// nop;
+	RzILOpEffect *nop_10 = NOP();
+
+	// seq(Rd = Rs + Rt);
+	RzILOpEffect *seq_then_11 = op_ASSIGN_9;
+
+	// seq(nop);
+	RzILOpEffect *seq_else_12 = nop_10;
+
+	// if (! (((st32) Pu) & 0x1)) {seq(Rd = Rs + Rt)} else {seq(nop)};
+	RzILOpPure *op_AND_3 = LOGAND(CAST(32, MSB(Pu), DUP(Pu)), SN(32, 1));
+	RzILOpPure *op_INV_4 = INV(NON_ZERO(op_AND_3));
+	RzILOpEffect *branch_13 = BRANCH(op_INV_4, seq_then_11, seq_else_12);
+
+	RzILOpEffect *instruction_sequence = branch_13;
+	return instruction_sequence;
+}
+
+// if (!Pu.new) Rd = add(Rs,Rt)
+RzILOpEffect *hex_il_op_a2_paddfnew(HexInsnPktBundle *bundle) {
+	const HexInsn *hi = bundle->insn;
+	HexPkt *pkt = bundle->pkt;
+	// READ
+	const HexOp *Pu_new_op = ISA2REG(hi, 'u', true);
+	RzILOpPure *Pu_new = READ_REG(pkt, Pu_new_op, true);
+	const HexOp *Rd_op = ISA2REG(hi, 'd', false);
+	const HexOp *Rs_op = ISA2REG(hi, 's', false);
+	RzILOpPure *Rs = READ_REG(pkt, Rs_op, false);
+	const HexOp *Rt_op = ISA2REG(hi, 't', false);
+	RzILOpPure *Rt = READ_REG(pkt, Rt_op, false);
+
+	// Rd = Rs + Rt;
+	RzILOpPure *op_ADD_8 = ADD(Rs, Rt);
+	RzILOpEffect *op_ASSIGN_9 = WRITE_REG(bundle, Rd_op, op_ADD_8);
+
+	// nop;
+	RzILOpEffect *nop_10 = NOP();
+
+	// seq(Rd = Rs + Rt);
+	RzILOpEffect *seq_then_11 = op_ASSIGN_9;
+
+	// seq(nop);
+	RzILOpEffect *seq_else_12 = nop_10;
+
+	// if (!
(((st32) Pu_new) & 0x1)) {seq(Rd = Rs + Rt)} else {seq(nop)}; + RzILOpPure *op_AND_3 = LOGAND(CAST(32, MSB(Pu_new), DUP(Pu_new)), SN(32, 1)); + RzILOpPure *op_INV_4 = INV(NON_ZERO(op_AND_3)); + RzILOpEffect *branch_13 = BRANCH(op_INV_4, seq_then_11, seq_else_12); + + RzILOpEffect *instruction_sequence = branch_13; + return instruction_sequence; +} + +// if (!Pu) Rd = add(Rs,Ii) +RzILOpEffect *hex_il_op_a2_paddif(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Pu_op = ISA2REG(hi, 'u', false); + RzILOpPure *Pu = READ_REG(pkt, Pu_op, false); + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // s = s; + RzILOpEffect *imm_assign_5 = SETL("s", s); + + // Rd = Rs + s; + RzILOpPure *op_ADD_9 = ADD(Rs, VARL("s")); + RzILOpEffect *op_ASSIGN_10 = WRITE_REG(bundle, Rd_op, op_ADD_9); + + // nop; + RzILOpEffect *nop_11 = NOP(); + + // seq(s; Rd = Rs + s); + RzILOpEffect *seq_then_12 = op_ASSIGN_10; + + // seq(nop); + RzILOpEffect *seq_else_13 = nop_11; + + // if (! 
(((st32) Pu) & 0x1)) {seq(s; Rd = Rs + s)} else {seq(nop)}; + RzILOpPure *op_AND_3 = LOGAND(CAST(32, MSB(Pu), DUP(Pu)), SN(32, 1)); + RzILOpPure *op_INV_4 = INV(NON_ZERO(op_AND_3)); + RzILOpEffect *branch_14 = BRANCH(op_INV_4, seq_then_12, seq_else_13); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_5, branch_14); + return instruction_sequence; +} + +// if (!Pu.new) Rd = add(Rs,Ii) +RzILOpEffect *hex_il_op_a2_paddifnew(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Pu_new_op = ISA2REG(hi, 'u', true); + RzILOpPure *Pu_new = READ_REG(pkt, Pu_new_op, true); + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // s = s; + RzILOpEffect *imm_assign_5 = SETL("s", s); + + // Rd = Rs + s; + RzILOpPure *op_ADD_9 = ADD(Rs, VARL("s")); + RzILOpEffect *op_ASSIGN_10 = WRITE_REG(bundle, Rd_op, op_ADD_9); + + // nop; + RzILOpEffect *nop_11 = NOP(); + + // seq(s; Rd = Rs + s); + RzILOpEffect *seq_then_12 = op_ASSIGN_10; + + // seq(nop); + RzILOpEffect *seq_else_13 = nop_11; + + // if (! 
(((st32) Pu_new) & 0x1)) {seq(s; Rd = Rs + s)} else {seq(nop)}; + RzILOpPure *op_AND_3 = LOGAND(CAST(32, MSB(Pu_new), DUP(Pu_new)), SN(32, 1)); + RzILOpPure *op_INV_4 = INV(NON_ZERO(op_AND_3)); + RzILOpEffect *branch_14 = BRANCH(op_INV_4, seq_then_12, seq_else_13); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_5, branch_14); + return instruction_sequence; +} + +// if (Pu) Rd = add(Rs,Ii) +RzILOpEffect *hex_il_op_a2_paddit(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Pu_op = ISA2REG(hi, 'u', false); + RzILOpPure *Pu = READ_REG(pkt, Pu_op, false); + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // s = s; + RzILOpEffect *imm_assign_4 = SETL("s", s); + + // Rd = Rs + s; + RzILOpPure *op_ADD_8 = ADD(Rs, VARL("s")); + RzILOpEffect *op_ASSIGN_9 = WRITE_REG(bundle, Rd_op, op_ADD_8); + + // nop; + RzILOpEffect *nop_10 = NOP(); + + // seq(s; Rd = Rs + s); + RzILOpEffect *seq_then_11 = op_ASSIGN_9; + + // seq(nop); + RzILOpEffect *seq_else_12 = nop_10; + + // if ((((st32) Pu) & 0x1)) {seq(s; Rd = Rs + s)} else {seq(nop)}; + RzILOpPure *op_AND_3 = LOGAND(CAST(32, MSB(Pu), DUP(Pu)), SN(32, 1)); + RzILOpEffect *branch_13 = BRANCH(NON_ZERO(op_AND_3), seq_then_11, seq_else_12); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_4, branch_13); + return instruction_sequence; +} + +// if (Pu.new) Rd = add(Rs,Ii) +RzILOpEffect *hex_il_op_a2_padditnew(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Pu_new_op = ISA2REG(hi, 'u', true); + RzILOpPure *Pu_new = READ_REG(pkt, Pu_new_op, true); + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, 
false); + + // s = s; + RzILOpEffect *imm_assign_4 = SETL("s", s); + + // Rd = Rs + s; + RzILOpPure *op_ADD_8 = ADD(Rs, VARL("s")); + RzILOpEffect *op_ASSIGN_9 = WRITE_REG(bundle, Rd_op, op_ADD_8); + + // nop; + RzILOpEffect *nop_10 = NOP(); + + // seq(s; Rd = Rs + s); + RzILOpEffect *seq_then_11 = op_ASSIGN_9; + + // seq(nop); + RzILOpEffect *seq_else_12 = nop_10; + + // if ((((st32) Pu_new) & 0x1)) {seq(s; Rd = Rs + s)} else {seq(nop)}; + RzILOpPure *op_AND_3 = LOGAND(CAST(32, MSB(Pu_new), DUP(Pu_new)), SN(32, 1)); + RzILOpEffect *branch_13 = BRANCH(NON_ZERO(op_AND_3), seq_then_11, seq_else_12); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_4, branch_13); + return instruction_sequence; +} + +// if (Pu) Rd = add(Rs,Rt) +RzILOpEffect *hex_il_op_a2_paddt(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Pu_op = ISA2REG(hi, 'u', false); + RzILOpPure *Pu = READ_REG(pkt, Pu_op, false); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rd = Rs + Rt; + RzILOpPure *op_ADD_7 = ADD(Rs, Rt); + RzILOpEffect *op_ASSIGN_8 = WRITE_REG(bundle, Rd_op, op_ADD_7); + + // nop; + RzILOpEffect *nop_9 = NOP(); + + // seq(Rd = Rs + Rt); + RzILOpEffect *seq_then_10 = op_ASSIGN_8; + + // seq(nop); + RzILOpEffect *seq_else_11 = nop_9; + + // if ((((st32) Pu) & 0x1)) {seq(Rd = Rs + Rt)} else {seq(nop)}; + RzILOpPure *op_AND_3 = LOGAND(CAST(32, MSB(Pu), DUP(Pu)), SN(32, 1)); + RzILOpEffect *branch_12 = BRANCH(NON_ZERO(op_AND_3), seq_then_10, seq_else_11); + + RzILOpEffect *instruction_sequence = branch_12; + return instruction_sequence; +} + +// if (Pu.new) Rd = add(Rs,Rt) +RzILOpEffect *hex_il_op_a2_paddtnew(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const 
HexOp *Pu_new_op = ISA2REG(hi, 'u', true); + RzILOpPure *Pu_new = READ_REG(pkt, Pu_new_op, true); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rd = Rs + Rt; + RzILOpPure *op_ADD_7 = ADD(Rs, Rt); + RzILOpEffect *op_ASSIGN_8 = WRITE_REG(bundle, Rd_op, op_ADD_7); + + // nop; + RzILOpEffect *nop_9 = NOP(); + + // seq(Rd = Rs + Rt); + RzILOpEffect *seq_then_10 = op_ASSIGN_8; + + // seq(nop); + RzILOpEffect *seq_else_11 = nop_9; + + // if ((((st32) Pu_new) & 0x1)) {seq(Rd = Rs + Rt)} else {seq(nop)}; + RzILOpPure *op_AND_3 = LOGAND(CAST(32, MSB(Pu_new), DUP(Pu_new)), SN(32, 1)); + RzILOpEffect *branch_12 = BRANCH(NON_ZERO(op_AND_3), seq_then_10, seq_else_11); + + RzILOpEffect *instruction_sequence = branch_12; + return instruction_sequence; +} + +// if (!Pu) Rd = and(Rs,Rt) +RzILOpEffect *hex_il_op_a2_pandf(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Pu_op = ISA2REG(hi, 'u', false); + RzILOpPure *Pu = READ_REG(pkt, Pu_op, false); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rd = (Rs & Rt); + RzILOpPure *op_AND_8 = LOGAND(Rs, Rt); + RzILOpEffect *op_ASSIGN_9 = WRITE_REG(bundle, Rd_op, op_AND_8); + + // nop; + RzILOpEffect *nop_10 = NOP(); + + // seq(Rd = (Rs & Rt)); + RzILOpEffect *seq_then_11 = op_ASSIGN_9; + + // seq(nop); + RzILOpEffect *seq_else_12 = nop_10; + + // if (! 
(((st32) Pu) & 0x1)) {seq(Rd = (Rs & Rt))} else {seq(nop)}; + RzILOpPure *op_AND_3 = LOGAND(CAST(32, MSB(Pu), DUP(Pu)), SN(32, 1)); + RzILOpPure *op_INV_4 = INV(NON_ZERO(op_AND_3)); + RzILOpEffect *branch_13 = BRANCH(op_INV_4, seq_then_11, seq_else_12); + + RzILOpEffect *instruction_sequence = branch_13; + return instruction_sequence; +} + +// if (!Pu.new) Rd = and(Rs,Rt) +RzILOpEffect *hex_il_op_a2_pandfnew(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Pu_new_op = ISA2REG(hi, 'u', true); + RzILOpPure *Pu_new = READ_REG(pkt, Pu_new_op, true); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rd = (Rs & Rt); + RzILOpPure *op_AND_8 = LOGAND(Rs, Rt); + RzILOpEffect *op_ASSIGN_9 = WRITE_REG(bundle, Rd_op, op_AND_8); + + // nop; + RzILOpEffect *nop_10 = NOP(); + + // seq(Rd = (Rs & Rt)); + RzILOpEffect *seq_then_11 = op_ASSIGN_9; + + // seq(nop); + RzILOpEffect *seq_else_12 = nop_10; + + // if (! 
(((st32) Pu_new) & 0x1)) {seq(Rd = (Rs & Rt))} else {seq(nop)}; + RzILOpPure *op_AND_3 = LOGAND(CAST(32, MSB(Pu_new), DUP(Pu_new)), SN(32, 1)); + RzILOpPure *op_INV_4 = INV(NON_ZERO(op_AND_3)); + RzILOpEffect *branch_13 = BRANCH(op_INV_4, seq_then_11, seq_else_12); + + RzILOpEffect *instruction_sequence = branch_13; + return instruction_sequence; +} + +// if (Pu) Rd = and(Rs,Rt) +RzILOpEffect *hex_il_op_a2_pandt(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Pu_op = ISA2REG(hi, 'u', false); + RzILOpPure *Pu = READ_REG(pkt, Pu_op, false); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rd = (Rs & Rt); + RzILOpPure *op_AND_7 = LOGAND(Rs, Rt); + RzILOpEffect *op_ASSIGN_8 = WRITE_REG(bundle, Rd_op, op_AND_7); + + // nop; + RzILOpEffect *nop_9 = NOP(); + + // seq(Rd = (Rs & Rt)); + RzILOpEffect *seq_then_10 = op_ASSIGN_8; + + // seq(nop); + RzILOpEffect *seq_else_11 = nop_9; + + // if ((((st32) Pu) & 0x1)) {seq(Rd = (Rs & Rt))} else {seq(nop)}; + RzILOpPure *op_AND_3 = LOGAND(CAST(32, MSB(Pu), DUP(Pu)), SN(32, 1)); + RzILOpEffect *branch_12 = BRANCH(NON_ZERO(op_AND_3), seq_then_10, seq_else_11); + + RzILOpEffect *instruction_sequence = branch_12; + return instruction_sequence; +} + +// if (Pu.new) Rd = and(Rs,Rt) +RzILOpEffect *hex_il_op_a2_pandtnew(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Pu_new_op = ISA2REG(hi, 'u', true); + RzILOpPure *Pu_new = READ_REG(pkt, Pu_new_op, true); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rd = (Rs 
& Rt); + RzILOpPure *op_AND_7 = LOGAND(Rs, Rt); + RzILOpEffect *op_ASSIGN_8 = WRITE_REG(bundle, Rd_op, op_AND_7); + + // nop; + RzILOpEffect *nop_9 = NOP(); + + // seq(Rd = (Rs & Rt)); + RzILOpEffect *seq_then_10 = op_ASSIGN_8; + + // seq(nop); + RzILOpEffect *seq_else_11 = nop_9; + + // if ((((st32) Pu_new) & 0x1)) {seq(Rd = (Rs & Rt))} else {seq(nop)}; + RzILOpPure *op_AND_3 = LOGAND(CAST(32, MSB(Pu_new), DUP(Pu_new)), SN(32, 1)); + RzILOpEffect *branch_12 = BRANCH(NON_ZERO(op_AND_3), seq_then_10, seq_else_11); + + RzILOpEffect *instruction_sequence = branch_12; + return instruction_sequence; +} + +// if (!Pu) Rd = or(Rs,Rt) +RzILOpEffect *hex_il_op_a2_porf(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Pu_op = ISA2REG(hi, 'u', false); + RzILOpPure *Pu = READ_REG(pkt, Pu_op, false); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rd = (Rs | Rt); + RzILOpPure *op_OR_8 = LOGOR(Rs, Rt); + RzILOpEffect *op_ASSIGN_9 = WRITE_REG(bundle, Rd_op, op_OR_8); + + // nop; + RzILOpEffect *nop_10 = NOP(); + + // seq(Rd = (Rs | Rt)); + RzILOpEffect *seq_then_11 = op_ASSIGN_9; + + // seq(nop); + RzILOpEffect *seq_else_12 = nop_10; + + // if (! 
(((st32) Pu) & 0x1)) {seq(Rd = (Rs | Rt))} else {seq(nop)}; + RzILOpPure *op_AND_3 = LOGAND(CAST(32, MSB(Pu), DUP(Pu)), SN(32, 1)); + RzILOpPure *op_INV_4 = INV(NON_ZERO(op_AND_3)); + RzILOpEffect *branch_13 = BRANCH(op_INV_4, seq_then_11, seq_else_12); + + RzILOpEffect *instruction_sequence = branch_13; + return instruction_sequence; +} + +// if (!Pu.new) Rd = or(Rs,Rt) +RzILOpEffect *hex_il_op_a2_porfnew(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Pu_new_op = ISA2REG(hi, 'u', true); + RzILOpPure *Pu_new = READ_REG(pkt, Pu_new_op, true); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rd = (Rs | Rt); + RzILOpPure *op_OR_8 = LOGOR(Rs, Rt); + RzILOpEffect *op_ASSIGN_9 = WRITE_REG(bundle, Rd_op, op_OR_8); + + // nop; + RzILOpEffect *nop_10 = NOP(); + + // seq(Rd = (Rs | Rt)); + RzILOpEffect *seq_then_11 = op_ASSIGN_9; + + // seq(nop); + RzILOpEffect *seq_else_12 = nop_10; + + // if (! 
(((st32) Pu_new) & 0x1)) {seq(Rd = (Rs | Rt))} else {seq(nop)}; + RzILOpPure *op_AND_3 = LOGAND(CAST(32, MSB(Pu_new), DUP(Pu_new)), SN(32, 1)); + RzILOpPure *op_INV_4 = INV(NON_ZERO(op_AND_3)); + RzILOpEffect *branch_13 = BRANCH(op_INV_4, seq_then_11, seq_else_12); + + RzILOpEffect *instruction_sequence = branch_13; + return instruction_sequence; +} + +// if (Pu) Rd = or(Rs,Rt) +RzILOpEffect *hex_il_op_a2_port(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Pu_op = ISA2REG(hi, 'u', false); + RzILOpPure *Pu = READ_REG(pkt, Pu_op, false); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rd = (Rs | Rt); + RzILOpPure *op_OR_7 = LOGOR(Rs, Rt); + RzILOpEffect *op_ASSIGN_8 = WRITE_REG(bundle, Rd_op, op_OR_7); + + // nop; + RzILOpEffect *nop_9 = NOP(); + + // seq(Rd = (Rs | Rt)); + RzILOpEffect *seq_then_10 = op_ASSIGN_8; + + // seq(nop); + RzILOpEffect *seq_else_11 = nop_9; + + // if ((((st32) Pu) & 0x1)) {seq(Rd = (Rs | Rt))} else {seq(nop)}; + RzILOpPure *op_AND_3 = LOGAND(CAST(32, MSB(Pu), DUP(Pu)), SN(32, 1)); + RzILOpEffect *branch_12 = BRANCH(NON_ZERO(op_AND_3), seq_then_10, seq_else_11); + + RzILOpEffect *instruction_sequence = branch_12; + return instruction_sequence; +} + +// if (Pu.new) Rd = or(Rs,Rt) +RzILOpEffect *hex_il_op_a2_portnew(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Pu_new_op = ISA2REG(hi, 'u', true); + RzILOpPure *Pu_new = READ_REG(pkt, Pu_new_op, true); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rd = (Rs | Rt); 
+ RzILOpPure *op_OR_7 = LOGOR(Rs, Rt); + RzILOpEffect *op_ASSIGN_8 = WRITE_REG(bundle, Rd_op, op_OR_7); + + // nop; + RzILOpEffect *nop_9 = NOP(); + + // seq(Rd = (Rs | Rt)); + RzILOpEffect *seq_then_10 = op_ASSIGN_8; + + // seq(nop); + RzILOpEffect *seq_else_11 = nop_9; + + // if ((((st32) Pu_new) & 0x1)) {seq(Rd = (Rs | Rt))} else {seq(nop)}; + RzILOpPure *op_AND_3 = LOGAND(CAST(32, MSB(Pu_new), DUP(Pu_new)), SN(32, 1)); + RzILOpEffect *branch_12 = BRANCH(NON_ZERO(op_AND_3), seq_then_10, seq_else_11); + + RzILOpEffect *instruction_sequence = branch_12; + return instruction_sequence; +} + +// if (!Pu) Rd = sub(Rt,Rs) +RzILOpEffect *hex_il_op_a2_psubf(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Pu_op = ISA2REG(hi, 'u', false); + RzILOpPure *Pu = READ_REG(pkt, Pu_op, false); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // Rd = Rt - Rs; + RzILOpPure *op_SUB_8 = SUB(Rt, Rs); + RzILOpEffect *op_ASSIGN_9 = WRITE_REG(bundle, Rd_op, op_SUB_8); + + // nop; + RzILOpEffect *nop_10 = NOP(); + + // seq(Rd = Rt - Rs); + RzILOpEffect *seq_then_11 = op_ASSIGN_9; + + // seq(nop); + RzILOpEffect *seq_else_12 = nop_10; + + // if (! 
(((st32) Pu) & 0x1)) {seq(Rd = Rt - Rs)} else {seq(nop)}; + RzILOpPure *op_AND_3 = LOGAND(CAST(32, MSB(Pu), DUP(Pu)), SN(32, 1)); + RzILOpPure *op_INV_4 = INV(NON_ZERO(op_AND_3)); + RzILOpEffect *branch_13 = BRANCH(op_INV_4, seq_then_11, seq_else_12); + + RzILOpEffect *instruction_sequence = branch_13; + return instruction_sequence; +} + +// if (!Pu.new) Rd = sub(Rt,Rs) +RzILOpEffect *hex_il_op_a2_psubfnew(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Pu_new_op = ISA2REG(hi, 'u', true); + RzILOpPure *Pu_new = READ_REG(pkt, Pu_new_op, true); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // Rd = Rt - Rs; + RzILOpPure *op_SUB_8 = SUB(Rt, Rs); + RzILOpEffect *op_ASSIGN_9 = WRITE_REG(bundle, Rd_op, op_SUB_8); + + // nop; + RzILOpEffect *nop_10 = NOP(); + + // seq(Rd = Rt - Rs); + RzILOpEffect *seq_then_11 = op_ASSIGN_9; + + // seq(nop); + RzILOpEffect *seq_else_12 = nop_10; + + // if (! 
(((st32) Pu_new) & 0x1)) {seq(Rd = Rt - Rs)} else {seq(nop)}; + RzILOpPure *op_AND_3 = LOGAND(CAST(32, MSB(Pu_new), DUP(Pu_new)), SN(32, 1)); + RzILOpPure *op_INV_4 = INV(NON_ZERO(op_AND_3)); + RzILOpEffect *branch_13 = BRANCH(op_INV_4, seq_then_11, seq_else_12); + + RzILOpEffect *instruction_sequence = branch_13; + return instruction_sequence; +} + +// if (Pu) Rd = sub(Rt,Rs) +RzILOpEffect *hex_il_op_a2_psubt(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Pu_op = ISA2REG(hi, 'u', false); + RzILOpPure *Pu = READ_REG(pkt, Pu_op, false); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // Rd = Rt - Rs; + RzILOpPure *op_SUB_7 = SUB(Rt, Rs); + RzILOpEffect *op_ASSIGN_8 = WRITE_REG(bundle, Rd_op, op_SUB_7); + + // nop; + RzILOpEffect *nop_9 = NOP(); + + // seq(Rd = Rt - Rs); + RzILOpEffect *seq_then_10 = op_ASSIGN_8; + + // seq(nop); + RzILOpEffect *seq_else_11 = nop_9; + + // if ((((st32) Pu) & 0x1)) {seq(Rd = Rt - Rs)} else {seq(nop)}; + RzILOpPure *op_AND_3 = LOGAND(CAST(32, MSB(Pu), DUP(Pu)), SN(32, 1)); + RzILOpEffect *branch_12 = BRANCH(NON_ZERO(op_AND_3), seq_then_10, seq_else_11); + + RzILOpEffect *instruction_sequence = branch_12; + return instruction_sequence; +} + +// if (Pu.new) Rd = sub(Rt,Rs) +RzILOpEffect *hex_il_op_a2_psubtnew(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Pu_new_op = ISA2REG(hi, 'u', true); + RzILOpPure *Pu_new = READ_REG(pkt, Pu_new_op, true); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // Rd = Rt - Rs; + 
RzILOpPure *op_SUB_7 = SUB(Rt, Rs); + RzILOpEffect *op_ASSIGN_8 = WRITE_REG(bundle, Rd_op, op_SUB_7); + + // nop; + RzILOpEffect *nop_9 = NOP(); + + // seq(Rd = Rt - Rs); + RzILOpEffect *seq_then_10 = op_ASSIGN_8; + + // seq(nop); + RzILOpEffect *seq_else_11 = nop_9; + + // if ((((st32) Pu_new) & 0x1)) {seq(Rd = Rt - Rs)} else {seq(nop)}; + RzILOpPure *op_AND_3 = LOGAND(CAST(32, MSB(Pu_new), DUP(Pu_new)), SN(32, 1)); + RzILOpEffect *branch_12 = BRANCH(NON_ZERO(op_AND_3), seq_then_10, seq_else_11); + + RzILOpEffect *instruction_sequence = branch_12; + return instruction_sequence; +} + +// if (!Pu) Rd = xor(Rs,Rt) +RzILOpEffect *hex_il_op_a2_pxorf(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Pu_op = ISA2REG(hi, 'u', false); + RzILOpPure *Pu = READ_REG(pkt, Pu_op, false); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rd = (Rs ^ Rt); + RzILOpPure *op_XOR_8 = LOGXOR(Rs, Rt); + RzILOpEffect *op_ASSIGN_9 = WRITE_REG(bundle, Rd_op, op_XOR_8); + + // nop; + RzILOpEffect *nop_10 = NOP(); + + // seq(Rd = (Rs ^ Rt)); + RzILOpEffect *seq_then_11 = op_ASSIGN_9; + + // seq(nop); + RzILOpEffect *seq_else_12 = nop_10; + + // if (! 
(((st32) Pu) & 0x1)) {seq(Rd = (Rs ^ Rt))} else {seq(nop)}; + RzILOpPure *op_AND_3 = LOGAND(CAST(32, MSB(Pu), DUP(Pu)), SN(32, 1)); + RzILOpPure *op_INV_4 = INV(NON_ZERO(op_AND_3)); + RzILOpEffect *branch_13 = BRANCH(op_INV_4, seq_then_11, seq_else_12); + + RzILOpEffect *instruction_sequence = branch_13; + return instruction_sequence; +} + +// if (!Pu.new) Rd = xor(Rs,Rt) +RzILOpEffect *hex_il_op_a2_pxorfnew(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Pu_new_op = ISA2REG(hi, 'u', true); + RzILOpPure *Pu_new = READ_REG(pkt, Pu_new_op, true); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rd = (Rs ^ Rt); + RzILOpPure *op_XOR_8 = LOGXOR(Rs, Rt); + RzILOpEffect *op_ASSIGN_9 = WRITE_REG(bundle, Rd_op, op_XOR_8); + + // nop; + RzILOpEffect *nop_10 = NOP(); + + // seq(Rd = (Rs ^ Rt)); + RzILOpEffect *seq_then_11 = op_ASSIGN_9; + + // seq(nop); + RzILOpEffect *seq_else_12 = nop_10; + + // if (! 
(((st32) Pu_new) & 0x1)) {seq(Rd = (Rs ^ Rt))} else {seq(nop)}; + RzILOpPure *op_AND_3 = LOGAND(CAST(32, MSB(Pu_new), DUP(Pu_new)), SN(32, 1)); + RzILOpPure *op_INV_4 = INV(NON_ZERO(op_AND_3)); + RzILOpEffect *branch_13 = BRANCH(op_INV_4, seq_then_11, seq_else_12); + + RzILOpEffect *instruction_sequence = branch_13; + return instruction_sequence; +} + +// if (Pu) Rd = xor(Rs,Rt) +RzILOpEffect *hex_il_op_a2_pxort(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Pu_op = ISA2REG(hi, 'u', false); + RzILOpPure *Pu = READ_REG(pkt, Pu_op, false); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rd = (Rs ^ Rt); + RzILOpPure *op_XOR_7 = LOGXOR(Rs, Rt); + RzILOpEffect *op_ASSIGN_8 = WRITE_REG(bundle, Rd_op, op_XOR_7); + + // nop; + RzILOpEffect *nop_9 = NOP(); + + // seq(Rd = (Rs ^ Rt)); + RzILOpEffect *seq_then_10 = op_ASSIGN_8; + + // seq(nop); + RzILOpEffect *seq_else_11 = nop_9; + + // if ((((st32) Pu) & 0x1)) {seq(Rd = (Rs ^ Rt))} else {seq(nop)}; + RzILOpPure *op_AND_3 = LOGAND(CAST(32, MSB(Pu), DUP(Pu)), SN(32, 1)); + RzILOpEffect *branch_12 = BRANCH(NON_ZERO(op_AND_3), seq_then_10, seq_else_11); + + RzILOpEffect *instruction_sequence = branch_12; + return instruction_sequence; +} + +// if (Pu.new) Rd = xor(Rs,Rt) +RzILOpEffect *hex_il_op_a2_pxortnew(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Pu_new_op = ISA2REG(hi, 'u', true); + RzILOpPure *Pu_new = READ_REG(pkt, Pu_new_op, true); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rd = (Rs 
^ Rt); + RzILOpPure *op_XOR_7 = LOGXOR(Rs, Rt); + RzILOpEffect *op_ASSIGN_8 = WRITE_REG(bundle, Rd_op, op_XOR_7); + + // nop; + RzILOpEffect *nop_9 = NOP(); + + // seq(Rd = (Rs ^ Rt)); + RzILOpEffect *seq_then_10 = op_ASSIGN_8; + + // seq(nop); + RzILOpEffect *seq_else_11 = nop_9; + + // if ((((st32) Pu_new) & 0x1)) {seq(Rd = (Rs ^ Rt))} else {seq(nop)}; + RzILOpPure *op_AND_3 = LOGAND(CAST(32, MSB(Pu_new), DUP(Pu_new)), SN(32, 1)); + RzILOpEffect *branch_12 = BRANCH(NON_ZERO(op_AND_3), seq_then_10, seq_else_11); + + RzILOpEffect *instruction_sequence = branch_12; + return instruction_sequence; +} + +// Rd = round(Rss):sat +RzILOpEffect *hex_il_op_a2_roundsat(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: st64 tmp; + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + // Declare: ut64 __a; + // Declare: ut64 __b; + // Declare: ut64 __sum; + // Declare: ut64 __xor; + // Declare: ut64 __mask; + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // __a = ((ut64) Rss); + RzILOpEffect *op_ASSIGN_4 = SETL("__a", CAST(64, IL_FALSE, Rss)); + + // __b = 0x80000000; + RzILOpEffect *op_ASSIGN_7 = SETL("__b", UN(64, 0x80000000)); + + // __sum = __a + __b; + RzILOpPure *op_ADD_8 = ADD(VARL("__a"), VARL("__b")); + RzILOpEffect *op_ASSIGN_10 = SETL("__sum", op_ADD_8); + + // __xor = (__a ^ __b); + RzILOpPure *op_XOR_11 = LOGXOR(VARL("__a"), VARL("__b")); + RzILOpEffect *op_ASSIGN_13 = SETL("__xor", op_XOR_11); + + // __mask = 0x8000000000000000; + RzILOpEffect *op_ASSIGN_16 = SETL("__mask", UN(64, 0x8000000000000000)); + + // tmp = ((st64) __sum); + RzILOpEffect *op_ASSIGN_19 = SETL("tmp", CAST(64, IL_FALSE, VARL("__sum"))); + + // tmp = 0x7fffffffffffffff; + RzILOpEffect *op_ASSIGN_24 = SETL("tmp", SN(64, 0x7fffffffffffffff)); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_27 = hex_set_usr_field(bundle, 
HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // tmp = 0x8000000000000000; + RzILOpEffect *op_ASSIGN_29 = SETL("tmp", SN(64, 0x8000000000000000)); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_32 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // seq(tmp = 0x7fffffffffffffff; set_usr_field(bundle, HEX_REG_FIEL ...; + RzILOpEffect *seq_then_33 = SEQN(2, op_ASSIGN_24, set_usr_field_call_27); + + // seq(tmp = 0x8000000000000000; set_usr_field(bundle, HEX_REG_FIEL ...; + RzILOpEffect *seq_else_34 = SEQN(2, op_ASSIGN_29, set_usr_field_call_32); + + // if ((__sum & __mask)) {seq(tmp = 0x7fffffffffffffff; set_usr_field(bundle, HEX_REG_FIEL ...} else {seq(tmp = 0x8000000000000000; set_usr_field(bundle, HEX_REG_FIEL ...}; + RzILOpPure *op_AND_22 = LOGAND(VARL("__sum"), VARL("__mask")); + RzILOpEffect *branch_35 = BRANCH(NON_ZERO(op_AND_22), seq_then_33, seq_else_34); + + // tmp = ((st64) __sum); + RzILOpEffect *op_ASSIGN_37 = SETL("tmp", CAST(64, IL_FALSE, VARL("__sum"))); + + // seq(if ((__sum & __mask)) {seq(tmp = 0x7fffffffffffffff; set_usr ...; + RzILOpEffect *seq_then_38 = branch_35; + + // seq(tmp = ((st64) __sum)); + RzILOpEffect *seq_else_39 = op_ASSIGN_37; + + // if (((__a ^ __sum) & __mask)) {seq(if ((__sum & __mask)) {seq(tmp = 0x7fffffffffffffff; set_usr ...} else {seq(tmp = ((st64) __sum))}; + RzILOpPure *op_XOR_20 = LOGXOR(VARL("__a"), VARL("__sum")); + RzILOpPure *op_AND_21 = LOGAND(op_XOR_20, VARL("__mask")); + RzILOpEffect *branch_40 = BRANCH(NON_ZERO(op_AND_21), seq_then_38, seq_else_39); + + // seq(tmp = ((st64) __sum)); + RzILOpEffect *seq_then_41 = op_ASSIGN_19; + + // seq(if (((__a ^ __sum) & __mask)) {seq(if ((__sum & __mask)) {se ...; + RzILOpEffect *seq_else_42 = branch_40; + + // if ((__xor & __mask)) {seq(tmp = ((st64) __sum))} else {seq(if (((__a ^ __sum) & __mask)) {seq(if ((__sum & __mask)) {se ...}; + RzILOpPure *op_AND_17 = 
LOGAND(VARL("__xor"), VARL("__mask")); + RzILOpEffect *branch_43 = BRANCH(NON_ZERO(op_AND_17), seq_then_41, seq_else_42); + + // Rd = ((st32) ((st64) ((st32) ((tmp >> 0x20) & 0xffffffff)))); + RzILOpPure *op_RSHIFT_48 = SHIFTRA(VARL("tmp"), SN(32, 0x20)); + RzILOpPure *op_AND_50 = LOGAND(op_RSHIFT_48, SN(64, 0xffffffff)); + RzILOpEffect *op_ASSIGN_54 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_50), DUP(op_AND_50))), CAST(32, MSB(DUP(op_AND_50)), DUP(op_AND_50)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_50)), DUP(op_AND_50))), CAST(32, MSB(DUP(op_AND_50)), DUP(op_AND_50))))); + + RzILOpEffect *instruction_sequence = SEQN(7, op_ASSIGN_4, op_ASSIGN_7, op_ASSIGN_10, op_ASSIGN_13, op_ASSIGN_16, branch_43, op_ASSIGN_54); + return instruction_sequence; +} + +// Rd = sat(Rss) +RzILOpEffect *hex_il_op_a2_sat(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_13 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) Rss), 0x0, 0x20) == Rss)) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((Rss < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_EQ_10 = EQ(SEXTRACT64(CAST(64, IL_FALSE, Rss), SN(32, 0), SN(32, 0x20)), DUP(Rss)); + RzILOpPure *op_LT_16 = SLT(DUP(Rss), CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_21 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_22 = NEG(op_LSHIFT_21); + RzILOpPure *op_LSHIFT_27 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_30 = SUB(op_LSHIFT_27, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_31 = ITE(op_LT_16, op_NEG_22, op_SUB_30); + RzILOpEffect *gcc_expr_32 = BRANCH(op_EQ_10, EMPTY(), set_usr_field_call_13); + + // h_tmp9 = HYB(gcc_expr_if ((sextract64(((ut64) Rss), 0x0, 0x20) == Rss)) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((Rss < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_34 = SETL("h_tmp9", cond_31); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) Rss), 0x0, 0x20) == Rss ...; + RzILOpEffect *seq_35 = SEQN(2, gcc_expr_32, op_ASSIGN_hybrid_tmp_34); + + // Rd = ((st32) ((sextract64(((ut64) Rss), 0x0, 0x20) == Rss) ? 
Rss : h_tmp9)); + RzILOpPure *cond_36 = ITE(DUP(op_EQ_10), DUP(Rss), VARL("h_tmp9")); + RzILOpEffect *op_ASSIGN_38 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(cond_36), DUP(cond_36))); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) Rss), 0x0, 0x20) == ...; + RzILOpEffect *seq_39 = SEQN(2, seq_35, op_ASSIGN_38); + + RzILOpEffect *instruction_sequence = seq_39; + return instruction_sequence; +} + +// Rd = satb(Rs) +RzILOpEffect *hex_il_op_a2_satb(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_14 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) Rs), 0x0, 0x8) == ((st64) Rs))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((Rs < 0x0) ? (-(0x1 << 0x7)) : (0x1 << 0x7) - ((st64) 0x1))); + RzILOpPure *op_EQ_11 = EQ(SEXTRACT64(CAST(64, IL_FALSE, Rs), SN(32, 0), SN(32, 8)), CAST(64, MSB(DUP(Rs)), DUP(Rs))); + RzILOpPure *op_LT_16 = SLT(DUP(Rs), SN(32, 0)); + RzILOpPure *op_LSHIFT_21 = SHIFTL0(SN(64, 1), SN(32, 7)); + RzILOpPure *op_NEG_22 = NEG(op_LSHIFT_21); + RzILOpPure *op_LSHIFT_27 = SHIFTL0(SN(64, 1), SN(32, 7)); + RzILOpPure *op_SUB_30 = SUB(op_LSHIFT_27, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_31 = ITE(op_LT_16, op_NEG_22, op_SUB_30); + RzILOpEffect *gcc_expr_32 = BRANCH(op_EQ_11, EMPTY(), set_usr_field_call_14); + + // h_tmp10 = HYB(gcc_expr_if ((sextract64(((ut64) Rs), 0x0, 0x8) == ((st64) Rs))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((Rs < 0x0) ? 
(-(0x1 << 0x7)) : (0x1 << 0x7) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_34 = SETL("h_tmp10", cond_31); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) Rs), 0x0, 0x8) == ((st6 ...; + RzILOpEffect *seq_35 = SEQN(2, gcc_expr_32, op_ASSIGN_hybrid_tmp_34); + + // Rd = ((st32) ((sextract64(((ut64) Rs), 0x0, 0x8) == ((st64) Rs)) ? ((st64) Rs) : h_tmp10)); + RzILOpPure *cond_37 = ITE(DUP(op_EQ_11), CAST(64, MSB(DUP(Rs)), DUP(Rs)), VARL("h_tmp10")); + RzILOpEffect *op_ASSIGN_39 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(cond_37), DUP(cond_37))); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) Rs), 0x0, 0x8) == ( ...; + RzILOpEffect *seq_40 = SEQN(2, seq_35, op_ASSIGN_39); + + RzILOpEffect *instruction_sequence = seq_40; + return instruction_sequence; +} + +// Rd = sath(Rs) +RzILOpEffect *hex_il_op_a2_sath(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_14 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) Rs), 0x0, 0x10) == ((st64) Rs))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((Rs < 0x0) ? 
(-(0x1 << 0xf)) : (0x1 << 0xf) - ((st64) 0x1))); + RzILOpPure *op_EQ_11 = EQ(SEXTRACT64(CAST(64, IL_FALSE, Rs), SN(32, 0), SN(32, 16)), CAST(64, MSB(DUP(Rs)), DUP(Rs))); + RzILOpPure *op_LT_16 = SLT(DUP(Rs), SN(32, 0)); + RzILOpPure *op_LSHIFT_21 = SHIFTL0(SN(64, 1), SN(32, 15)); + RzILOpPure *op_NEG_22 = NEG(op_LSHIFT_21); + RzILOpPure *op_LSHIFT_27 = SHIFTL0(SN(64, 1), SN(32, 15)); + RzILOpPure *op_SUB_30 = SUB(op_LSHIFT_27, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_31 = ITE(op_LT_16, op_NEG_22, op_SUB_30); + RzILOpEffect *gcc_expr_32 = BRANCH(op_EQ_11, EMPTY(), set_usr_field_call_14); + + // h_tmp11 = HYB(gcc_expr_if ((sextract64(((ut64) Rs), 0x0, 0x10) == ((st64) Rs))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((Rs < 0x0) ? (-(0x1 << 0xf)) : (0x1 << 0xf) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_34 = SETL("h_tmp11", cond_31); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) Rs), 0x0, 0x10) == ((st ...; + RzILOpEffect *seq_35 = SEQN(2, gcc_expr_32, op_ASSIGN_hybrid_tmp_34); + + // Rd = ((st32) ((sextract64(((ut64) Rs), 0x0, 0x10) == ((st64) Rs)) ? 
((st64) Rs) : h_tmp11)); + RzILOpPure *cond_37 = ITE(DUP(op_EQ_11), CAST(64, MSB(DUP(Rs)), DUP(Rs)), VARL("h_tmp11")); + RzILOpEffect *op_ASSIGN_39 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(cond_37), DUP(cond_37))); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) Rs), 0x0, 0x10) == ...; + RzILOpEffect *seq_40 = SEQN(2, seq_35, op_ASSIGN_39); + + RzILOpEffect *instruction_sequence = seq_40; + return instruction_sequence; +} + +// Rd = satub(Rs) +RzILOpEffect *hex_il_op_a2_satub(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_14 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((extract64(((ut64) Rs), 0x0, 0x8) == ((ut64) Rs))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((Rs < 0x0) ? ((st64) 0x0) : (0x1 << 0x8) - ((st64) 0x1))); + RzILOpPure *op_EQ_11 = EQ(EXTRACT64(CAST(64, IL_FALSE, Rs), SN(32, 0), SN(32, 8)), CAST(64, IL_FALSE, DUP(Rs))); + RzILOpPure *op_LT_16 = SLT(DUP(Rs), SN(32, 0)); + RzILOpPure *op_LSHIFT_20 = SHIFTL0(SN(64, 1), SN(32, 8)); + RzILOpPure *op_SUB_23 = SUB(op_LSHIFT_20, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_25 = ITE(op_LT_16, CAST(64, MSB(SN(32, 0)), SN(32, 0)), op_SUB_23); + RzILOpEffect *gcc_expr_26 = BRANCH(op_EQ_11, EMPTY(), set_usr_field_call_14); + + // h_tmp12 = HYB(gcc_expr_if ((extract64(((ut64) Rs), 0x0, 0x8) == ((ut64) Rs))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((Rs < 0x0) ? 
((st64) 0x0) : (0x1 << 0x8) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_28 = SETL("h_tmp12", cond_25); + + // seq(HYB(gcc_expr_if ((extract64(((ut64) Rs), 0x0, 0x8) == ((ut64 ...; + RzILOpEffect *seq_29 = SEQN(2, gcc_expr_26, op_ASSIGN_hybrid_tmp_28); + + // Rd = ((st32) ((extract64(((ut64) Rs), 0x0, 0x8) == ((ut64) Rs)) ? ((st64) Rs) : h_tmp12)); + RzILOpPure *cond_31 = ITE(DUP(op_EQ_11), CAST(64, MSB(DUP(Rs)), DUP(Rs)), VARL("h_tmp12")); + RzILOpEffect *op_ASSIGN_33 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(cond_31), DUP(cond_31))); + + // seq(seq(HYB(gcc_expr_if ((extract64(((ut64) Rs), 0x0, 0x8) == (( ...; + RzILOpEffect *seq_34 = SEQN(2, seq_29, op_ASSIGN_33); + + RzILOpEffect *instruction_sequence = seq_34; + return instruction_sequence; +} + +// Rd = satuh(Rs) +RzILOpEffect *hex_il_op_a2_satuh(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_14 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((extract64(((ut64) Rs), 0x0, 0x10) == ((ut64) Rs))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((Rs < 0x0) ? 
((st64) 0x0) : (0x1 << 0x10) - ((st64) 0x1))); + RzILOpPure *op_EQ_11 = EQ(EXTRACT64(CAST(64, IL_FALSE, Rs), SN(32, 0), SN(32, 16)), CAST(64, IL_FALSE, DUP(Rs))); + RzILOpPure *op_LT_16 = SLT(DUP(Rs), SN(32, 0)); + RzILOpPure *op_LSHIFT_20 = SHIFTL0(SN(64, 1), SN(32, 16)); + RzILOpPure *op_SUB_23 = SUB(op_LSHIFT_20, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_25 = ITE(op_LT_16, CAST(64, MSB(SN(32, 0)), SN(32, 0)), op_SUB_23); + RzILOpEffect *gcc_expr_26 = BRANCH(op_EQ_11, EMPTY(), set_usr_field_call_14); + + // h_tmp13 = HYB(gcc_expr_if ((extract64(((ut64) Rs), 0x0, 0x10) == ((ut64) Rs))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((Rs < 0x0) ? ((st64) 0x0) : (0x1 << 0x10) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_28 = SETL("h_tmp13", cond_25); + + // seq(HYB(gcc_expr_if ((extract64(((ut64) Rs), 0x0, 0x10) == ((ut6 ...; + RzILOpEffect *seq_29 = SEQN(2, gcc_expr_26, op_ASSIGN_hybrid_tmp_28); + + // Rd = ((st32) ((extract64(((ut64) Rs), 0x0, 0x10) == ((ut64) Rs)) ? 
((st64) Rs) : h_tmp13)); + RzILOpPure *cond_31 = ITE(DUP(op_EQ_11), CAST(64, MSB(DUP(Rs)), DUP(Rs)), VARL("h_tmp13")); + RzILOpEffect *op_ASSIGN_33 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(cond_31), DUP(cond_31))); + + // seq(seq(HYB(gcc_expr_if ((extract64(((ut64) Rs), 0x0, 0x10) == ( ...; + RzILOpEffect *seq_34 = SEQN(2, seq_29, op_ASSIGN_33); + + RzILOpEffect *instruction_sequence = seq_34; + return instruction_sequence; +} + +// Rd = sub(Rt,Rs) +RzILOpEffect *hex_il_op_a2_sub(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // Rd = Rt - Rs; + RzILOpPure *op_SUB_3 = SUB(Rt, Rs); + RzILOpEffect *op_ASSIGN_4 = WRITE_REG(bundle, Rd_op, op_SUB_3); + + RzILOpEffect *instruction_sequence = op_ASSIGN_4; + return instruction_sequence; +} + +// Rd = sub(Rt.h,Rs.h):<<16 +RzILOpEffect *hex_il_op_a2_subh_h16_hh(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // Rd = (((st32) ((st16) ((Rt >> 0x10) & 0xffff))) - ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) << 0x10); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rt, SN(32, 16)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_13 = SHIFTRA(Rs, SN(32, 16)); + RzILOpPure *op_AND_15 = LOGAND(op_RSHIFT_13, SN(32, 0xffff)); + RzILOpPure *op_SUB_19 = SUB(CAST(32, MSB(CAST(16, MSB(op_AND_7), DUP(op_AND_7))), CAST(16, MSB(DUP(op_AND_7)), DUP(op_AND_7))), CAST(32, MSB(CAST(16, MSB(op_AND_15), DUP(op_AND_15))), CAST(16, 
MSB(DUP(op_AND_15)), DUP(op_AND_15)))); + RzILOpPure *op_LSHIFT_21 = SHIFTL0(op_SUB_19, SN(32, 16)); + RzILOpEffect *op_ASSIGN_22 = WRITE_REG(bundle, Rd_op, op_LSHIFT_21); + + RzILOpEffect *instruction_sequence = op_ASSIGN_22; + return instruction_sequence; +} + +// Rd = sub(Rt.h,Rs.l):<<16 +RzILOpEffect *hex_il_op_a2_subh_h16_hl(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // Rd = (((st32) ((st16) ((Rt >> 0x10) & 0xffff))) - ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) << 0x10); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rt, SN(32, 16)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_13 = SHIFTRA(Rs, SN(32, 0)); + RzILOpPure *op_AND_15 = LOGAND(op_RSHIFT_13, SN(32, 0xffff)); + RzILOpPure *op_SUB_19 = SUB(CAST(32, MSB(CAST(16, MSB(op_AND_7), DUP(op_AND_7))), CAST(16, MSB(DUP(op_AND_7)), DUP(op_AND_7))), CAST(32, MSB(CAST(16, MSB(op_AND_15), DUP(op_AND_15))), CAST(16, MSB(DUP(op_AND_15)), DUP(op_AND_15)))); + RzILOpPure *op_LSHIFT_21 = SHIFTL0(op_SUB_19, SN(32, 16)); + RzILOpEffect *op_ASSIGN_22 = WRITE_REG(bundle, Rd_op, op_LSHIFT_21); + + RzILOpEffect *instruction_sequence = op_ASSIGN_22; + return instruction_sequence; +} + +// Rd = sub(Rt.l,Rs.h):<<16 +RzILOpEffect *hex_il_op_a2_subh_h16_lh(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // Rd = (((st32) ((st16) ((Rt >> 0x0) & 0xffff))) - ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) << 0x10); + 
RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_13 = SHIFTRA(Rs, SN(32, 16)); + RzILOpPure *op_AND_15 = LOGAND(op_RSHIFT_13, SN(32, 0xffff)); + RzILOpPure *op_SUB_19 = SUB(CAST(32, MSB(CAST(16, MSB(op_AND_7), DUP(op_AND_7))), CAST(16, MSB(DUP(op_AND_7)), DUP(op_AND_7))), CAST(32, MSB(CAST(16, MSB(op_AND_15), DUP(op_AND_15))), CAST(16, MSB(DUP(op_AND_15)), DUP(op_AND_15)))); + RzILOpPure *op_LSHIFT_21 = SHIFTL0(op_SUB_19, SN(32, 16)); + RzILOpEffect *op_ASSIGN_22 = WRITE_REG(bundle, Rd_op, op_LSHIFT_21); + + RzILOpEffect *instruction_sequence = op_ASSIGN_22; + return instruction_sequence; +} + +// Rd = sub(Rt.l,Rs.l):<<16 +RzILOpEffect *hex_il_op_a2_subh_h16_ll(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // Rd = (((st32) ((st16) ((Rt >> 0x0) & 0xffff))) - ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) << 0x10); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_13 = SHIFTRA(Rs, SN(32, 0)); + RzILOpPure *op_AND_15 = LOGAND(op_RSHIFT_13, SN(32, 0xffff)); + RzILOpPure *op_SUB_19 = SUB(CAST(32, MSB(CAST(16, MSB(op_AND_7), DUP(op_AND_7))), CAST(16, MSB(DUP(op_AND_7)), DUP(op_AND_7))), CAST(32, MSB(CAST(16, MSB(op_AND_15), DUP(op_AND_15))), CAST(16, MSB(DUP(op_AND_15)), DUP(op_AND_15)))); + RzILOpPure *op_LSHIFT_21 = SHIFTL0(op_SUB_19, SN(32, 16)); + RzILOpEffect *op_ASSIGN_22 = WRITE_REG(bundle, Rd_op, op_LSHIFT_21); + + RzILOpEffect *instruction_sequence = op_ASSIGN_22; + return instruction_sequence; +} + +// Rd = sub(Rt.h,Rs.h):sat:<<16 +RzILOpEffect *hex_il_op_a2_subh_h16_sat_hh(HexInsnPktBundle *bundle) 
{ + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_66 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((st32) ((st16) ((Rt >> 0x10) & 0xffff))) - ((st32) ((st16) ((Rs >> 0x10) & 0xffff)))), 0x0, 0x10) == ((st64) ((st32) ((st16) ((Rt >> 0x10) & 0xffff))) - ((st32) ((st16) ((Rs >> 0x10) & 0xffff)))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st32) ((st16) ((Rt >> 0x10) & 0xffff))) - ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) < 0x0) ? (-(0x1 << 0xf)) : (0x1 << 0xf) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_8 = SHIFTRA(Rt, SN(32, 16)); + RzILOpPure *op_AND_10 = LOGAND(op_RSHIFT_8, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_16 = SHIFTRA(Rs, SN(32, 16)); + RzILOpPure *op_AND_18 = LOGAND(op_RSHIFT_16, SN(32, 0xffff)); + RzILOpPure *op_SUB_22 = SUB(CAST(32, MSB(CAST(16, MSB(op_AND_10), DUP(op_AND_10))), CAST(16, MSB(DUP(op_AND_10)), DUP(op_AND_10))), CAST(32, MSB(CAST(16, MSB(op_AND_18), DUP(op_AND_18))), CAST(16, MSB(DUP(op_AND_18)), DUP(op_AND_18)))); + RzILOpPure *op_RSHIFT_31 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_33 = LOGAND(op_RSHIFT_31, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_38 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_40 = LOGAND(op_RSHIFT_38, SN(32, 0xffff)); + RzILOpPure *op_SUB_44 = SUB(CAST(32, MSB(CAST(16, MSB(op_AND_33), DUP(op_AND_33))), CAST(16, MSB(DUP(op_AND_33)), DUP(op_AND_33))), CAST(32, MSB(CAST(16, MSB(op_AND_40), DUP(op_AND_40))), CAST(16, MSB(DUP(op_AND_40)), DUP(op_AND_40)))); + RzILOpPure *op_EQ_46 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_SUB_22), 
SN(32, 0), SN(32, 16)), CAST(64, MSB(op_SUB_44), DUP(op_SUB_44))); + RzILOpPure *op_RSHIFT_70 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_72 = LOGAND(op_RSHIFT_70, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_77 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_79 = LOGAND(op_RSHIFT_77, SN(32, 0xffff)); + RzILOpPure *op_SUB_83 = SUB(CAST(32, MSB(CAST(16, MSB(op_AND_72), DUP(op_AND_72))), CAST(16, MSB(DUP(op_AND_72)), DUP(op_AND_72))), CAST(32, MSB(CAST(16, MSB(op_AND_79), DUP(op_AND_79))), CAST(16, MSB(DUP(op_AND_79)), DUP(op_AND_79)))); + RzILOpPure *op_LT_85 = SLT(op_SUB_83, SN(32, 0)); + RzILOpPure *op_LSHIFT_90 = SHIFTL0(SN(64, 1), SN(32, 15)); + RzILOpPure *op_NEG_91 = NEG(op_LSHIFT_90); + RzILOpPure *op_LSHIFT_96 = SHIFTL0(SN(64, 1), SN(32, 15)); + RzILOpPure *op_SUB_99 = SUB(op_LSHIFT_96, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_100 = ITE(op_LT_85, op_NEG_91, op_SUB_99); + RzILOpEffect *gcc_expr_101 = BRANCH(op_EQ_46, EMPTY(), set_usr_field_call_66); + + // h_tmp14 = HYB(gcc_expr_if ((sextract64(((ut64) ((st32) ((st16) ((Rt >> 0x10) & 0xffff))) - ((st32) ((st16) ((Rs >> 0x10) & 0xffff)))), 0x0, 0x10) == ((st64) ((st32) ((st16) ((Rt >> 0x10) & 0xffff))) - ((st32) ((st16) ((Rs >> 0x10) & 0xffff)))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st32) ((st16) ((Rt >> 0x10) & 0xffff))) - ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) < 0x0) ? (-(0x1 << 0xf)) : (0x1 << 0xf) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_103 = SETL("h_tmp14", cond_100); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st32) ((st16) ((Rt >> ...; + RzILOpEffect *seq_104 = SEQN(2, gcc_expr_101, op_ASSIGN_hybrid_tmp_103); + + // Rd = ((st32) (((sextract64(((ut64) ((st32) ((st16) ((Rt >> 0x10) & 0xffff))) - ((st32) ((st16) ((Rs >> 0x10) & 0xffff)))), 0x0, 0x10) == ((st64) ((st32) ((st16) ((Rt >> 0x10) & 0xffff))) - ((st32) ((st16) ((Rs >> 0x10) & 0xffff))))) ? 
((st64) ((st32) ((st16) ((Rt >> 0x10) & 0xffff))) - ((st32) ((st16) ((Rs >> 0x10) & 0xffff)))) : h_tmp14) << 0x10)); + RzILOpPure *op_RSHIFT_50 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_52 = LOGAND(op_RSHIFT_50, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_57 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_59 = LOGAND(op_RSHIFT_57, SN(32, 0xffff)); + RzILOpPure *op_SUB_63 = SUB(CAST(32, MSB(CAST(16, MSB(op_AND_52), DUP(op_AND_52))), CAST(16, MSB(DUP(op_AND_52)), DUP(op_AND_52))), CAST(32, MSB(CAST(16, MSB(op_AND_59), DUP(op_AND_59))), CAST(16, MSB(DUP(op_AND_59)), DUP(op_AND_59)))); + RzILOpPure *cond_106 = ITE(DUP(op_EQ_46), CAST(64, MSB(op_SUB_63), DUP(op_SUB_63)), VARL("h_tmp14")); + RzILOpPure *op_LSHIFT_108 = SHIFTL0(cond_106, SN(32, 16)); + RzILOpEffect *op_ASSIGN_110 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(op_LSHIFT_108), DUP(op_LSHIFT_108))); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st32) ((st16) ((R ...; + RzILOpEffect *seq_111 = SEQN(2, seq_104, op_ASSIGN_110); + + RzILOpEffect *instruction_sequence = seq_111; + return instruction_sequence; +} + +// Rd = sub(Rt.h,Rs.l):sat:<<16 +RzILOpEffect *hex_il_op_a2_subh_h16_sat_hl(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_66 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((st32) ((st16) ((Rt >> 0x10) & 0xffff))) - ((st32) ((st16) ((Rs >> 0x0) & 0xffff)))), 0x0, 0x10) == ((st64) ((st32) ((st16) ((Rt >> 0x10) & 0xffff))) - ((st32) ((st16) ((Rs >> 0x0) & 0xffff)))))) {{}} else {set_usr_field(bundle, 
HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st32) ((st16) ((Rt >> 0x10) & 0xffff))) - ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) < 0x0) ? (-(0x1 << 0xf)) : (0x1 << 0xf) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_8 = SHIFTRA(Rt, SN(32, 16)); + RzILOpPure *op_AND_10 = LOGAND(op_RSHIFT_8, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_16 = SHIFTRA(Rs, SN(32, 0)); + RzILOpPure *op_AND_18 = LOGAND(op_RSHIFT_16, SN(32, 0xffff)); + RzILOpPure *op_SUB_22 = SUB(CAST(32, MSB(CAST(16, MSB(op_AND_10), DUP(op_AND_10))), CAST(16, MSB(DUP(op_AND_10)), DUP(op_AND_10))), CAST(32, MSB(CAST(16, MSB(op_AND_18), DUP(op_AND_18))), CAST(16, MSB(DUP(op_AND_18)), DUP(op_AND_18)))); + RzILOpPure *op_RSHIFT_31 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_33 = LOGAND(op_RSHIFT_31, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_38 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_40 = LOGAND(op_RSHIFT_38, SN(32, 0xffff)); + RzILOpPure *op_SUB_44 = SUB(CAST(32, MSB(CAST(16, MSB(op_AND_33), DUP(op_AND_33))), CAST(16, MSB(DUP(op_AND_33)), DUP(op_AND_33))), CAST(32, MSB(CAST(16, MSB(op_AND_40), DUP(op_AND_40))), CAST(16, MSB(DUP(op_AND_40)), DUP(op_AND_40)))); + RzILOpPure *op_EQ_46 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_SUB_22), SN(32, 0), SN(32, 16)), CAST(64, MSB(op_SUB_44), DUP(op_SUB_44))); + RzILOpPure *op_RSHIFT_70 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_72 = LOGAND(op_RSHIFT_70, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_77 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_79 = LOGAND(op_RSHIFT_77, SN(32, 0xffff)); + RzILOpPure *op_SUB_83 = SUB(CAST(32, MSB(CAST(16, MSB(op_AND_72), DUP(op_AND_72))), CAST(16, MSB(DUP(op_AND_72)), DUP(op_AND_72))), CAST(32, MSB(CAST(16, MSB(op_AND_79), DUP(op_AND_79))), CAST(16, MSB(DUP(op_AND_79)), DUP(op_AND_79)))); + RzILOpPure *op_LT_85 = SLT(op_SUB_83, SN(32, 0)); + RzILOpPure *op_LSHIFT_90 = SHIFTL0(SN(64, 1), SN(32, 15)); + RzILOpPure *op_NEG_91 = NEG(op_LSHIFT_90); + RzILOpPure *op_LSHIFT_96 = SHIFTL0(SN(64, 1), SN(32, 15)); + RzILOpPure 
*op_SUB_99 = SUB(op_LSHIFT_96, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_100 = ITE(op_LT_85, op_NEG_91, op_SUB_99); + RzILOpEffect *gcc_expr_101 = BRANCH(op_EQ_46, EMPTY(), set_usr_field_call_66); + + // h_tmp15 = HYB(gcc_expr_if ((sextract64(((ut64) ((st32) ((st16) ((Rt >> 0x10) & 0xffff))) - ((st32) ((st16) ((Rs >> 0x0) & 0xffff)))), 0x0, 0x10) == ((st64) ((st32) ((st16) ((Rt >> 0x10) & 0xffff))) - ((st32) ((st16) ((Rs >> 0x0) & 0xffff)))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st32) ((st16) ((Rt >> 0x10) & 0xffff))) - ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) < 0x0) ? (-(0x1 << 0xf)) : (0x1 << 0xf) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_103 = SETL("h_tmp15", cond_100); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st32) ((st16) ((Rt >> ...; + RzILOpEffect *seq_104 = SEQN(2, gcc_expr_101, op_ASSIGN_hybrid_tmp_103); + + // Rd = ((st32) (((sextract64(((ut64) ((st32) ((st16) ((Rt >> 0x10) & 0xffff))) - ((st32) ((st16) ((Rs >> 0x0) & 0xffff)))), 0x0, 0x10) == ((st64) ((st32) ((st16) ((Rt >> 0x10) & 0xffff))) - ((st32) ((st16) ((Rs >> 0x0) & 0xffff))))) ? 
((st64) ((st32) ((st16) ((Rt >> 0x10) & 0xffff))) - ((st32) ((st16) ((Rs >> 0x0) & 0xffff)))) : h_tmp15) << 0x10)); + RzILOpPure *op_RSHIFT_50 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_52 = LOGAND(op_RSHIFT_50, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_57 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_59 = LOGAND(op_RSHIFT_57, SN(32, 0xffff)); + RzILOpPure *op_SUB_63 = SUB(CAST(32, MSB(CAST(16, MSB(op_AND_52), DUP(op_AND_52))), CAST(16, MSB(DUP(op_AND_52)), DUP(op_AND_52))), CAST(32, MSB(CAST(16, MSB(op_AND_59), DUP(op_AND_59))), CAST(16, MSB(DUP(op_AND_59)), DUP(op_AND_59)))); + RzILOpPure *cond_106 = ITE(DUP(op_EQ_46), CAST(64, MSB(op_SUB_63), DUP(op_SUB_63)), VARL("h_tmp15")); + RzILOpPure *op_LSHIFT_108 = SHIFTL0(cond_106, SN(32, 16)); + RzILOpEffect *op_ASSIGN_110 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(op_LSHIFT_108), DUP(op_LSHIFT_108))); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st32) ((st16) ((R ...; + RzILOpEffect *seq_111 = SEQN(2, seq_104, op_ASSIGN_110); + + RzILOpEffect *instruction_sequence = seq_111; + return instruction_sequence; +} + +// Rd = sub(Rt.l,Rs.h):sat:<<16 +RzILOpEffect *hex_il_op_a2_subh_h16_sat_lh(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_66 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((st32) ((st16) ((Rt >> 0x0) & 0xffff))) - ((st32) ((st16) ((Rs >> 0x10) & 0xffff)))), 0x0, 0x10) == ((st64) ((st32) ((st16) ((Rt >> 0x0) & 0xffff))) - ((st32) ((st16) ((Rs >> 0x10) & 0xffff)))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, 
((ut32) 0x1))}, ((((st32) ((st16) ((Rt >> 0x0) & 0xffff))) - ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) < 0x0) ? (-(0x1 << 0xf)) : (0x1 << 0xf) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_8 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_10 = LOGAND(op_RSHIFT_8, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_16 = SHIFTRA(Rs, SN(32, 16)); + RzILOpPure *op_AND_18 = LOGAND(op_RSHIFT_16, SN(32, 0xffff)); + RzILOpPure *op_SUB_22 = SUB(CAST(32, MSB(CAST(16, MSB(op_AND_10), DUP(op_AND_10))), CAST(16, MSB(DUP(op_AND_10)), DUP(op_AND_10))), CAST(32, MSB(CAST(16, MSB(op_AND_18), DUP(op_AND_18))), CAST(16, MSB(DUP(op_AND_18)), DUP(op_AND_18)))); + RzILOpPure *op_RSHIFT_31 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_33 = LOGAND(op_RSHIFT_31, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_38 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_40 = LOGAND(op_RSHIFT_38, SN(32, 0xffff)); + RzILOpPure *op_SUB_44 = SUB(CAST(32, MSB(CAST(16, MSB(op_AND_33), DUP(op_AND_33))), CAST(16, MSB(DUP(op_AND_33)), DUP(op_AND_33))), CAST(32, MSB(CAST(16, MSB(op_AND_40), DUP(op_AND_40))), CAST(16, MSB(DUP(op_AND_40)), DUP(op_AND_40)))); + RzILOpPure *op_EQ_46 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_SUB_22), SN(32, 0), SN(32, 16)), CAST(64, MSB(op_SUB_44), DUP(op_SUB_44))); + RzILOpPure *op_RSHIFT_70 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_72 = LOGAND(op_RSHIFT_70, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_77 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_79 = LOGAND(op_RSHIFT_77, SN(32, 0xffff)); + RzILOpPure *op_SUB_83 = SUB(CAST(32, MSB(CAST(16, MSB(op_AND_72), DUP(op_AND_72))), CAST(16, MSB(DUP(op_AND_72)), DUP(op_AND_72))), CAST(32, MSB(CAST(16, MSB(op_AND_79), DUP(op_AND_79))), CAST(16, MSB(DUP(op_AND_79)), DUP(op_AND_79)))); + RzILOpPure *op_LT_85 = SLT(op_SUB_83, SN(32, 0)); + RzILOpPure *op_LSHIFT_90 = SHIFTL0(SN(64, 1), SN(32, 15)); + RzILOpPure *op_NEG_91 = NEG(op_LSHIFT_90); + RzILOpPure *op_LSHIFT_96 = SHIFTL0(SN(64, 1), SN(32, 15)); + RzILOpPure *op_SUB_99 = 
SUB(op_LSHIFT_96, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_100 = ITE(op_LT_85, op_NEG_91, op_SUB_99); + RzILOpEffect *gcc_expr_101 = BRANCH(op_EQ_46, EMPTY(), set_usr_field_call_66); + + // h_tmp16 = HYB(gcc_expr_if ((sextract64(((ut64) ((st32) ((st16) ((Rt >> 0x0) & 0xffff))) - ((st32) ((st16) ((Rs >> 0x10) & 0xffff)))), 0x0, 0x10) == ((st64) ((st32) ((st16) ((Rt >> 0x0) & 0xffff))) - ((st32) ((st16) ((Rs >> 0x10) & 0xffff)))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st32) ((st16) ((Rt >> 0x0) & 0xffff))) - ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) < 0x0) ? (-(0x1 << 0xf)) : (0x1 << 0xf) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_103 = SETL("h_tmp16", cond_100); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st32) ((st16) ((Rt >> ...; + RzILOpEffect *seq_104 = SEQN(2, gcc_expr_101, op_ASSIGN_hybrid_tmp_103); + + // Rd = ((st32) (((sextract64(((ut64) ((st32) ((st16) ((Rt >> 0x0) & 0xffff))) - ((st32) ((st16) ((Rs >> 0x10) & 0xffff)))), 0x0, 0x10) == ((st64) ((st32) ((st16) ((Rt >> 0x0) & 0xffff))) - ((st32) ((st16) ((Rs >> 0x10) & 0xffff))))) ? 
((st64) ((st32) ((st16) ((Rt >> 0x0) & 0xffff))) - ((st32) ((st16) ((Rs >> 0x10) & 0xffff)))) : h_tmp16) << 0x10)); + RzILOpPure *op_RSHIFT_50 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_52 = LOGAND(op_RSHIFT_50, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_57 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_59 = LOGAND(op_RSHIFT_57, SN(32, 0xffff)); + RzILOpPure *op_SUB_63 = SUB(CAST(32, MSB(CAST(16, MSB(op_AND_52), DUP(op_AND_52))), CAST(16, MSB(DUP(op_AND_52)), DUP(op_AND_52))), CAST(32, MSB(CAST(16, MSB(op_AND_59), DUP(op_AND_59))), CAST(16, MSB(DUP(op_AND_59)), DUP(op_AND_59)))); + RzILOpPure *cond_106 = ITE(DUP(op_EQ_46), CAST(64, MSB(op_SUB_63), DUP(op_SUB_63)), VARL("h_tmp16")); + RzILOpPure *op_LSHIFT_108 = SHIFTL0(cond_106, SN(32, 16)); + RzILOpEffect *op_ASSIGN_110 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(op_LSHIFT_108), DUP(op_LSHIFT_108))); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st32) ((st16) ((R ...; + RzILOpEffect *seq_111 = SEQN(2, seq_104, op_ASSIGN_110); + + RzILOpEffect *instruction_sequence = seq_111; + return instruction_sequence; +} + +// Rd = sub(Rt.l,Rs.l):sat:<<16 +RzILOpEffect *hex_il_op_a2_subh_h16_sat_ll(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_66 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((st32) ((st16) ((Rt >> 0x0) & 0xffff))) - ((st32) ((st16) ((Rs >> 0x0) & 0xffff)))), 0x0, 0x10) == ((st64) ((st32) ((st16) ((Rt >> 0x0) & 0xffff))) - ((st32) ((st16) ((Rs >> 0x0) & 0xffff)))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, 
((ut32) 0x1))}, ((((st32) ((st16) ((Rt >> 0x0) & 0xffff))) - ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) < 0x0) ? (-(0x1 << 0xf)) : (0x1 << 0xf) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_8 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_10 = LOGAND(op_RSHIFT_8, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_16 = SHIFTRA(Rs, SN(32, 0)); + RzILOpPure *op_AND_18 = LOGAND(op_RSHIFT_16, SN(32, 0xffff)); + RzILOpPure *op_SUB_22 = SUB(CAST(32, MSB(CAST(16, MSB(op_AND_10), DUP(op_AND_10))), CAST(16, MSB(DUP(op_AND_10)), DUP(op_AND_10))), CAST(32, MSB(CAST(16, MSB(op_AND_18), DUP(op_AND_18))), CAST(16, MSB(DUP(op_AND_18)), DUP(op_AND_18)))); + RzILOpPure *op_RSHIFT_31 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_33 = LOGAND(op_RSHIFT_31, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_38 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_40 = LOGAND(op_RSHIFT_38, SN(32, 0xffff)); + RzILOpPure *op_SUB_44 = SUB(CAST(32, MSB(CAST(16, MSB(op_AND_33), DUP(op_AND_33))), CAST(16, MSB(DUP(op_AND_33)), DUP(op_AND_33))), CAST(32, MSB(CAST(16, MSB(op_AND_40), DUP(op_AND_40))), CAST(16, MSB(DUP(op_AND_40)), DUP(op_AND_40)))); + RzILOpPure *op_EQ_46 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_SUB_22), SN(32, 0), SN(32, 16)), CAST(64, MSB(op_SUB_44), DUP(op_SUB_44))); + RzILOpPure *op_RSHIFT_70 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_72 = LOGAND(op_RSHIFT_70, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_77 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_79 = LOGAND(op_RSHIFT_77, SN(32, 0xffff)); + RzILOpPure *op_SUB_83 = SUB(CAST(32, MSB(CAST(16, MSB(op_AND_72), DUP(op_AND_72))), CAST(16, MSB(DUP(op_AND_72)), DUP(op_AND_72))), CAST(32, MSB(CAST(16, MSB(op_AND_79), DUP(op_AND_79))), CAST(16, MSB(DUP(op_AND_79)), DUP(op_AND_79)))); + RzILOpPure *op_LT_85 = SLT(op_SUB_83, SN(32, 0)); + RzILOpPure *op_LSHIFT_90 = SHIFTL0(SN(64, 1), SN(32, 15)); + RzILOpPure *op_NEG_91 = NEG(op_LSHIFT_90); + RzILOpPure *op_LSHIFT_96 = SHIFTL0(SN(64, 1), SN(32, 15)); + RzILOpPure *op_SUB_99 = SUB(op_LSHIFT_96, 
CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_100 = ITE(op_LT_85, op_NEG_91, op_SUB_99); + RzILOpEffect *gcc_expr_101 = BRANCH(op_EQ_46, EMPTY(), set_usr_field_call_66); + + // h_tmp17 = HYB(gcc_expr_if ((sextract64(((ut64) ((st32) ((st16) ((Rt >> 0x0) & 0xffff))) - ((st32) ((st16) ((Rs >> 0x0) & 0xffff)))), 0x0, 0x10) == ((st64) ((st32) ((st16) ((Rt >> 0x0) & 0xffff))) - ((st32) ((st16) ((Rs >> 0x0) & 0xffff)))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st32) ((st16) ((Rt >> 0x0) & 0xffff))) - ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) < 0x0) ? (-(0x1 << 0xf)) : (0x1 << 0xf) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_103 = SETL("h_tmp17", cond_100); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st32) ((st16) ((Rt >> ...; + RzILOpEffect *seq_104 = SEQN(2, gcc_expr_101, op_ASSIGN_hybrid_tmp_103); + + // Rd = ((st32) (((sextract64(((ut64) ((st32) ((st16) ((Rt >> 0x0) & 0xffff))) - ((st32) ((st16) ((Rs >> 0x0) & 0xffff)))), 0x0, 0x10) == ((st64) ((st32) ((st16) ((Rt >> 0x0) & 0xffff))) - ((st32) ((st16) ((Rs >> 0x0) & 0xffff))))) ? 
((st64) ((st32) ((st16) ((Rt >> 0x0) & 0xffff))) - ((st32) ((st16) ((Rs >> 0x0) & 0xffff)))) : h_tmp17) << 0x10)); + RzILOpPure *op_RSHIFT_50 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_52 = LOGAND(op_RSHIFT_50, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_57 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_59 = LOGAND(op_RSHIFT_57, SN(32, 0xffff)); + RzILOpPure *op_SUB_63 = SUB(CAST(32, MSB(CAST(16, MSB(op_AND_52), DUP(op_AND_52))), CAST(16, MSB(DUP(op_AND_52)), DUP(op_AND_52))), CAST(32, MSB(CAST(16, MSB(op_AND_59), DUP(op_AND_59))), CAST(16, MSB(DUP(op_AND_59)), DUP(op_AND_59)))); + RzILOpPure *cond_106 = ITE(DUP(op_EQ_46), CAST(64, MSB(op_SUB_63), DUP(op_SUB_63)), VARL("h_tmp17")); + RzILOpPure *op_LSHIFT_108 = SHIFTL0(cond_106, SN(32, 16)); + RzILOpEffect *op_ASSIGN_110 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(op_LSHIFT_108), DUP(op_LSHIFT_108))); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st32) ((st16) ((R ...; + RzILOpEffect *seq_111 = SEQN(2, seq_104, op_ASSIGN_110); + + RzILOpEffect *instruction_sequence = seq_111; + return instruction_sequence; +} + +// Rd = sub(Rt.l,Rs.h) +RzILOpEffect *hex_il_op_a2_subh_l16_hl(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // Rd = ((st32) sextract64(((ut64) ((st32) ((st16) ((Rt >> 0x0) & 0xffff))) - ((st32) ((st16) ((Rs >> 0x10) & 0xffff)))), 0x0, 0x10)); + RzILOpPure *op_RSHIFT_8 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_10 = LOGAND(op_RSHIFT_8, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_16 = SHIFTRA(Rs, SN(32, 16)); + RzILOpPure *op_AND_18 = LOGAND(op_RSHIFT_16, SN(32, 0xffff)); + RzILOpPure *op_SUB_22 = SUB(CAST(32, MSB(CAST(16, MSB(op_AND_10), DUP(op_AND_10))), CAST(16, MSB(DUP(op_AND_10)), 
DUP(op_AND_10))), CAST(32, MSB(CAST(16, MSB(op_AND_18), DUP(op_AND_18))), CAST(16, MSB(DUP(op_AND_18)), DUP(op_AND_18)))); + RzILOpEffect *op_ASSIGN_29 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(SEXTRACT64(CAST(64, IL_FALSE, op_SUB_22), SN(32, 0), SN(32, 16))), SEXTRACT64(CAST(64, IL_FALSE, DUP(op_SUB_22)), SN(32, 0), SN(32, 16)))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_29; + return instruction_sequence; +} + +// Rd = sub(Rt.l,Rs.l) +RzILOpEffect *hex_il_op_a2_subh_l16_ll(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // Rd = ((st32) sextract64(((ut64) ((st32) ((st16) ((Rt >> 0x0) & 0xffff))) - ((st32) ((st16) ((Rs >> 0x0) & 0xffff)))), 0x0, 0x10)); + RzILOpPure *op_RSHIFT_8 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_10 = LOGAND(op_RSHIFT_8, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_16 = SHIFTRA(Rs, SN(32, 0)); + RzILOpPure *op_AND_18 = LOGAND(op_RSHIFT_16, SN(32, 0xffff)); + RzILOpPure *op_SUB_22 = SUB(CAST(32, MSB(CAST(16, MSB(op_AND_10), DUP(op_AND_10))), CAST(16, MSB(DUP(op_AND_10)), DUP(op_AND_10))), CAST(32, MSB(CAST(16, MSB(op_AND_18), DUP(op_AND_18))), CAST(16, MSB(DUP(op_AND_18)), DUP(op_AND_18)))); + RzILOpEffect *op_ASSIGN_29 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(SEXTRACT64(CAST(64, IL_FALSE, op_SUB_22), SN(32, 0), SN(32, 16))), SEXTRACT64(CAST(64, IL_FALSE, DUP(op_SUB_22)), SN(32, 0), SN(32, 16)))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_29; + return instruction_sequence; +} + +// Rd = sub(Rt.l,Rs.h):sat +RzILOpEffect *hex_il_op_a2_subh_l16_sat_hl(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rt_op = ISA2REG(hi, 
't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_66 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((st32) ((st16) ((Rt >> 0x0) & 0xffff))) - ((st32) ((st16) ((Rs >> 0x10) & 0xffff)))), 0x0, 0x10) == ((st64) ((st32) ((st16) ((Rt >> 0x0) & 0xffff))) - ((st32) ((st16) ((Rs >> 0x10) & 0xffff)))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st32) ((st16) ((Rt >> 0x0) & 0xffff))) - ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) < 0x0) ? (-(0x1 << 0xf)) : (0x1 << 0xf) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_8 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_10 = LOGAND(op_RSHIFT_8, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_16 = SHIFTRA(Rs, SN(32, 16)); + RzILOpPure *op_AND_18 = LOGAND(op_RSHIFT_16, SN(32, 0xffff)); + RzILOpPure *op_SUB_22 = SUB(CAST(32, MSB(CAST(16, MSB(op_AND_10), DUP(op_AND_10))), CAST(16, MSB(DUP(op_AND_10)), DUP(op_AND_10))), CAST(32, MSB(CAST(16, MSB(op_AND_18), DUP(op_AND_18))), CAST(16, MSB(DUP(op_AND_18)), DUP(op_AND_18)))); + RzILOpPure *op_RSHIFT_31 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_33 = LOGAND(op_RSHIFT_31, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_38 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_40 = LOGAND(op_RSHIFT_38, SN(32, 0xffff)); + RzILOpPure *op_SUB_44 = SUB(CAST(32, MSB(CAST(16, MSB(op_AND_33), DUP(op_AND_33))), CAST(16, MSB(DUP(op_AND_33)), DUP(op_AND_33))), CAST(32, MSB(CAST(16, MSB(op_AND_40), DUP(op_AND_40))), CAST(16, MSB(DUP(op_AND_40)), DUP(op_AND_40)))); + RzILOpPure *op_EQ_46 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_SUB_22), SN(32, 0), SN(32, 16)), CAST(64, MSB(op_SUB_44), DUP(op_SUB_44))); + RzILOpPure *op_RSHIFT_70 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_72 = 
LOGAND(op_RSHIFT_70, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_77 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_79 = LOGAND(op_RSHIFT_77, SN(32, 0xffff)); + RzILOpPure *op_SUB_83 = SUB(CAST(32, MSB(CAST(16, MSB(op_AND_72), DUP(op_AND_72))), CAST(16, MSB(DUP(op_AND_72)), DUP(op_AND_72))), CAST(32, MSB(CAST(16, MSB(op_AND_79), DUP(op_AND_79))), CAST(16, MSB(DUP(op_AND_79)), DUP(op_AND_79)))); + RzILOpPure *op_LT_85 = SLT(op_SUB_83, SN(32, 0)); + RzILOpPure *op_LSHIFT_90 = SHIFTL0(SN(64, 1), SN(32, 15)); + RzILOpPure *op_NEG_91 = NEG(op_LSHIFT_90); + RzILOpPure *op_LSHIFT_96 = SHIFTL0(SN(64, 1), SN(32, 15)); + RzILOpPure *op_SUB_99 = SUB(op_LSHIFT_96, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_100 = ITE(op_LT_85, op_NEG_91, op_SUB_99); + RzILOpEffect *gcc_expr_101 = BRANCH(op_EQ_46, EMPTY(), set_usr_field_call_66); + + // h_tmp18 = HYB(gcc_expr_if ((sextract64(((ut64) ((st32) ((st16) ((Rt >> 0x0) & 0xffff))) - ((st32) ((st16) ((Rs >> 0x10) & 0xffff)))), 0x0, 0x10) == ((st64) ((st32) ((st16) ((Rt >> 0x0) & 0xffff))) - ((st32) ((st16) ((Rs >> 0x10) & 0xffff)))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st32) ((st16) ((Rt >> 0x0) & 0xffff))) - ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) < 0x0) ? (-(0x1 << 0xf)) : (0x1 << 0xf) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_103 = SETL("h_tmp18", cond_100); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st32) ((st16) ((Rt >> ...; + RzILOpEffect *seq_104 = SEQN(2, gcc_expr_101, op_ASSIGN_hybrid_tmp_103); + + // Rd = ((st32) ((sextract64(((ut64) ((st32) ((st16) ((Rt >> 0x0) & 0xffff))) - ((st32) ((st16) ((Rs >> 0x10) & 0xffff)))), 0x0, 0x10) == ((st64) ((st32) ((st16) ((Rt >> 0x0) & 0xffff))) - ((st32) ((st16) ((Rs >> 0x10) & 0xffff))))) ? 
((st64) ((st32) ((st16) ((Rt >> 0x0) & 0xffff))) - ((st32) ((st16) ((Rs >> 0x10) & 0xffff)))) : h_tmp18)); + RzILOpPure *op_RSHIFT_50 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_52 = LOGAND(op_RSHIFT_50, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_57 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_59 = LOGAND(op_RSHIFT_57, SN(32, 0xffff)); + RzILOpPure *op_SUB_63 = SUB(CAST(32, MSB(CAST(16, MSB(op_AND_52), DUP(op_AND_52))), CAST(16, MSB(DUP(op_AND_52)), DUP(op_AND_52))), CAST(32, MSB(CAST(16, MSB(op_AND_59), DUP(op_AND_59))), CAST(16, MSB(DUP(op_AND_59)), DUP(op_AND_59)))); + RzILOpPure *cond_106 = ITE(DUP(op_EQ_46), CAST(64, MSB(op_SUB_63), DUP(op_SUB_63)), VARL("h_tmp18")); + RzILOpEffect *op_ASSIGN_108 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(cond_106), DUP(cond_106))); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st32) ((st16) ((R ...; + RzILOpEffect *seq_109 = SEQN(2, seq_104, op_ASSIGN_108); + + RzILOpEffect *instruction_sequence = seq_109; + return instruction_sequence; +} + +// Rd = sub(Rt.l,Rs.l):sat +RzILOpEffect *hex_il_op_a2_subh_l16_sat_ll(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_66 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((st32) ((st16) ((Rt >> 0x0) & 0xffff))) - ((st32) ((st16) ((Rs >> 0x0) & 0xffff)))), 0x0, 0x10) == ((st64) ((st32) ((st16) ((Rt >> 0x0) & 0xffff))) - ((st32) ((st16) ((Rs >> 0x0) & 0xffff)))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st32) ((st16) ((Rt >> 0x0) & 0xffff))) - ((st32) ((st16) ((Rs >> 
0x0) & 0xffff))) < 0x0) ? (-(0x1 << 0xf)) : (0x1 << 0xf) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_8 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_10 = LOGAND(op_RSHIFT_8, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_16 = SHIFTRA(Rs, SN(32, 0)); + RzILOpPure *op_AND_18 = LOGAND(op_RSHIFT_16, SN(32, 0xffff)); + RzILOpPure *op_SUB_22 = SUB(CAST(32, MSB(CAST(16, MSB(op_AND_10), DUP(op_AND_10))), CAST(16, MSB(DUP(op_AND_10)), DUP(op_AND_10))), CAST(32, MSB(CAST(16, MSB(op_AND_18), DUP(op_AND_18))), CAST(16, MSB(DUP(op_AND_18)), DUP(op_AND_18)))); + RzILOpPure *op_RSHIFT_31 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_33 = LOGAND(op_RSHIFT_31, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_38 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_40 = LOGAND(op_RSHIFT_38, SN(32, 0xffff)); + RzILOpPure *op_SUB_44 = SUB(CAST(32, MSB(CAST(16, MSB(op_AND_33), DUP(op_AND_33))), CAST(16, MSB(DUP(op_AND_33)), DUP(op_AND_33))), CAST(32, MSB(CAST(16, MSB(op_AND_40), DUP(op_AND_40))), CAST(16, MSB(DUP(op_AND_40)), DUP(op_AND_40)))); + RzILOpPure *op_EQ_46 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_SUB_22), SN(32, 0), SN(32, 16)), CAST(64, MSB(op_SUB_44), DUP(op_SUB_44))); + RzILOpPure *op_RSHIFT_70 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_72 = LOGAND(op_RSHIFT_70, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_77 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_79 = LOGAND(op_RSHIFT_77, SN(32, 0xffff)); + RzILOpPure *op_SUB_83 = SUB(CAST(32, MSB(CAST(16, MSB(op_AND_72), DUP(op_AND_72))), CAST(16, MSB(DUP(op_AND_72)), DUP(op_AND_72))), CAST(32, MSB(CAST(16, MSB(op_AND_79), DUP(op_AND_79))), CAST(16, MSB(DUP(op_AND_79)), DUP(op_AND_79)))); + RzILOpPure *op_LT_85 = SLT(op_SUB_83, SN(32, 0)); + RzILOpPure *op_LSHIFT_90 = SHIFTL0(SN(64, 1), SN(32, 15)); + RzILOpPure *op_NEG_91 = NEG(op_LSHIFT_90); + RzILOpPure *op_LSHIFT_96 = SHIFTL0(SN(64, 1), SN(32, 15)); + RzILOpPure *op_SUB_99 = SUB(op_LSHIFT_96, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_100 = ITE(op_LT_85, op_NEG_91, 
op_SUB_99); + RzILOpEffect *gcc_expr_101 = BRANCH(op_EQ_46, EMPTY(), set_usr_field_call_66); + + // h_tmp19 = HYB(gcc_expr_if ((sextract64(((ut64) ((st32) ((st16) ((Rt >> 0x0) & 0xffff))) - ((st32) ((st16) ((Rs >> 0x0) & 0xffff)))), 0x0, 0x10) == ((st64) ((st32) ((st16) ((Rt >> 0x0) & 0xffff))) - ((st32) ((st16) ((Rs >> 0x0) & 0xffff)))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st32) ((st16) ((Rt >> 0x0) & 0xffff))) - ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) < 0x0) ? (-(0x1 << 0xf)) : (0x1 << 0xf) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_103 = SETL("h_tmp19", cond_100); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st32) ((st16) ((Rt >> ...; + RzILOpEffect *seq_104 = SEQN(2, gcc_expr_101, op_ASSIGN_hybrid_tmp_103); + + // Rd = ((st32) ((sextract64(((ut64) ((st32) ((st16) ((Rt >> 0x0) & 0xffff))) - ((st32) ((st16) ((Rs >> 0x0) & 0xffff)))), 0x0, 0x10) == ((st64) ((st32) ((st16) ((Rt >> 0x0) & 0xffff))) - ((st32) ((st16) ((Rs >> 0x0) & 0xffff))))) ? 
((st64) ((st32) ((st16) ((Rt >> 0x0) & 0xffff))) - ((st32) ((st16) ((Rs >> 0x0) & 0xffff)))) : h_tmp19)); + RzILOpPure *op_RSHIFT_50 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_52 = LOGAND(op_RSHIFT_50, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_57 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_59 = LOGAND(op_RSHIFT_57, SN(32, 0xffff)); + RzILOpPure *op_SUB_63 = SUB(CAST(32, MSB(CAST(16, MSB(op_AND_52), DUP(op_AND_52))), CAST(16, MSB(DUP(op_AND_52)), DUP(op_AND_52))), CAST(32, MSB(CAST(16, MSB(op_AND_59), DUP(op_AND_59))), CAST(16, MSB(DUP(op_AND_59)), DUP(op_AND_59)))); + RzILOpPure *cond_106 = ITE(DUP(op_EQ_46), CAST(64, MSB(op_SUB_63), DUP(op_SUB_63)), VARL("h_tmp19")); + RzILOpEffect *op_ASSIGN_108 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(cond_106), DUP(cond_106))); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st32) ((st16) ((R ...; + RzILOpEffect *seq_109 = SEQN(2, seq_104, op_ASSIGN_108); + + RzILOpEffect *instruction_sequence = seq_109; + return instruction_sequence; +} + +// Rdd = sub(Rtt,Rss) +RzILOpEffect *hex_il_op_a2_subp(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + + // Rdd = Rtt - Rss; + RzILOpPure *op_SUB_3 = SUB(Rtt, Rss); + RzILOpEffect *op_ASSIGN_4 = WRITE_REG(bundle, Rdd_op, op_SUB_3); + + RzILOpEffect *instruction_sequence = op_ASSIGN_4; + return instruction_sequence; +} + +// Rd = sub(Ii,Rs) +RzILOpEffect *hex_il_op_a2_subri(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // s 
= s; + RzILOpEffect *imm_assign_0 = SETL("s", s); + + // Rd = s - Rs; + RzILOpPure *op_SUB_4 = SUB(VARL("s"), Rs); + RzILOpEffect *op_ASSIGN_5 = WRITE_REG(bundle, Rd_op, op_SUB_4); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_0, op_ASSIGN_5); + return instruction_sequence; +} + +// Rd = sub(Rt,Rs):sat +RzILOpEffect *hex_il_op_a2_subsat(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_23 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((st64) Rt) - ((st64) Rs)), 0x0, 0x20) == ((st64) Rt) - ((st64) Rs))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) Rt) - ((st64) Rs) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_SUB_8 = SUB(CAST(64, MSB(Rt), DUP(Rt)), CAST(64, MSB(Rs), DUP(Rs))); + RzILOpPure *op_SUB_16 = SUB(CAST(64, MSB(DUP(Rt)), DUP(Rt)), CAST(64, MSB(DUP(Rs)), DUP(Rs))); + RzILOpPure *op_EQ_17 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_SUB_8), SN(32, 0), SN(32, 0x20)), op_SUB_16); + RzILOpPure *op_SUB_26 = SUB(CAST(64, MSB(DUP(Rt)), DUP(Rt)), CAST(64, MSB(DUP(Rs)), DUP(Rs))); + RzILOpPure *op_LT_29 = SLT(op_SUB_26, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_34 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_35 = NEG(op_LSHIFT_34); + RzILOpPure *op_LSHIFT_40 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_43 = SUB(op_LSHIFT_40, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_44 = ITE(op_LT_29, op_NEG_35, op_SUB_43); + RzILOpEffect *gcc_expr_45 = BRANCH(op_EQ_17, EMPTY(), set_usr_field_call_23); + + // h_tmp20 = HYB(gcc_expr_if ((sextract64(((ut64) ((st64) Rt) - ((st64) Rs)), 0x0, 0x20) == ((st64) Rt) - ((st64) Rs))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) Rt) - ((st64) Rs) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_47 = SETL("h_tmp20", cond_44); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) Rt) - ((st64) R ...; + RzILOpEffect *seq_48 = SEQN(2, gcc_expr_45, op_ASSIGN_hybrid_tmp_47); + + // Rd = ((st32) ((sextract64(((ut64) ((st64) Rt) - ((st64) Rs)), 0x0, 0x20) == ((st64) Rt) - ((st64) Rs)) ? 
((st64) Rt) - ((st64) Rs) : h_tmp20)); + RzILOpPure *op_SUB_20 = SUB(CAST(64, MSB(DUP(Rt)), DUP(Rt)), CAST(64, MSB(DUP(Rs)), DUP(Rs))); + RzILOpPure *cond_49 = ITE(DUP(op_EQ_17), op_SUB_20, VARL("h_tmp20")); + RzILOpEffect *op_ASSIGN_51 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(cond_49), DUP(cond_49))); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) Rt) - ((st6 ...; + RzILOpEffect *seq_52 = SEQN(2, seq_48, op_ASSIGN_51); + + RzILOpEffect *instruction_sequence = seq_52; + return instruction_sequence; +} + +// Rd = vaddh(Rs,Rt) +RzILOpEffect *hex_il_op_a2_svaddh(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: st32 i; + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_2 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_5 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp21 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_7 = SETL("h_tmp21", VARL("i")); + + // seq(h_tmp21 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_8 = SEQN(2, op_ASSIGN_hybrid_tmp_7, op_INC_5); + + // Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xffff << i * 0x10)))) | (((ut64) (((st32) ((st16) ((Rs >> i * 0x10) & 0xffff))) + ((st32) ((st16) ((Rt >> i * 0x10) & 0xffff))) & 0xffff)) << i * 0x10))); + RzILOpPure *op_MUL_12 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_13 = SHIFTL0(SN(64, 0xffff), op_MUL_12); + RzILOpPure *op_NOT_14 = LOGNOT(op_LSHIFT_13); + RzILOpPure *op_AND_16 = LOGAND(CAST(64, MSB(READ_REG(pkt, Rd_op, true)), READ_REG(pkt, Rd_op, true)), op_NOT_14); + RzILOpPure *op_MUL_19 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_20 = SHIFTRA(Rs, op_MUL_19); + RzILOpPure *op_AND_22 = LOGAND(op_RSHIFT_20, SN(32, 0xffff)); + RzILOpPure *op_MUL_26 = MUL(VARL("i"), SN(32, 
16)); + RzILOpPure *op_RSHIFT_27 = SHIFTRA(Rt, op_MUL_26); + RzILOpPure *op_AND_29 = LOGAND(op_RSHIFT_27, SN(32, 0xffff)); + RzILOpPure *op_ADD_33 = ADD(CAST(32, MSB(CAST(16, MSB(op_AND_22), DUP(op_AND_22))), CAST(16, MSB(DUP(op_AND_22)), DUP(op_AND_22))), CAST(32, MSB(CAST(16, MSB(op_AND_29), DUP(op_AND_29))), CAST(16, MSB(DUP(op_AND_29)), DUP(op_AND_29)))); + RzILOpPure *op_AND_35 = LOGAND(op_ADD_33, SN(32, 0xffff)); + RzILOpPure *op_MUL_38 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_39 = SHIFTL0(CAST(64, IL_FALSE, op_AND_35), op_MUL_38); + RzILOpPure *op_OR_41 = LOGOR(CAST(64, IL_FALSE, op_AND_16), op_LSHIFT_39); + RzILOpEffect *op_ASSIGN_43 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, op_OR_41)); + + // seq(h_tmp21; Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xffff << i ...; + RzILOpEffect *seq_45 = op_ASSIGN_43; + + // seq(seq(h_tmp21; Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xffff ...; + RzILOpEffect *seq_46 = SEQN(2, seq_45, seq_8); + + // while ((i < 0x2)) { seq(seq(h_tmp21; Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xffff ... 
}; + RzILOpPure *op_LT_4 = SLT(VARL("i"), SN(32, 2)); + RzILOpEffect *for_47 = REPEAT(op_LT_4, seq_46); + + // seq(i = 0x0; while ((i < 0x2)) { seq(seq(h_tmp21; Rd = ((st32) ( ...; + RzILOpEffect *seq_48 = SEQN(2, op_ASSIGN_2, for_47); + + RzILOpEffect *instruction_sequence = seq_48; + return instruction_sequence; +} + +// Rd = vaddh(Rs,Rt):sat +RzILOpEffect *hex_il_op_a2_svaddhs(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: st32 i; + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_2 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_5 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp22 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_7 = SETL("h_tmp22", VARL("i")); + + // seq(h_tmp22 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_8 = SEQN(2, op_ASSIGN_hybrid_tmp_7, op_INC_5); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_76 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((st32) ((st16) ((Rs >> i * 0x10) & 0xffff))) + ((st32) ((st16) ((Rt >> i * 0x10) & 0xffff)))), 0x0, 0x10) == ((st64) ((st32) ((st16) ((Rs >> i * 0x10) & 0xffff))) + ((st32) ((st16) ((Rt >> i * 0x10) & 0xffff)))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st32) ((st16) ((Rs >> i * 0x10) & 0xffff))) + ((st32) ((st16) ((Rt >> i * 0x10) & 0xffff))) < 0x0) ? 
(-(0x1 << 0xf)) : (0x1 << 0xf) - ((st64) 0x1))); + RzILOpPure *op_MUL_22 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_23 = SHIFTRA(Rs, op_MUL_22); + RzILOpPure *op_AND_25 = LOGAND(op_RSHIFT_23, SN(32, 0xffff)); + RzILOpPure *op_MUL_29 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_30 = SHIFTRA(Rt, op_MUL_29); + RzILOpPure *op_AND_32 = LOGAND(op_RSHIFT_30, SN(32, 0xffff)); + RzILOpPure *op_ADD_36 = ADD(CAST(32, MSB(CAST(16, MSB(op_AND_25), DUP(op_AND_25))), CAST(16, MSB(DUP(op_AND_25)), DUP(op_AND_25))), CAST(32, MSB(CAST(16, MSB(op_AND_32), DUP(op_AND_32))), CAST(16, MSB(DUP(op_AND_32)), DUP(op_AND_32)))); + RzILOpPure *op_MUL_43 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_44 = SHIFTRA(DUP(Rs), op_MUL_43); + RzILOpPure *op_AND_46 = LOGAND(op_RSHIFT_44, SN(32, 0xffff)); + RzILOpPure *op_MUL_49 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_50 = SHIFTRA(DUP(Rt), op_MUL_49); + RzILOpPure *op_AND_52 = LOGAND(op_RSHIFT_50, SN(32, 0xffff)); + RzILOpPure *op_ADD_56 = ADD(CAST(32, MSB(CAST(16, MSB(op_AND_46), DUP(op_AND_46))), CAST(16, MSB(DUP(op_AND_46)), DUP(op_AND_46))), CAST(32, MSB(CAST(16, MSB(op_AND_52), DUP(op_AND_52))), CAST(16, MSB(DUP(op_AND_52)), DUP(op_AND_52)))); + RzILOpPure *op_EQ_58 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_ADD_36), SN(32, 0), SN(32, 16)), CAST(64, MSB(op_ADD_56), DUP(op_ADD_56))); + RzILOpPure *op_MUL_78 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_79 = SHIFTRA(DUP(Rs), op_MUL_78); + RzILOpPure *op_AND_81 = LOGAND(op_RSHIFT_79, SN(32, 0xffff)); + RzILOpPure *op_MUL_84 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_85 = SHIFTRA(DUP(Rt), op_MUL_84); + RzILOpPure *op_AND_87 = LOGAND(op_RSHIFT_85, SN(32, 0xffff)); + RzILOpPure *op_ADD_91 = ADD(CAST(32, MSB(CAST(16, MSB(op_AND_81), DUP(op_AND_81))), CAST(16, MSB(DUP(op_AND_81)), DUP(op_AND_81))), CAST(32, MSB(CAST(16, MSB(op_AND_87), DUP(op_AND_87))), CAST(16, MSB(DUP(op_AND_87)), DUP(op_AND_87)))); + RzILOpPure *op_LT_93 = SLT(op_ADD_91, SN(32, 
0)); + RzILOpPure *op_LSHIFT_98 = SHIFTL0(SN(64, 1), SN(32, 15)); + RzILOpPure *op_NEG_99 = NEG(op_LSHIFT_98); + RzILOpPure *op_LSHIFT_104 = SHIFTL0(SN(64, 1), SN(32, 15)); + RzILOpPure *op_SUB_107 = SUB(op_LSHIFT_104, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_108 = ITE(op_LT_93, op_NEG_99, op_SUB_107); + RzILOpEffect *gcc_expr_109 = BRANCH(op_EQ_58, EMPTY(), set_usr_field_call_76); + + // h_tmp23 = HYB(gcc_expr_if ((sextract64(((ut64) ((st32) ((st16) ((Rs >> i * 0x10) & 0xffff))) + ((st32) ((st16) ((Rt >> i * 0x10) & 0xffff)))), 0x0, 0x10) == ((st64) ((st32) ((st16) ((Rs >> i * 0x10) & 0xffff))) + ((st32) ((st16) ((Rt >> i * 0x10) & 0xffff)))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st32) ((st16) ((Rs >> i * 0x10) & 0xffff))) + ((st32) ((st16) ((Rt >> i * 0x10) & 0xffff))) < 0x0) ? (-(0x1 << 0xf)) : (0x1 << 0xf) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_111 = SETL("h_tmp23", cond_108); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st32) ((st16) ((Rs >> ...; + RzILOpEffect *seq_112 = SEQN(2, gcc_expr_109, op_ASSIGN_hybrid_tmp_111); + + // Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xffff << i * 0x10)))) | (((ut64) (((sextract64(((ut64) ((st32) ((st16) ((Rs >> i * 0x10) & 0xffff))) + ((st32) ((st16) ((Rt >> i * 0x10) & 0xffff)))), 0x0, 0x10) == ((st64) ((st32) ((st16) ((Rs >> i * 0x10) & 0xffff))) + ((st32) ((st16) ((Rt >> i * 0x10) & 0xffff))))) ? 
((st64) ((st32) ((st16) ((Rs >> i * 0x10) & 0xffff))) + ((st32) ((st16) ((Rt >> i * 0x10) & 0xffff)))) : h_tmp23) & ((st64) 0xffff))) << i * 0x10))); + RzILOpPure *op_MUL_12 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_13 = SHIFTL0(SN(64, 0xffff), op_MUL_12); + RzILOpPure *op_NOT_14 = LOGNOT(op_LSHIFT_13); + RzILOpPure *op_AND_16 = LOGAND(CAST(64, MSB(READ_REG(pkt, Rd_op, true)), READ_REG(pkt, Rd_op, true)), op_NOT_14); + RzILOpPure *op_MUL_60 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_61 = SHIFTRA(DUP(Rs), op_MUL_60); + RzILOpPure *op_AND_63 = LOGAND(op_RSHIFT_61, SN(32, 0xffff)); + RzILOpPure *op_MUL_66 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_67 = SHIFTRA(DUP(Rt), op_MUL_66); + RzILOpPure *op_AND_69 = LOGAND(op_RSHIFT_67, SN(32, 0xffff)); + RzILOpPure *op_ADD_73 = ADD(CAST(32, MSB(CAST(16, MSB(op_AND_63), DUP(op_AND_63))), CAST(16, MSB(DUP(op_AND_63)), DUP(op_AND_63))), CAST(32, MSB(CAST(16, MSB(op_AND_69), DUP(op_AND_69))), CAST(16, MSB(DUP(op_AND_69)), DUP(op_AND_69)))); + RzILOpPure *cond_114 = ITE(DUP(op_EQ_58), CAST(64, MSB(op_ADD_73), DUP(op_ADD_73)), VARL("h_tmp23")); + RzILOpPure *op_AND_117 = LOGAND(cond_114, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_120 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_121 = SHIFTL0(CAST(64, IL_FALSE, op_AND_117), op_MUL_120); + RzILOpPure *op_OR_123 = LOGOR(CAST(64, IL_FALSE, op_AND_16), op_LSHIFT_121); + RzILOpEffect *op_ASSIGN_125 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, op_OR_123)); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st32) ((st16) ((R ...; + RzILOpEffect *seq_126 = SEQN(2, seq_112, op_ASSIGN_125); + + // seq(h_tmp22; seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st32 ...; + RzILOpEffect *seq_128 = seq_126; + + // seq(seq(h_tmp22; seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) (( ...; + RzILOpEffect *seq_129 = SEQN(2, seq_128, seq_8); + + // while ((i < 0x2)) { seq(seq(h_tmp22; seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) (( ... 
}; + RzILOpPure *op_LT_4 = SLT(VARL("i"), SN(32, 2)); + RzILOpEffect *for_130 = REPEAT(op_LT_4, seq_129); + + // seq(i = 0x0; while ((i < 0x2)) { seq(seq(h_tmp22; seq(seq(HYB(gc ...; + RzILOpEffect *seq_131 = SEQN(2, op_ASSIGN_2, for_130); + + RzILOpEffect *instruction_sequence = seq_131; + return instruction_sequence; +} + +// Rd = vadduh(Rs,Rt):sat +RzILOpEffect *hex_il_op_a2_svadduhs(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: st32 i; + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_2 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_5 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp24 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_7 = SETL("h_tmp24", VARL("i")); + + // seq(h_tmp24 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_8 = SEQN(2, op_ASSIGN_hybrid_tmp_7, op_INC_5); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_76 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((extract64(((ut64) ((st32) ((ut16) ((Rs >> i * 0x10) & 0xffff))) + ((st32) ((ut16) ((Rt >> i * 0x10) & 0xffff)))), 0x0, 0x10) == ((ut64) ((st32) ((ut16) ((Rs >> i * 0x10) & 0xffff))) + ((st32) ((ut16) ((Rt >> i * 0x10) & 0xffff)))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st32) ((ut16) ((Rs >> i * 0x10) & 0xffff))) + ((st32) ((ut16) ((Rt >> i * 0x10) & 0xffff))) < 0x0) ? 
((st64) 0x0) : (0x1 << 0x10) - ((st64) 0x1))); + RzILOpPure *op_MUL_22 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_23 = SHIFTRA(Rs, op_MUL_22); + RzILOpPure *op_AND_25 = LOGAND(op_RSHIFT_23, SN(32, 0xffff)); + RzILOpPure *op_MUL_29 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_30 = SHIFTRA(Rt, op_MUL_29); + RzILOpPure *op_AND_32 = LOGAND(op_RSHIFT_30, SN(32, 0xffff)); + RzILOpPure *op_ADD_36 = ADD(CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_25)), CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_32))); + RzILOpPure *op_MUL_43 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_44 = SHIFTRA(DUP(Rs), op_MUL_43); + RzILOpPure *op_AND_46 = LOGAND(op_RSHIFT_44, SN(32, 0xffff)); + RzILOpPure *op_MUL_49 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_50 = SHIFTRA(DUP(Rt), op_MUL_49); + RzILOpPure *op_AND_52 = LOGAND(op_RSHIFT_50, SN(32, 0xffff)); + RzILOpPure *op_ADD_56 = ADD(CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_46)), CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_52))); + RzILOpPure *op_EQ_58 = EQ(EXTRACT64(CAST(64, IL_FALSE, op_ADD_36), SN(32, 0), SN(32, 16)), CAST(64, IL_FALSE, op_ADD_56)); + RzILOpPure *op_MUL_78 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_79 = SHIFTRA(DUP(Rs), op_MUL_78); + RzILOpPure *op_AND_81 = LOGAND(op_RSHIFT_79, SN(32, 0xffff)); + RzILOpPure *op_MUL_84 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_85 = SHIFTRA(DUP(Rt), op_MUL_84); + RzILOpPure *op_AND_87 = LOGAND(op_RSHIFT_85, SN(32, 0xffff)); + RzILOpPure *op_ADD_91 = ADD(CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_81)), CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_87))); + RzILOpPure *op_LT_93 = SLT(op_ADD_91, SN(32, 0)); + RzILOpPure *op_LSHIFT_97 = SHIFTL0(SN(64, 1), SN(32, 16)); + RzILOpPure *op_SUB_100 = SUB(op_LSHIFT_97, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_102 = ITE(op_LT_93, CAST(64, MSB(SN(32, 0)), SN(32, 0)), op_SUB_100); + RzILOpEffect *gcc_expr_103 = BRANCH(op_EQ_58, EMPTY(), set_usr_field_call_76); + + // h_tmp25 = 
HYB(gcc_expr_if ((extract64(((ut64) ((st32) ((ut16) ((Rs >> i * 0x10) & 0xffff))) + ((st32) ((ut16) ((Rt >> i * 0x10) & 0xffff)))), 0x0, 0x10) == ((ut64) ((st32) ((ut16) ((Rs >> i * 0x10) & 0xffff))) + ((st32) ((ut16) ((Rt >> i * 0x10) & 0xffff)))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st32) ((ut16) ((Rs >> i * 0x10) & 0xffff))) + ((st32) ((ut16) ((Rt >> i * 0x10) & 0xffff))) < 0x0) ? ((st64) 0x0) : (0x1 << 0x10) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_105 = SETL("h_tmp25", cond_102); + + // seq(HYB(gcc_expr_if ((extract64(((ut64) ((st32) ((ut16) ((Rs >> ...; + RzILOpEffect *seq_106 = SEQN(2, gcc_expr_103, op_ASSIGN_hybrid_tmp_105); + + // Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xffff << i * 0x10)))) | (((ut64) (((extract64(((ut64) ((st32) ((ut16) ((Rs >> i * 0x10) & 0xffff))) + ((st32) ((ut16) ((Rt >> i * 0x10) & 0xffff)))), 0x0, 0x10) == ((ut64) ((st32) ((ut16) ((Rs >> i * 0x10) & 0xffff))) + ((st32) ((ut16) ((Rt >> i * 0x10) & 0xffff))))) ? 
((st64) ((st32) ((ut16) ((Rs >> i * 0x10) & 0xffff))) + ((st32) ((ut16) ((Rt >> i * 0x10) & 0xffff)))) : h_tmp25) & ((st64) 0xffff))) << i * 0x10))); + RzILOpPure *op_MUL_12 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_13 = SHIFTL0(SN(64, 0xffff), op_MUL_12); + RzILOpPure *op_NOT_14 = LOGNOT(op_LSHIFT_13); + RzILOpPure *op_AND_16 = LOGAND(CAST(64, MSB(READ_REG(pkt, Rd_op, true)), READ_REG(pkt, Rd_op, true)), op_NOT_14); + RzILOpPure *op_MUL_60 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_61 = SHIFTRA(DUP(Rs), op_MUL_60); + RzILOpPure *op_AND_63 = LOGAND(op_RSHIFT_61, SN(32, 0xffff)); + RzILOpPure *op_MUL_66 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_67 = SHIFTRA(DUP(Rt), op_MUL_66); + RzILOpPure *op_AND_69 = LOGAND(op_RSHIFT_67, SN(32, 0xffff)); + RzILOpPure *op_ADD_73 = ADD(CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_63)), CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_69))); + RzILOpPure *cond_108 = ITE(DUP(op_EQ_58), CAST(64, MSB(op_ADD_73), DUP(op_ADD_73)), VARL("h_tmp25")); + RzILOpPure *op_AND_111 = LOGAND(cond_108, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_114 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_115 = SHIFTL0(CAST(64, IL_FALSE, op_AND_111), op_MUL_114); + RzILOpPure *op_OR_117 = LOGOR(CAST(64, IL_FALSE, op_AND_16), op_LSHIFT_115); + RzILOpEffect *op_ASSIGN_119 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, op_OR_117)); + + // seq(seq(HYB(gcc_expr_if ((extract64(((ut64) ((st32) ((ut16) ((Rs ...; + RzILOpEffect *seq_120 = SEQN(2, seq_106, op_ASSIGN_119); + + // seq(h_tmp24; seq(seq(HYB(gcc_expr_if ((extract64(((ut64) ((st32) ...; + RzILOpEffect *seq_122 = seq_120; + + // seq(seq(h_tmp24; seq(seq(HYB(gcc_expr_if ((extract64(((ut64) ((s ...; + RzILOpEffect *seq_123 = SEQN(2, seq_122, seq_8); + + // while ((i < 0x2)) { seq(seq(h_tmp24; seq(seq(HYB(gcc_expr_if ((extract64(((ut64) ((s ... 
}; + RzILOpPure *op_LT_4 = SLT(VARL("i"), SN(32, 2)); + RzILOpEffect *for_124 = REPEAT(op_LT_4, seq_123); + + // seq(i = 0x0; while ((i < 0x2)) { seq(seq(h_tmp24; seq(seq(HYB(gc ...; + RzILOpEffect *seq_125 = SEQN(2, op_ASSIGN_2, for_124); + + RzILOpEffect *instruction_sequence = seq_125; + return instruction_sequence; +} + +// Rd = vavgh(Rs,Rt) +RzILOpEffect *hex_il_op_a2_svavgh(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: st32 i; + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_2 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_5 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp26 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_7 = SETL("h_tmp26", VARL("i")); + + // seq(h_tmp26 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_8 = SEQN(2, op_ASSIGN_hybrid_tmp_7, op_INC_5); + + // Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xffff << i * 0x10)))) | (((ut64) ((((st32) ((st16) ((Rs >> i * 0x10) & 0xffff))) + ((st32) ((st16) ((Rt >> i * 0x10) & 0xffff))) >> 0x1) & 0xffff)) << i * 0x10))); + RzILOpPure *op_MUL_12 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_13 = SHIFTL0(SN(64, 0xffff), op_MUL_12); + RzILOpPure *op_NOT_14 = LOGNOT(op_LSHIFT_13); + RzILOpPure *op_AND_16 = LOGAND(CAST(64, MSB(READ_REG(pkt, Rd_op, true)), READ_REG(pkt, Rd_op, true)), op_NOT_14); + RzILOpPure *op_MUL_19 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_20 = SHIFTRA(Rs, op_MUL_19); + RzILOpPure *op_AND_22 = LOGAND(op_RSHIFT_20, SN(32, 0xffff)); + RzILOpPure *op_MUL_26 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_27 = SHIFTRA(Rt, op_MUL_26); + RzILOpPure *op_AND_29 = LOGAND(op_RSHIFT_27, SN(32, 0xffff)); + RzILOpPure *op_ADD_33 = ADD(CAST(32, MSB(CAST(16, 
MSB(op_AND_22), DUP(op_AND_22))), CAST(16, MSB(DUP(op_AND_22)), DUP(op_AND_22))), CAST(32, MSB(CAST(16, MSB(op_AND_29), DUP(op_AND_29))), CAST(16, MSB(DUP(op_AND_29)), DUP(op_AND_29)))); + RzILOpPure *op_RSHIFT_35 = SHIFTRA(op_ADD_33, SN(32, 1)); + RzILOpPure *op_AND_37 = LOGAND(op_RSHIFT_35, SN(32, 0xffff)); + RzILOpPure *op_MUL_40 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_41 = SHIFTL0(CAST(64, IL_FALSE, op_AND_37), op_MUL_40); + RzILOpPure *op_OR_43 = LOGOR(CAST(64, IL_FALSE, op_AND_16), op_LSHIFT_41); + RzILOpEffect *op_ASSIGN_45 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, op_OR_43)); + + // seq(h_tmp26; Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xffff << i ...; + RzILOpEffect *seq_47 = op_ASSIGN_45; + + // seq(seq(h_tmp26; Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xffff ...; + RzILOpEffect *seq_48 = SEQN(2, seq_47, seq_8); + + // while ((i < 0x2)) { seq(seq(h_tmp26; Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xffff ... }; + RzILOpPure *op_LT_4 = SLT(VARL("i"), SN(32, 2)); + RzILOpEffect *for_49 = REPEAT(op_LT_4, seq_48); + + // seq(i = 0x0; while ((i < 0x2)) { seq(seq(h_tmp26; Rd = ((st32) ( ...; + RzILOpEffect *seq_50 = SEQN(2, op_ASSIGN_2, for_49); + + RzILOpEffect *instruction_sequence = seq_50; + return instruction_sequence; +} + +// Rd = vavgh(Rs,Rt):rnd +RzILOpEffect *hex_il_op_a2_svavghs(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: st32 i; + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_2 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_5 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp27 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_7 = SETL("h_tmp27", VARL("i")); + + // seq(h_tmp27 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_8 = 
SEQN(2, op_ASSIGN_hybrid_tmp_7, op_INC_5); + + // Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xffff << i * 0x10)))) | (((ut64) ((((st32) ((st16) ((Rs >> i * 0x10) & 0xffff))) + ((st32) ((st16) ((Rt >> i * 0x10) & 0xffff))) + 0x1 >> 0x1) & 0xffff)) << i * 0x10))); + RzILOpPure *op_MUL_12 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_13 = SHIFTL0(SN(64, 0xffff), op_MUL_12); + RzILOpPure *op_NOT_14 = LOGNOT(op_LSHIFT_13); + RzILOpPure *op_AND_16 = LOGAND(CAST(64, MSB(READ_REG(pkt, Rd_op, true)), READ_REG(pkt, Rd_op, true)), op_NOT_14); + RzILOpPure *op_MUL_19 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_20 = SHIFTRA(Rs, op_MUL_19); + RzILOpPure *op_AND_22 = LOGAND(op_RSHIFT_20, SN(32, 0xffff)); + RzILOpPure *op_MUL_26 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_27 = SHIFTRA(Rt, op_MUL_26); + RzILOpPure *op_AND_29 = LOGAND(op_RSHIFT_27, SN(32, 0xffff)); + RzILOpPure *op_ADD_33 = ADD(CAST(32, MSB(CAST(16, MSB(op_AND_22), DUP(op_AND_22))), CAST(16, MSB(DUP(op_AND_22)), DUP(op_AND_22))), CAST(32, MSB(CAST(16, MSB(op_AND_29), DUP(op_AND_29))), CAST(16, MSB(DUP(op_AND_29)), DUP(op_AND_29)))); + RzILOpPure *op_ADD_35 = ADD(op_ADD_33, SN(32, 1)); + RzILOpPure *op_RSHIFT_37 = SHIFTRA(op_ADD_35, SN(32, 1)); + RzILOpPure *op_AND_39 = LOGAND(op_RSHIFT_37, SN(32, 0xffff)); + RzILOpPure *op_MUL_42 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_43 = SHIFTL0(CAST(64, IL_FALSE, op_AND_39), op_MUL_42); + RzILOpPure *op_OR_45 = LOGOR(CAST(64, IL_FALSE, op_AND_16), op_LSHIFT_43); + RzILOpEffect *op_ASSIGN_47 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, op_OR_45)); + + // seq(h_tmp27; Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xffff << i ...; + RzILOpEffect *seq_49 = op_ASSIGN_47; + + // seq(seq(h_tmp27; Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xffff ...; + RzILOpEffect *seq_50 = SEQN(2, seq_49, seq_8); + + // while ((i < 0x2)) { seq(seq(h_tmp27; Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xffff ... 
}; + RzILOpPure *op_LT_4 = SLT(VARL("i"), SN(32, 2)); + RzILOpEffect *for_51 = REPEAT(op_LT_4, seq_50); + + // seq(i = 0x0; while ((i < 0x2)) { seq(seq(h_tmp27; Rd = ((st32) ( ...; + RzILOpEffect *seq_52 = SEQN(2, op_ASSIGN_2, for_51); + + RzILOpEffect *instruction_sequence = seq_52; + return instruction_sequence; +} + +// Rd = vnavgh(Rt,Rs) +RzILOpEffect *hex_il_op_a2_svnavgh(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: st32 i; + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_2 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_5 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp28 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_7 = SETL("h_tmp28", VARL("i")); + + // seq(h_tmp28 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_8 = SEQN(2, op_ASSIGN_hybrid_tmp_7, op_INC_5); + + // Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xffff << i * 0x10)))) | (((ut64) ((((st32) ((st16) ((Rt >> i * 0x10) & 0xffff))) - ((st32) ((st16) ((Rs >> i * 0x10) & 0xffff))) >> 0x1) & 0xffff)) << i * 0x10))); + RzILOpPure *op_MUL_12 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_13 = SHIFTL0(SN(64, 0xffff), op_MUL_12); + RzILOpPure *op_NOT_14 = LOGNOT(op_LSHIFT_13); + RzILOpPure *op_AND_16 = LOGAND(CAST(64, MSB(READ_REG(pkt, Rd_op, true)), READ_REG(pkt, Rd_op, true)), op_NOT_14); + RzILOpPure *op_MUL_19 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_20 = SHIFTRA(Rt, op_MUL_19); + RzILOpPure *op_AND_22 = LOGAND(op_RSHIFT_20, SN(32, 0xffff)); + RzILOpPure *op_MUL_26 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_27 = SHIFTRA(Rs, op_MUL_26); + RzILOpPure *op_AND_29 = LOGAND(op_RSHIFT_27, SN(32, 0xffff)); + RzILOpPure *op_SUB_33 = SUB(CAST(32, MSB(CAST(16, MSB(op_AND_22), 
DUP(op_AND_22))), CAST(16, MSB(DUP(op_AND_22)), DUP(op_AND_22))), CAST(32, MSB(CAST(16, MSB(op_AND_29), DUP(op_AND_29))), CAST(16, MSB(DUP(op_AND_29)), DUP(op_AND_29)))); + RzILOpPure *op_RSHIFT_35 = SHIFTRA(op_SUB_33, SN(32, 1)); + RzILOpPure *op_AND_37 = LOGAND(op_RSHIFT_35, SN(32, 0xffff)); + RzILOpPure *op_MUL_40 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_41 = SHIFTL0(CAST(64, IL_FALSE, op_AND_37), op_MUL_40); + RzILOpPure *op_OR_43 = LOGOR(CAST(64, IL_FALSE, op_AND_16), op_LSHIFT_41); + RzILOpEffect *op_ASSIGN_45 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, op_OR_43)); + + // seq(h_tmp28; Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xffff << i ...; + RzILOpEffect *seq_47 = op_ASSIGN_45; + + // seq(seq(h_tmp28; Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xffff ...; + RzILOpEffect *seq_48 = SEQN(2, seq_47, seq_8); + + // while ((i < 0x2)) { seq(seq(h_tmp28; Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xffff ... }; + RzILOpPure *op_LT_4 = SLT(VARL("i"), SN(32, 2)); + RzILOpEffect *for_49 = REPEAT(op_LT_4, seq_48); + + // seq(i = 0x0; while ((i < 0x2)) { seq(seq(h_tmp28; Rd = ((st32) ( ...; + RzILOpEffect *seq_50 = SEQN(2, op_ASSIGN_2, for_49); + + RzILOpEffect *instruction_sequence = seq_50; + return instruction_sequence; +} + +// Rd = vsubh(Rt,Rs) +RzILOpEffect *hex_il_op_a2_svsubh(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: st32 i; + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_2 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_5 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp29 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_7 = SETL("h_tmp29", VARL("i")); + + // seq(h_tmp29 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_8 = SEQN(2, 
op_ASSIGN_hybrid_tmp_7, op_INC_5); + + // Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xffff << i * 0x10)))) | (((ut64) (((st32) ((st16) ((Rt >> i * 0x10) & 0xffff))) - ((st32) ((st16) ((Rs >> i * 0x10) & 0xffff))) & 0xffff)) << i * 0x10))); + RzILOpPure *op_MUL_12 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_13 = SHIFTL0(SN(64, 0xffff), op_MUL_12); + RzILOpPure *op_NOT_14 = LOGNOT(op_LSHIFT_13); + RzILOpPure *op_AND_16 = LOGAND(CAST(64, MSB(READ_REG(pkt, Rd_op, true)), READ_REG(pkt, Rd_op, true)), op_NOT_14); + RzILOpPure *op_MUL_19 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_20 = SHIFTRA(Rt, op_MUL_19); + RzILOpPure *op_AND_22 = LOGAND(op_RSHIFT_20, SN(32, 0xffff)); + RzILOpPure *op_MUL_26 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_27 = SHIFTRA(Rs, op_MUL_26); + RzILOpPure *op_AND_29 = LOGAND(op_RSHIFT_27, SN(32, 0xffff)); + RzILOpPure *op_SUB_33 = SUB(CAST(32, MSB(CAST(16, MSB(op_AND_22), DUP(op_AND_22))), CAST(16, MSB(DUP(op_AND_22)), DUP(op_AND_22))), CAST(32, MSB(CAST(16, MSB(op_AND_29), DUP(op_AND_29))), CAST(16, MSB(DUP(op_AND_29)), DUP(op_AND_29)))); + RzILOpPure *op_AND_35 = LOGAND(op_SUB_33, SN(32, 0xffff)); + RzILOpPure *op_MUL_38 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_39 = SHIFTL0(CAST(64, IL_FALSE, op_AND_35), op_MUL_38); + RzILOpPure *op_OR_41 = LOGOR(CAST(64, IL_FALSE, op_AND_16), op_LSHIFT_39); + RzILOpEffect *op_ASSIGN_43 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, op_OR_41)); + + // seq(h_tmp29; Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xffff << i ...; + RzILOpEffect *seq_45 = op_ASSIGN_43; + + // seq(seq(h_tmp29; Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xffff ...; + RzILOpEffect *seq_46 = SEQN(2, seq_45, seq_8); + + // while ((i < 0x2)) { seq(seq(h_tmp29; Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xffff ... 
}; + RzILOpPure *op_LT_4 = SLT(VARL("i"), SN(32, 2)); + RzILOpEffect *for_47 = REPEAT(op_LT_4, seq_46); + + // seq(i = 0x0; while ((i < 0x2)) { seq(seq(h_tmp29; Rd = ((st32) ( ...; + RzILOpEffect *seq_48 = SEQN(2, op_ASSIGN_2, for_47); + + RzILOpEffect *instruction_sequence = seq_48; + return instruction_sequence; +} + +// Rd = vsubh(Rt,Rs):sat +RzILOpEffect *hex_il_op_a2_svsubhs(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: st32 i; + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_2 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_5 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp30 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_7 = SETL("h_tmp30", VARL("i")); + + // seq(h_tmp30 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_8 = SEQN(2, op_ASSIGN_hybrid_tmp_7, op_INC_5); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_76 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((st32) ((st16) ((Rt >> i * 0x10) & 0xffff))) - ((st32) ((st16) ((Rs >> i * 0x10) & 0xffff)))), 0x0, 0x10) == ((st64) ((st32) ((st16) ((Rt >> i * 0x10) & 0xffff))) - ((st32) ((st16) ((Rs >> i * 0x10) & 0xffff)))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st32) ((st16) ((Rt >> i * 0x10) & 0xffff))) - ((st32) ((st16) ((Rs >> i * 0x10) & 0xffff))) < 0x0) ? 
(-(0x1 << 0xf)) : (0x1 << 0xf) - ((st64) 0x1))); + RzILOpPure *op_MUL_22 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_23 = SHIFTRA(Rt, op_MUL_22); + RzILOpPure *op_AND_25 = LOGAND(op_RSHIFT_23, SN(32, 0xffff)); + RzILOpPure *op_MUL_29 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_30 = SHIFTRA(Rs, op_MUL_29); + RzILOpPure *op_AND_32 = LOGAND(op_RSHIFT_30, SN(32, 0xffff)); + RzILOpPure *op_SUB_36 = SUB(CAST(32, MSB(CAST(16, MSB(op_AND_25), DUP(op_AND_25))), CAST(16, MSB(DUP(op_AND_25)), DUP(op_AND_25))), CAST(32, MSB(CAST(16, MSB(op_AND_32), DUP(op_AND_32))), CAST(16, MSB(DUP(op_AND_32)), DUP(op_AND_32)))); + RzILOpPure *op_MUL_43 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_44 = SHIFTRA(DUP(Rt), op_MUL_43); + RzILOpPure *op_AND_46 = LOGAND(op_RSHIFT_44, SN(32, 0xffff)); + RzILOpPure *op_MUL_49 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_50 = SHIFTRA(DUP(Rs), op_MUL_49); + RzILOpPure *op_AND_52 = LOGAND(op_RSHIFT_50, SN(32, 0xffff)); + RzILOpPure *op_SUB_56 = SUB(CAST(32, MSB(CAST(16, MSB(op_AND_46), DUP(op_AND_46))), CAST(16, MSB(DUP(op_AND_46)), DUP(op_AND_46))), CAST(32, MSB(CAST(16, MSB(op_AND_52), DUP(op_AND_52))), CAST(16, MSB(DUP(op_AND_52)), DUP(op_AND_52)))); + RzILOpPure *op_EQ_58 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_SUB_36), SN(32, 0), SN(32, 16)), CAST(64, MSB(op_SUB_56), DUP(op_SUB_56))); + RzILOpPure *op_MUL_78 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_79 = SHIFTRA(DUP(Rt), op_MUL_78); + RzILOpPure *op_AND_81 = LOGAND(op_RSHIFT_79, SN(32, 0xffff)); + RzILOpPure *op_MUL_84 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_85 = SHIFTRA(DUP(Rs), op_MUL_84); + RzILOpPure *op_AND_87 = LOGAND(op_RSHIFT_85, SN(32, 0xffff)); + RzILOpPure *op_SUB_91 = SUB(CAST(32, MSB(CAST(16, MSB(op_AND_81), DUP(op_AND_81))), CAST(16, MSB(DUP(op_AND_81)), DUP(op_AND_81))), CAST(32, MSB(CAST(16, MSB(op_AND_87), DUP(op_AND_87))), CAST(16, MSB(DUP(op_AND_87)), DUP(op_AND_87)))); + RzILOpPure *op_LT_93 = SLT(op_SUB_91, SN(32, 
0)); + RzILOpPure *op_LSHIFT_98 = SHIFTL0(SN(64, 1), SN(32, 15)); + RzILOpPure *op_NEG_99 = NEG(op_LSHIFT_98); + RzILOpPure *op_LSHIFT_104 = SHIFTL0(SN(64, 1), SN(32, 15)); + RzILOpPure *op_SUB_107 = SUB(op_LSHIFT_104, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_108 = ITE(op_LT_93, op_NEG_99, op_SUB_107); + RzILOpEffect *gcc_expr_109 = BRANCH(op_EQ_58, EMPTY(), set_usr_field_call_76); + + // h_tmp31 = HYB(gcc_expr_if ((sextract64(((ut64) ((st32) ((st16) ((Rt >> i * 0x10) & 0xffff))) - ((st32) ((st16) ((Rs >> i * 0x10) & 0xffff)))), 0x0, 0x10) == ((st64) ((st32) ((st16) ((Rt >> i * 0x10) & 0xffff))) - ((st32) ((st16) ((Rs >> i * 0x10) & 0xffff)))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st32) ((st16) ((Rt >> i * 0x10) & 0xffff))) - ((st32) ((st16) ((Rs >> i * 0x10) & 0xffff))) < 0x0) ? (-(0x1 << 0xf)) : (0x1 << 0xf) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_111 = SETL("h_tmp31", cond_108); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st32) ((st16) ((Rt >> ...; + RzILOpEffect *seq_112 = SEQN(2, gcc_expr_109, op_ASSIGN_hybrid_tmp_111); + + // Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xffff << i * 0x10)))) | (((ut64) (((sextract64(((ut64) ((st32) ((st16) ((Rt >> i * 0x10) & 0xffff))) - ((st32) ((st16) ((Rs >> i * 0x10) & 0xffff)))), 0x0, 0x10) == ((st64) ((st32) ((st16) ((Rt >> i * 0x10) & 0xffff))) - ((st32) ((st16) ((Rs >> i * 0x10) & 0xffff))))) ? 
((st64) ((st32) ((st16) ((Rt >> i * 0x10) & 0xffff))) - ((st32) ((st16) ((Rs >> i * 0x10) & 0xffff)))) : h_tmp31) & ((st64) 0xffff))) << i * 0x10))); + RzILOpPure *op_MUL_12 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_13 = SHIFTL0(SN(64, 0xffff), op_MUL_12); + RzILOpPure *op_NOT_14 = LOGNOT(op_LSHIFT_13); + RzILOpPure *op_AND_16 = LOGAND(CAST(64, MSB(READ_REG(pkt, Rd_op, true)), READ_REG(pkt, Rd_op, true)), op_NOT_14); + RzILOpPure *op_MUL_60 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_61 = SHIFTRA(DUP(Rt), op_MUL_60); + RzILOpPure *op_AND_63 = LOGAND(op_RSHIFT_61, SN(32, 0xffff)); + RzILOpPure *op_MUL_66 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_67 = SHIFTRA(DUP(Rs), op_MUL_66); + RzILOpPure *op_AND_69 = LOGAND(op_RSHIFT_67, SN(32, 0xffff)); + RzILOpPure *op_SUB_73 = SUB(CAST(32, MSB(CAST(16, MSB(op_AND_63), DUP(op_AND_63))), CAST(16, MSB(DUP(op_AND_63)), DUP(op_AND_63))), CAST(32, MSB(CAST(16, MSB(op_AND_69), DUP(op_AND_69))), CAST(16, MSB(DUP(op_AND_69)), DUP(op_AND_69)))); + RzILOpPure *cond_114 = ITE(DUP(op_EQ_58), CAST(64, MSB(op_SUB_73), DUP(op_SUB_73)), VARL("h_tmp31")); + RzILOpPure *op_AND_117 = LOGAND(cond_114, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_120 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_121 = SHIFTL0(CAST(64, IL_FALSE, op_AND_117), op_MUL_120); + RzILOpPure *op_OR_123 = LOGOR(CAST(64, IL_FALSE, op_AND_16), op_LSHIFT_121); + RzILOpEffect *op_ASSIGN_125 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, op_OR_123)); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st32) ((st16) ((R ...; + RzILOpEffect *seq_126 = SEQN(2, seq_112, op_ASSIGN_125); + + // seq(h_tmp30; seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st32 ...; + RzILOpEffect *seq_128 = seq_126; + + // seq(seq(h_tmp30; seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) (( ...; + RzILOpEffect *seq_129 = SEQN(2, seq_128, seq_8); + + // while ((i < 0x2)) { seq(seq(h_tmp30; seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) (( ... 
}; + RzILOpPure *op_LT_4 = SLT(VARL("i"), SN(32, 2)); + RzILOpEffect *for_130 = REPEAT(op_LT_4, seq_129); + + // seq(i = 0x0; while ((i < 0x2)) { seq(seq(h_tmp30; seq(seq(HYB(gc ...; + RzILOpEffect *seq_131 = SEQN(2, op_ASSIGN_2, for_130); + + RzILOpEffect *instruction_sequence = seq_131; + return instruction_sequence; +} + +// Rd = vsubuh(Rt,Rs):sat +RzILOpEffect *hex_il_op_a2_svsubuhs(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: st32 i; + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_2 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_5 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp32 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_7 = SETL("h_tmp32", VARL("i")); + + // seq(h_tmp32 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_8 = SEQN(2, op_ASSIGN_hybrid_tmp_7, op_INC_5); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_76 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((extract64(((ut64) ((st32) ((ut16) ((Rt >> i * 0x10) & 0xffff))) - ((st32) ((ut16) ((Rs >> i * 0x10) & 0xffff)))), 0x0, 0x10) == ((ut64) ((st32) ((ut16) ((Rt >> i * 0x10) & 0xffff))) - ((st32) ((ut16) ((Rs >> i * 0x10) & 0xffff)))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st32) ((ut16) ((Rt >> i * 0x10) & 0xffff))) - ((st32) ((ut16) ((Rs >> i * 0x10) & 0xffff))) < 0x0) ? 
((st64) 0x0) : (0x1 << 0x10) - ((st64) 0x1))); + RzILOpPure *op_MUL_22 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_23 = SHIFTRA(Rt, op_MUL_22); + RzILOpPure *op_AND_25 = LOGAND(op_RSHIFT_23, SN(32, 0xffff)); + RzILOpPure *op_MUL_29 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_30 = SHIFTRA(Rs, op_MUL_29); + RzILOpPure *op_AND_32 = LOGAND(op_RSHIFT_30, SN(32, 0xffff)); + RzILOpPure *op_SUB_36 = SUB(CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_25)), CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_32))); + RzILOpPure *op_MUL_43 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_44 = SHIFTRA(DUP(Rt), op_MUL_43); + RzILOpPure *op_AND_46 = LOGAND(op_RSHIFT_44, SN(32, 0xffff)); + RzILOpPure *op_MUL_49 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_50 = SHIFTRA(DUP(Rs), op_MUL_49); + RzILOpPure *op_AND_52 = LOGAND(op_RSHIFT_50, SN(32, 0xffff)); + RzILOpPure *op_SUB_56 = SUB(CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_46)), CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_52))); + RzILOpPure *op_EQ_58 = EQ(EXTRACT64(CAST(64, IL_FALSE, op_SUB_36), SN(32, 0), SN(32, 16)), CAST(64, IL_FALSE, op_SUB_56)); + RzILOpPure *op_MUL_78 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_79 = SHIFTRA(DUP(Rt), op_MUL_78); + RzILOpPure *op_AND_81 = LOGAND(op_RSHIFT_79, SN(32, 0xffff)); + RzILOpPure *op_MUL_84 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_85 = SHIFTRA(DUP(Rs), op_MUL_84); + RzILOpPure *op_AND_87 = LOGAND(op_RSHIFT_85, SN(32, 0xffff)); + RzILOpPure *op_SUB_91 = SUB(CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_81)), CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_87))); + RzILOpPure *op_LT_93 = SLT(op_SUB_91, SN(32, 0)); + RzILOpPure *op_LSHIFT_97 = SHIFTL0(SN(64, 1), SN(32, 16)); + RzILOpPure *op_SUB_100 = SUB(op_LSHIFT_97, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_102 = ITE(op_LT_93, CAST(64, MSB(SN(32, 0)), SN(32, 0)), op_SUB_100); + RzILOpEffect *gcc_expr_103 = BRANCH(op_EQ_58, EMPTY(), set_usr_field_call_76); + + // h_tmp33 = 
HYB(gcc_expr_if ((extract64(((ut64) ((st32) ((ut16) ((Rt >> i * 0x10) & 0xffff))) - ((st32) ((ut16) ((Rs >> i * 0x10) & 0xffff)))), 0x0, 0x10) == ((ut64) ((st32) ((ut16) ((Rt >> i * 0x10) & 0xffff))) - ((st32) ((ut16) ((Rs >> i * 0x10) & 0xffff)))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st32) ((ut16) ((Rt >> i * 0x10) & 0xffff))) - ((st32) ((ut16) ((Rs >> i * 0x10) & 0xffff))) < 0x0) ? ((st64) 0x0) : (0x1 << 0x10) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_105 = SETL("h_tmp33", cond_102); + + // seq(HYB(gcc_expr_if ((extract64(((ut64) ((st32) ((ut16) ((Rt >> ...; + RzILOpEffect *seq_106 = SEQN(2, gcc_expr_103, op_ASSIGN_hybrid_tmp_105); + + // Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xffff << i * 0x10)))) | (((ut64) (((extract64(((ut64) ((st32) ((ut16) ((Rt >> i * 0x10) & 0xffff))) - ((st32) ((ut16) ((Rs >> i * 0x10) & 0xffff)))), 0x0, 0x10) == ((ut64) ((st32) ((ut16) ((Rt >> i * 0x10) & 0xffff))) - ((st32) ((ut16) ((Rs >> i * 0x10) & 0xffff))))) ? 
((st64) ((st32) ((ut16) ((Rt >> i * 0x10) & 0xffff))) - ((st32) ((ut16) ((Rs >> i * 0x10) & 0xffff)))) : h_tmp33) & ((st64) 0xffff))) << i * 0x10))); + RzILOpPure *op_MUL_12 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_13 = SHIFTL0(SN(64, 0xffff), op_MUL_12); + RzILOpPure *op_NOT_14 = LOGNOT(op_LSHIFT_13); + RzILOpPure *op_AND_16 = LOGAND(CAST(64, MSB(READ_REG(pkt, Rd_op, true)), READ_REG(pkt, Rd_op, true)), op_NOT_14); + RzILOpPure *op_MUL_60 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_61 = SHIFTRA(DUP(Rt), op_MUL_60); + RzILOpPure *op_AND_63 = LOGAND(op_RSHIFT_61, SN(32, 0xffff)); + RzILOpPure *op_MUL_66 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_67 = SHIFTRA(DUP(Rs), op_MUL_66); + RzILOpPure *op_AND_69 = LOGAND(op_RSHIFT_67, SN(32, 0xffff)); + RzILOpPure *op_SUB_73 = SUB(CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_63)), CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_69))); + RzILOpPure *cond_108 = ITE(DUP(op_EQ_58), CAST(64, MSB(op_SUB_73), DUP(op_SUB_73)), VARL("h_tmp33")); + RzILOpPure *op_AND_111 = LOGAND(cond_108, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_114 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_115 = SHIFTL0(CAST(64, IL_FALSE, op_AND_111), op_MUL_114); + RzILOpPure *op_OR_117 = LOGOR(CAST(64, IL_FALSE, op_AND_16), op_LSHIFT_115); + RzILOpEffect *op_ASSIGN_119 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, op_OR_117)); + + // seq(seq(HYB(gcc_expr_if ((extract64(((ut64) ((st32) ((ut16) ((Rt ...; + RzILOpEffect *seq_120 = SEQN(2, seq_106, op_ASSIGN_119); + + // seq(h_tmp32; seq(seq(HYB(gcc_expr_if ((extract64(((ut64) ((st32) ...; + RzILOpEffect *seq_122 = seq_120; + + // seq(seq(h_tmp32; seq(seq(HYB(gcc_expr_if ((extract64(((ut64) ((s ...; + RzILOpEffect *seq_123 = SEQN(2, seq_122, seq_8); + + // while ((i < 0x2)) { seq(seq(h_tmp32; seq(seq(HYB(gcc_expr_if ((extract64(((ut64) ((s ... 
}; + RzILOpPure *op_LT_4 = SLT(VARL("i"), SN(32, 2)); + RzILOpEffect *for_124 = REPEAT(op_LT_4, seq_123); + + // seq(i = 0x0; while ((i < 0x2)) { seq(seq(h_tmp32; seq(seq(HYB(gc ...; + RzILOpEffect *seq_125 = SEQN(2, op_ASSIGN_2, for_124); + + RzILOpEffect *instruction_sequence = seq_125; + return instruction_sequence; +} + +// Rd = swiz(Rs) +RzILOpEffect *hex_il_op_a2_swiz(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xff << 0x0)))) | (((ut64) (((st64) ((st32) ((st8) ((Rs >> 0x18) & 0xff)))) & 0xff)) << 0x0))); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(SN(64, 0xff), SN(32, 0)); + RzILOpPure *op_NOT_6 = LOGNOT(op_LSHIFT_5); + RzILOpPure *op_AND_8 = LOGAND(CAST(64, MSB(READ_REG(pkt, Rd_op, true)), READ_REG(pkt, Rd_op, true)), op_NOT_6); + RzILOpPure *op_RSHIFT_13 = SHIFTRA(Rs, SN(32, 24)); + RzILOpPure *op_AND_15 = LOGAND(op_RSHIFT_13, SN(32, 0xff)); + RzILOpPure *op_AND_20 = LOGAND(CAST(64, MSB(CAST(32, MSB(CAST(8, MSB(op_AND_15), DUP(op_AND_15))), CAST(8, MSB(DUP(op_AND_15)), DUP(op_AND_15)))), CAST(32, MSB(CAST(8, MSB(DUP(op_AND_15)), DUP(op_AND_15))), CAST(8, MSB(DUP(op_AND_15)), DUP(op_AND_15)))), SN(64, 0xff)); + RzILOpPure *op_LSHIFT_25 = SHIFTL0(CAST(64, IL_FALSE, op_AND_20), SN(32, 0)); + RzILOpPure *op_OR_27 = LOGOR(CAST(64, IL_FALSE, op_AND_8), op_LSHIFT_25); + RzILOpEffect *op_ASSIGN_29 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, op_OR_27)); + + // Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xff << 0x8)))) | (((ut64) (((st64) ((st32) ((st8) ((Rs >> 0x10) & 0xff)))) & 0xff)) << 0x8))); + RzILOpPure *op_LSHIFT_35 = SHIFTL0(SN(64, 0xff), SN(32, 8)); + RzILOpPure *op_NOT_36 = LOGNOT(op_LSHIFT_35); + RzILOpPure *op_AND_38 = LOGAND(CAST(64, MSB(READ_REG(pkt, Rd_op, true)), READ_REG(pkt, Rd_op, true)), op_NOT_36); + 
RzILOpPure *op_RSHIFT_42 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_44 = LOGAND(op_RSHIFT_42, SN(32, 0xff)); + RzILOpPure *op_AND_49 = LOGAND(CAST(64, MSB(CAST(32, MSB(CAST(8, MSB(op_AND_44), DUP(op_AND_44))), CAST(8, MSB(DUP(op_AND_44)), DUP(op_AND_44)))), CAST(32, MSB(CAST(8, MSB(DUP(op_AND_44)), DUP(op_AND_44))), CAST(8, MSB(DUP(op_AND_44)), DUP(op_AND_44)))), SN(64, 0xff)); + RzILOpPure *op_LSHIFT_54 = SHIFTL0(CAST(64, IL_FALSE, op_AND_49), SN(32, 8)); + RzILOpPure *op_OR_56 = LOGOR(CAST(64, IL_FALSE, op_AND_38), op_LSHIFT_54); + RzILOpEffect *op_ASSIGN_58 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, op_OR_56)); + + // Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xff << 0x10)))) | (((ut64) (((st64) ((st32) ((st8) ((Rs >> 0x8) & 0xff)))) & 0xff)) << 0x10))); + RzILOpPure *op_LSHIFT_64 = SHIFTL0(SN(64, 0xff), SN(32, 16)); + RzILOpPure *op_NOT_65 = LOGNOT(op_LSHIFT_64); + RzILOpPure *op_AND_67 = LOGAND(CAST(64, MSB(READ_REG(pkt, Rd_op, true)), READ_REG(pkt, Rd_op, true)), op_NOT_65); + RzILOpPure *op_RSHIFT_71 = SHIFTRA(DUP(Rs), SN(32, 8)); + RzILOpPure *op_AND_73 = LOGAND(op_RSHIFT_71, SN(32, 0xff)); + RzILOpPure *op_AND_78 = LOGAND(CAST(64, MSB(CAST(32, MSB(CAST(8, MSB(op_AND_73), DUP(op_AND_73))), CAST(8, MSB(DUP(op_AND_73)), DUP(op_AND_73)))), CAST(32, MSB(CAST(8, MSB(DUP(op_AND_73)), DUP(op_AND_73))), CAST(8, MSB(DUP(op_AND_73)), DUP(op_AND_73)))), SN(64, 0xff)); + RzILOpPure *op_LSHIFT_83 = SHIFTL0(CAST(64, IL_FALSE, op_AND_78), SN(32, 16)); + RzILOpPure *op_OR_85 = LOGOR(CAST(64, IL_FALSE, op_AND_67), op_LSHIFT_83); + RzILOpEffect *op_ASSIGN_87 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, op_OR_85)); + + // Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xff << 0x18)))) | (((ut64) (((st64) ((st32) ((st8) ((Rs >> 0x0) & 0xff)))) & 0xff)) << 0x18))); + RzILOpPure *op_LSHIFT_93 = SHIFTL0(SN(64, 0xff), SN(32, 24)); + RzILOpPure *op_NOT_94 = LOGNOT(op_LSHIFT_93); + RzILOpPure *op_AND_96 = LOGAND(CAST(64, MSB(READ_REG(pkt, Rd_op, true)), READ_REG(pkt, Rd_op, 
true)), op_NOT_94); + RzILOpPure *op_RSHIFT_100 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_102 = LOGAND(op_RSHIFT_100, SN(32, 0xff)); + RzILOpPure *op_AND_107 = LOGAND(CAST(64, MSB(CAST(32, MSB(CAST(8, MSB(op_AND_102), DUP(op_AND_102))), CAST(8, MSB(DUP(op_AND_102)), DUP(op_AND_102)))), CAST(32, MSB(CAST(8, MSB(DUP(op_AND_102)), DUP(op_AND_102))), CAST(8, MSB(DUP(op_AND_102)), DUP(op_AND_102)))), SN(64, 0xff)); + RzILOpPure *op_LSHIFT_112 = SHIFTL0(CAST(64, IL_FALSE, op_AND_107), SN(32, 24)); + RzILOpPure *op_OR_114 = LOGOR(CAST(64, IL_FALSE, op_AND_96), op_LSHIFT_112); + RzILOpEffect *op_ASSIGN_116 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, op_OR_114)); + + RzILOpEffect *instruction_sequence = SEQN(4, op_ASSIGN_29, op_ASSIGN_58, op_ASSIGN_87, op_ASSIGN_116); + return instruction_sequence; +} + +// Rd = sxtb(Rs) +RzILOpEffect *hex_il_op_a2_sxtb(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // Rd = ((st32) sextract64(((ut64) Rs), 0x0, 0x8)); + RzILOpEffect *op_ASSIGN_11 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(SEXTRACT64(CAST(64, IL_FALSE, Rs), SN(32, 0), SN(32, 8))), SEXTRACT64(CAST(64, IL_FALSE, DUP(Rs)), SN(32, 0), SN(32, 8)))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_11; + return instruction_sequence; +} + +// Rd = sxth(Rs) +RzILOpEffect *hex_il_op_a2_sxth(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // Rd = ((st32) sextract64(((ut64) Rs), 0x0, 0x10)); + RzILOpEffect *op_ASSIGN_11 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(SEXTRACT64(CAST(64, IL_FALSE, Rs), SN(32, 0), SN(32, 16))), SEXTRACT64(CAST(64, IL_FALSE, DUP(Rs)), SN(32, 0), SN(32, 
16)))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_11; + return instruction_sequence; +} + +// Rdd = sxtw(Rs) +RzILOpEffect *hex_il_op_a2_sxtw(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // Rdd = ((st64) Rs); + RzILOpEffect *op_ASSIGN_3 = WRITE_REG(bundle, Rdd_op, CAST(64, MSB(Rs), DUP(Rs))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_3; + return instruction_sequence; +} + +// Rd = Rs +RzILOpEffect *hex_il_op_a2_tfr(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // Rd = Rs; + RzILOpEffect *op_ASSIGN_2 = WRITE_REG(bundle, Rd_op, Rs); + + RzILOpEffect *instruction_sequence = op_ASSIGN_2; + return instruction_sequence; +} + +// Rd = Cs +RzILOpEffect *hex_il_op_a2_tfrcrr(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Cs_op = ISA2REG(hi, 's', false); + RzILOpPure *Cs = READ_REG(pkt, Cs_op, false); + + // Rd = Cs; + RzILOpEffect *op_ASSIGN_2 = WRITE_REG(bundle, Rd_op, Cs); + + RzILOpEffect *instruction_sequence = op_ASSIGN_2; + return instruction_sequence; +} + +// Rx.h = Ii +RzILOpEffect *hex_il_op_a2_tfrih(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + + // u = u; + RzILOpEffect *imm_assign_9 = SETL("u", u); + + // Rx = ((st32) (((ut64) (((st64) Rx) & (~(0xffff << 0x10)))) | (((ut64) (u & ((ut32) 0xffff))) << 0x10))); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(SN(64, 0xffff), SN(32, 
16)); + RzILOpPure *op_NOT_6 = LOGNOT(op_LSHIFT_5); + RzILOpPure *op_AND_8 = LOGAND(CAST(64, MSB(READ_REG(pkt, Rx_op, false)), READ_REG(pkt, Rx_op, false)), op_NOT_6); + RzILOpPure *op_AND_13 = LOGAND(VARL("u"), CAST(32, IL_FALSE, SN(32, 0xffff))); + RzILOpPure *op_LSHIFT_18 = SHIFTL0(CAST(64, IL_FALSE, op_AND_13), SN(32, 16)); + RzILOpPure *op_OR_20 = LOGOR(CAST(64, IL_FALSE, op_AND_8), op_LSHIFT_18); + RzILOpEffect *op_ASSIGN_22 = WRITE_REG(bundle, Rx_op, CAST(32, IL_FALSE, op_OR_20)); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_9, op_ASSIGN_22); + return instruction_sequence; +} + +// Rx.l = Ii +RzILOpEffect *hex_il_op_a2_tfril(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + + // u = u; + RzILOpEffect *imm_assign_9 = SETL("u", u); + + // Rx = ((st32) (((ut64) (((st64) Rx) & (~(0xffff << 0x0)))) | (((ut64) (u & ((ut32) 0xffff))) << 0x0))); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(SN(64, 0xffff), SN(32, 0)); + RzILOpPure *op_NOT_6 = LOGNOT(op_LSHIFT_5); + RzILOpPure *op_AND_8 = LOGAND(CAST(64, MSB(READ_REG(pkt, Rx_op, false)), READ_REG(pkt, Rx_op, false)), op_NOT_6); + RzILOpPure *op_AND_13 = LOGAND(VARL("u"), CAST(32, IL_FALSE, SN(32, 0xffff))); + RzILOpPure *op_LSHIFT_18 = SHIFTL0(CAST(64, IL_FALSE, op_AND_13), SN(32, 0)); + RzILOpPure *op_OR_20 = LOGOR(CAST(64, IL_FALSE, op_AND_8), op_LSHIFT_18); + RzILOpEffect *op_ASSIGN_22 = WRITE_REG(bundle, Rx_op, CAST(32, IL_FALSE, op_OR_20)); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_9, op_ASSIGN_22); + return instruction_sequence; +} + +// Cd = Rs +RzILOpEffect *hex_il_op_a2_tfrrcr(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Cd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // Cd = 
Rs; + RzILOpEffect *op_ASSIGN_2 = WRITE_REG(bundle, Cd_op, Rs); + + RzILOpEffect *instruction_sequence = op_ASSIGN_2; + return instruction_sequence; +} + +// Rd = Ii +RzILOpEffect *hex_il_op_a2_tfrsi(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + // READ + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // s = s; + RzILOpEffect *imm_assign_0 = SETL("s", s); + + // Rd = s; + RzILOpEffect *op_ASSIGN_3 = WRITE_REG(bundle, Rd_op, VARL("s")); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_0, op_ASSIGN_3); + return instruction_sequence; +} + +// Rdd = vabsh(Rss) +RzILOpEffect *hex_il_op_a2_vabsh(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: st32 i; + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_2 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_5 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp34 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_7 = SETL("h_tmp34", VARL("i")); + + // seq(h_tmp34 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_8 = SEQN(2, op_ASSIGN_hybrid_tmp_7, op_INC_5); + + // Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * 0x10)))) | (((ut64) (((((st32) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff)))) < 0x0) ? 
(-((st32) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff))))) : ((st32) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff))))) & 0xffff)) << i * 0x10))); + RzILOpPure *op_MUL_12 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_13 = SHIFTL0(SN(64, 0xffff), op_MUL_12); + RzILOpPure *op_NOT_14 = LOGNOT(op_LSHIFT_13); + RzILOpPure *op_AND_15 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_14); + RzILOpPure *op_MUL_18 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_19 = SHIFTRA(Rss, op_MUL_18); + RzILOpPure *op_AND_22 = LOGAND(op_RSHIFT_19, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_LT_26 = SLT(CAST(32, MSB(CAST(16, MSB(op_AND_22), DUP(op_AND_22))), CAST(16, MSB(DUP(op_AND_22)), DUP(op_AND_22))), SN(32, 0)); + RzILOpPure *op_MUL_28 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_29 = SHIFTRA(DUP(Rss), op_MUL_28); + RzILOpPure *op_AND_32 = LOGAND(op_RSHIFT_29, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_NEG_35 = NEG(CAST(32, MSB(CAST(16, MSB(op_AND_32), DUP(op_AND_32))), CAST(16, MSB(DUP(op_AND_32)), DUP(op_AND_32)))); + RzILOpPure *op_MUL_37 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_38 = SHIFTRA(DUP(Rss), op_MUL_37); + RzILOpPure *op_AND_41 = LOGAND(op_RSHIFT_38, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *cond_44 = ITE(op_LT_26, op_NEG_35, CAST(32, MSB(CAST(16, MSB(op_AND_41), DUP(op_AND_41))), CAST(16, MSB(DUP(op_AND_41)), DUP(op_AND_41)))); + RzILOpPure *op_AND_46 = LOGAND(cond_44, SN(32, 0xffff)); + RzILOpPure *op_MUL_49 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_50 = SHIFTL0(CAST(64, IL_FALSE, op_AND_46), op_MUL_49); + RzILOpPure *op_OR_52 = LOGOR(CAST(64, IL_FALSE, op_AND_15), op_LSHIFT_50); + RzILOpEffect *op_ASSIGN_54 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_52)); + + // seq(h_tmp34; Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * 0x10 ...; + RzILOpEffect *seq_56 = op_ASSIGN_54; + + // seq(seq(h_tmp34; Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * ...; + 
RzILOpEffect *seq_57 = SEQN(2, seq_56, seq_8); + + // while ((i < 0x4)) { seq(seq(h_tmp34; Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * ... }; + RzILOpPure *op_LT_4 = SLT(VARL("i"), SN(32, 4)); + RzILOpEffect *for_58 = REPEAT(op_LT_4, seq_57); + + // seq(i = 0x0; while ((i < 0x4)) { seq(seq(h_tmp34; Rdd = ((st64) ...; + RzILOpEffect *seq_59 = SEQN(2, op_ASSIGN_2, for_58); + + RzILOpEffect *instruction_sequence = seq_59; + return instruction_sequence; +} + +// Rdd = vabsh(Rss):sat +RzILOpEffect *hex_il_op_a2_vabshsat(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: st32 i; + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_2 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_5 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp35 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_7 = SETL("h_tmp35", VARL("i")); + + // seq(h_tmp35 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_8 = SEQN(2, op_ASSIGN_hybrid_tmp_7, op_INC_5); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_113 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((((st32) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff)))) < 0x0) ? (-((st32) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff))))) : ((st32) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff)))))), 0x0, 0x10) == ((st64) ((((st32) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff)))) < 0x0) ? (-((st32) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff))))) : ((st32) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff)))))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((((st32) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff)))) < 0x0) ? 
(-((st32) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff))))) : ((st32) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff))))) < 0x0) ? (-(0x1 << 0xf)) : (0x1 << 0xf) - ((st64) 0x1))); + RzILOpPure *op_MUL_21 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_22 = SHIFTRA(Rss, op_MUL_21); + RzILOpPure *op_AND_25 = LOGAND(op_RSHIFT_22, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_LT_29 = SLT(CAST(32, MSB(CAST(16, MSB(op_AND_25), DUP(op_AND_25))), CAST(16, MSB(DUP(op_AND_25)), DUP(op_AND_25))), SN(32, 0)); + RzILOpPure *op_MUL_31 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_32 = SHIFTRA(DUP(Rss), op_MUL_31); + RzILOpPure *op_AND_35 = LOGAND(op_RSHIFT_32, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_NEG_38 = NEG(CAST(32, MSB(CAST(16, MSB(op_AND_35), DUP(op_AND_35))), CAST(16, MSB(DUP(op_AND_35)), DUP(op_AND_35)))); + RzILOpPure *op_MUL_40 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_41 = SHIFTRA(DUP(Rss), op_MUL_40); + RzILOpPure *op_AND_44 = LOGAND(op_RSHIFT_41, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *cond_47 = ITE(op_LT_29, op_NEG_38, CAST(32, MSB(CAST(16, MSB(op_AND_44), DUP(op_AND_44))), CAST(16, MSB(DUP(op_AND_44)), DUP(op_AND_44)))); + RzILOpPure *op_MUL_54 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_55 = SHIFTRA(DUP(Rss), op_MUL_54); + RzILOpPure *op_AND_58 = LOGAND(op_RSHIFT_55, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_LT_62 = SLT(CAST(32, MSB(CAST(16, MSB(op_AND_58), DUP(op_AND_58))), CAST(16, MSB(DUP(op_AND_58)), DUP(op_AND_58))), SN(32, 0)); + RzILOpPure *op_MUL_64 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_65 = SHIFTRA(DUP(Rss), op_MUL_64); + RzILOpPure *op_AND_68 = LOGAND(op_RSHIFT_65, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_NEG_71 = NEG(CAST(32, MSB(CAST(16, MSB(op_AND_68), DUP(op_AND_68))), CAST(16, MSB(DUP(op_AND_68)), DUP(op_AND_68)))); + RzILOpPure *op_MUL_73 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure 
*op_RSHIFT_74 = SHIFTRA(DUP(Rss), op_MUL_73); + RzILOpPure *op_AND_77 = LOGAND(op_RSHIFT_74, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *cond_80 = ITE(op_LT_62, op_NEG_71, CAST(32, MSB(CAST(16, MSB(op_AND_77), DUP(op_AND_77))), CAST(16, MSB(DUP(op_AND_77)), DUP(op_AND_77)))); + RzILOpPure *op_EQ_82 = EQ(SEXTRACT64(CAST(64, IL_FALSE, cond_47), SN(32, 0), SN(32, 16)), CAST(64, MSB(cond_80), DUP(cond_80))); + RzILOpPure *op_MUL_115 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_116 = SHIFTRA(DUP(Rss), op_MUL_115); + RzILOpPure *op_AND_119 = LOGAND(op_RSHIFT_116, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_LT_123 = SLT(CAST(32, MSB(CAST(16, MSB(op_AND_119), DUP(op_AND_119))), CAST(16, MSB(DUP(op_AND_119)), DUP(op_AND_119))), SN(32, 0)); + RzILOpPure *op_MUL_125 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_126 = SHIFTRA(DUP(Rss), op_MUL_125); + RzILOpPure *op_AND_129 = LOGAND(op_RSHIFT_126, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_NEG_132 = NEG(CAST(32, MSB(CAST(16, MSB(op_AND_129), DUP(op_AND_129))), CAST(16, MSB(DUP(op_AND_129)), DUP(op_AND_129)))); + RzILOpPure *op_MUL_134 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_135 = SHIFTRA(DUP(Rss), op_MUL_134); + RzILOpPure *op_AND_138 = LOGAND(op_RSHIFT_135, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *cond_141 = ITE(op_LT_123, op_NEG_132, CAST(32, MSB(CAST(16, MSB(op_AND_138), DUP(op_AND_138))), CAST(16, MSB(DUP(op_AND_138)), DUP(op_AND_138)))); + RzILOpPure *op_LT_143 = SLT(cond_141, SN(32, 0)); + RzILOpPure *op_LSHIFT_148 = SHIFTL0(SN(64, 1), SN(32, 15)); + RzILOpPure *op_NEG_149 = NEG(op_LSHIFT_148); + RzILOpPure *op_LSHIFT_154 = SHIFTL0(SN(64, 1), SN(32, 15)); + RzILOpPure *op_SUB_157 = SUB(op_LSHIFT_154, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_158 = ITE(op_LT_143, op_NEG_149, op_SUB_157); + RzILOpEffect *gcc_expr_159 = BRANCH(op_EQ_82, EMPTY(), set_usr_field_call_113); + + // h_tmp36 = 
HYB(gcc_expr_if ((sextract64(((ut64) ((((st32) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff)))) < 0x0) ? (-((st32) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff))))) : ((st32) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff)))))), 0x0, 0x10) == ((st64) ((((st32) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff)))) < 0x0) ? (-((st32) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff))))) : ((st32) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff)))))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((((st32) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff)))) < 0x0) ? (-((st32) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff))))) : ((st32) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff))))) < 0x0) ? (-(0x1 << 0xf)) : (0x1 << 0xf) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_161 = SETL("h_tmp36", cond_158); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((((st32) ((st16) ((Rss ...; + RzILOpEffect *seq_162 = SEQN(2, gcc_expr_159, op_ASSIGN_hybrid_tmp_161); + + // Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * 0x10)))) | (((ut64) (((sextract64(((ut64) ((((st32) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff)))) < 0x0) ? (-((st32) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff))))) : ((st32) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff)))))), 0x0, 0x10) == ((st64) ((((st32) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff)))) < 0x0) ? (-((st32) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff))))) : ((st32) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff))))))) ? ((st64) ((((st32) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff)))) < 0x0) ? 
(-((st32) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff))))) : ((st32) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff)))))) : h_tmp36) & ((st64) 0xffff))) << i * 0x10))); + RzILOpPure *op_MUL_12 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_13 = SHIFTL0(SN(64, 0xffff), op_MUL_12); + RzILOpPure *op_NOT_14 = LOGNOT(op_LSHIFT_13); + RzILOpPure *op_AND_15 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_14); + RzILOpPure *op_MUL_84 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_85 = SHIFTRA(DUP(Rss), op_MUL_84); + RzILOpPure *op_AND_88 = LOGAND(op_RSHIFT_85, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_LT_92 = SLT(CAST(32, MSB(CAST(16, MSB(op_AND_88), DUP(op_AND_88))), CAST(16, MSB(DUP(op_AND_88)), DUP(op_AND_88))), SN(32, 0)); + RzILOpPure *op_MUL_94 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_95 = SHIFTRA(DUP(Rss), op_MUL_94); + RzILOpPure *op_AND_98 = LOGAND(op_RSHIFT_95, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_NEG_101 = NEG(CAST(32, MSB(CAST(16, MSB(op_AND_98), DUP(op_AND_98))), CAST(16, MSB(DUP(op_AND_98)), DUP(op_AND_98)))); + RzILOpPure *op_MUL_103 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_104 = SHIFTRA(DUP(Rss), op_MUL_103); + RzILOpPure *op_AND_107 = LOGAND(op_RSHIFT_104, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *cond_110 = ITE(op_LT_92, op_NEG_101, CAST(32, MSB(CAST(16, MSB(op_AND_107), DUP(op_AND_107))), CAST(16, MSB(DUP(op_AND_107)), DUP(op_AND_107)))); + RzILOpPure *cond_164 = ITE(DUP(op_EQ_82), CAST(64, MSB(cond_110), DUP(cond_110)), VARL("h_tmp36")); + RzILOpPure *op_AND_167 = LOGAND(cond_164, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_170 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_171 = SHIFTL0(CAST(64, IL_FALSE, op_AND_167), op_MUL_170); + RzILOpPure *op_OR_173 = LOGOR(CAST(64, IL_FALSE, op_AND_15), op_LSHIFT_171); + RzILOpEffect *op_ASSIGN_175 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_173)); + + // 
seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((((st32) ((st16) ( ...; + RzILOpEffect *seq_176 = SEQN(2, seq_162, op_ASSIGN_175); + + // seq(h_tmp35; seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((((st ...; + RzILOpEffect *seq_178 = seq_176; + + // seq(seq(h_tmp35; seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) (( ...; + RzILOpEffect *seq_179 = SEQN(2, seq_178, seq_8); + + // while ((i < 0x4)) { seq(seq(h_tmp35; seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) (( ... }; + RzILOpPure *op_LT_4 = SLT(VARL("i"), SN(32, 4)); + RzILOpEffect *for_180 = REPEAT(op_LT_4, seq_179); + + // seq(i = 0x0; while ((i < 0x4)) { seq(seq(h_tmp35; seq(seq(HYB(gc ...; + RzILOpEffect *seq_181 = SEQN(2, op_ASSIGN_2, for_180); + + RzILOpEffect *instruction_sequence = seq_181; + return instruction_sequence; +} + +// Rdd = vabsw(Rss) +RzILOpEffect *hex_il_op_a2_vabsw(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: st32 i; + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_2 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_5 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp37 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_7 = SETL("h_tmp37", VARL("i")); + + // seq(h_tmp37 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_8 = SEQN(2, op_ASSIGN_hybrid_tmp_7, op_INC_5); + + // Rdd = ((Rdd & (~(0xffffffff << i * 0x20))) | ((((((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff))) < ((st64) 0x0)) ? 
(-((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff)))) : ((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff)))) & 0xffffffff) << i * 0x20)); + RzILOpPure *op_MUL_12 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_LSHIFT_13 = SHIFTL0(SN(64, 0xffffffff), op_MUL_12); + RzILOpPure *op_NOT_14 = LOGNOT(op_LSHIFT_13); + RzILOpPure *op_AND_15 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_14); + RzILOpPure *op_MUL_18 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_RSHIFT_19 = SHIFTRA(Rss, op_MUL_18); + RzILOpPure *op_AND_21 = LOGAND(op_RSHIFT_19, SN(64, 0xffffffff)); + RzILOpPure *op_LT_26 = SLT(CAST(64, MSB(CAST(32, MSB(op_AND_21), DUP(op_AND_21))), CAST(32, MSB(DUP(op_AND_21)), DUP(op_AND_21))), CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_MUL_28 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_RSHIFT_29 = SHIFTRA(DUP(Rss), op_MUL_28); + RzILOpPure *op_AND_31 = LOGAND(op_RSHIFT_29, SN(64, 0xffffffff)); + RzILOpPure *op_NEG_34 = NEG(CAST(64, MSB(CAST(32, MSB(op_AND_31), DUP(op_AND_31))), CAST(32, MSB(DUP(op_AND_31)), DUP(op_AND_31)))); + RzILOpPure *op_MUL_36 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_RSHIFT_37 = SHIFTRA(DUP(Rss), op_MUL_36); + RzILOpPure *op_AND_39 = LOGAND(op_RSHIFT_37, SN(64, 0xffffffff)); + RzILOpPure *cond_42 = ITE(op_LT_26, op_NEG_34, CAST(64, MSB(CAST(32, MSB(op_AND_39), DUP(op_AND_39))), CAST(32, MSB(DUP(op_AND_39)), DUP(op_AND_39)))); + RzILOpPure *op_AND_44 = LOGAND(cond_42, SN(64, 0xffffffff)); + RzILOpPure *op_MUL_46 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_LSHIFT_47 = SHIFTL0(op_AND_44, op_MUL_46); + RzILOpPure *op_OR_48 = LOGOR(op_AND_15, op_LSHIFT_47); + RzILOpEffect *op_ASSIGN_49 = WRITE_REG(bundle, Rdd_op, op_OR_48); + + // seq(h_tmp37; Rdd = ((Rdd & (~(0xffffffff << i * 0x20))) | (((((( ...; + RzILOpEffect *seq_51 = op_ASSIGN_49; + + // seq(seq(h_tmp37; Rdd = ((Rdd & (~(0xffffffff << i * 0x20))) | (( ...; + RzILOpEffect *seq_52 = SEQN(2, seq_51, seq_8); + + // while ((i < 0x2)) { seq(seq(h_tmp37; Rdd = ((Rdd & 
(~(0xffffffff << i * 0x20))) | (( ... }; + RzILOpPure *op_LT_4 = SLT(VARL("i"), SN(32, 2)); + RzILOpEffect *for_53 = REPEAT(op_LT_4, seq_52); + + // seq(i = 0x0; while ((i < 0x2)) { seq(seq(h_tmp37; Rdd = ((Rdd & ...; + RzILOpEffect *seq_54 = SEQN(2, op_ASSIGN_2, for_53); + + RzILOpEffect *instruction_sequence = seq_54; + return instruction_sequence; +} + +// Rdd = vabsw(Rss):sat +RzILOpEffect *hex_il_op_a2_vabswsat(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: st32 i; + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_2 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_5 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp38 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_7 = SETL("h_tmp38", VARL("i")); + + // seq(h_tmp38 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_8 = SEQN(2, op_ASSIGN_hybrid_tmp_7, op_INC_5); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_106 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff))) < ((st64) 0x0)) ? (-((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff)))) : ((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff))))), 0x0, 0x20) == ((((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff))) < ((st64) 0x0)) ? (-((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff)))) : ((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff)))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff))) < ((st64) 0x0)) ? (-((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff)))) : ((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff)))) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_MUL_21 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_RSHIFT_22 = SHIFTRA(Rss, op_MUL_21); + RzILOpPure *op_AND_24 = LOGAND(op_RSHIFT_22, SN(64, 0xffffffff)); + RzILOpPure *op_LT_29 = SLT(CAST(64, MSB(CAST(32, MSB(op_AND_24), DUP(op_AND_24))), CAST(32, MSB(DUP(op_AND_24)), DUP(op_AND_24))), CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_MUL_31 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_RSHIFT_32 = SHIFTRA(DUP(Rss), op_MUL_31); + RzILOpPure *op_AND_34 = LOGAND(op_RSHIFT_32, SN(64, 0xffffffff)); + RzILOpPure *op_NEG_37 = NEG(CAST(64, MSB(CAST(32, MSB(op_AND_34), DUP(op_AND_34))), CAST(32, MSB(DUP(op_AND_34)), DUP(op_AND_34)))); + RzILOpPure *op_MUL_39 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_RSHIFT_40 = SHIFTRA(DUP(Rss), op_MUL_39); + RzILOpPure *op_AND_42 = LOGAND(op_RSHIFT_40, SN(64, 0xffffffff)); + RzILOpPure *cond_45 = ITE(op_LT_29, op_NEG_37, CAST(64, MSB(CAST(32, MSB(op_AND_42), DUP(op_AND_42))), CAST(32, MSB(DUP(op_AND_42)), DUP(op_AND_42)))); + RzILOpPure *op_MUL_52 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_RSHIFT_53 = SHIFTRA(DUP(Rss), op_MUL_52); + RzILOpPure *op_AND_55 = LOGAND(op_RSHIFT_53, SN(64, 0xffffffff)); + RzILOpPure *op_LT_60 = SLT(CAST(64, MSB(CAST(32, MSB(op_AND_55), DUP(op_AND_55))), CAST(32, MSB(DUP(op_AND_55)), DUP(op_AND_55))), CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_MUL_62 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_RSHIFT_63 = SHIFTRA(DUP(Rss), op_MUL_62); + RzILOpPure *op_AND_65 = LOGAND(op_RSHIFT_63, SN(64, 0xffffffff)); + RzILOpPure *op_NEG_68 = NEG(CAST(64, MSB(CAST(32, MSB(op_AND_65), DUP(op_AND_65))), CAST(32, MSB(DUP(op_AND_65)), DUP(op_AND_65)))); + RzILOpPure *op_MUL_70 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_RSHIFT_71 = SHIFTRA(DUP(Rss), op_MUL_70); + RzILOpPure *op_AND_73 = LOGAND(op_RSHIFT_71, SN(64, 0xffffffff)); + RzILOpPure *cond_76 = ITE(op_LT_60, op_NEG_68, CAST(64, MSB(CAST(32, MSB(op_AND_73), 
DUP(op_AND_73))), CAST(32, MSB(DUP(op_AND_73)), DUP(op_AND_73)))); + RzILOpPure *op_EQ_77 = EQ(SEXTRACT64(CAST(64, IL_FALSE, cond_45), SN(32, 0), SN(32, 0x20)), cond_76); + RzILOpPure *op_MUL_108 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_RSHIFT_109 = SHIFTRA(DUP(Rss), op_MUL_108); + RzILOpPure *op_AND_111 = LOGAND(op_RSHIFT_109, SN(64, 0xffffffff)); + RzILOpPure *op_LT_116 = SLT(CAST(64, MSB(CAST(32, MSB(op_AND_111), DUP(op_AND_111))), CAST(32, MSB(DUP(op_AND_111)), DUP(op_AND_111))), CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_MUL_118 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_RSHIFT_119 = SHIFTRA(DUP(Rss), op_MUL_118); + RzILOpPure *op_AND_121 = LOGAND(op_RSHIFT_119, SN(64, 0xffffffff)); + RzILOpPure *op_NEG_124 = NEG(CAST(64, MSB(CAST(32, MSB(op_AND_121), DUP(op_AND_121))), CAST(32, MSB(DUP(op_AND_121)), DUP(op_AND_121)))); + RzILOpPure *op_MUL_126 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_RSHIFT_127 = SHIFTRA(DUP(Rss), op_MUL_126); + RzILOpPure *op_AND_129 = LOGAND(op_RSHIFT_127, SN(64, 0xffffffff)); + RzILOpPure *cond_132 = ITE(op_LT_116, op_NEG_124, CAST(64, MSB(CAST(32, MSB(op_AND_129), DUP(op_AND_129))), CAST(32, MSB(DUP(op_AND_129)), DUP(op_AND_129)))); + RzILOpPure *op_LT_135 = SLT(cond_132, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_140 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_141 = NEG(op_LSHIFT_140); + RzILOpPure *op_LSHIFT_146 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_149 = SUB(op_LSHIFT_146, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_150 = ITE(op_LT_135, op_NEG_141, op_SUB_149); + RzILOpEffect *gcc_expr_151 = BRANCH(op_EQ_77, EMPTY(), set_usr_field_call_106); + + // h_tmp39 = HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff))) < ((st64) 0x0)) ? 
(-((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff)))) : ((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff))))), 0x0, 0x20) == ((((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff))) < ((st64) 0x0)) ? (-((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff)))) : ((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff)))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff))) < ((st64) 0x0)) ? (-((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff)))) : ((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff)))) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_153 = SETL("h_tmp39", cond_150); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) ((st32) ((Rss ...; + RzILOpEffect *seq_154 = SEQN(2, gcc_expr_151, op_ASSIGN_hybrid_tmp_153); + + // Rdd = ((Rdd & (~(0xffffffff << i * 0x20))) | ((((sextract64(((ut64) ((((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff))) < ((st64) 0x0)) ? (-((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff)))) : ((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff))))), 0x0, 0x20) == ((((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff))) < ((st64) 0x0)) ? (-((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff)))) : ((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff))))) ? ((((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff))) < ((st64) 0x0)) ? 
(-((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff)))) : ((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff)))) : h_tmp39) & 0xffffffff) << i * 0x20)); + RzILOpPure *op_MUL_12 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_LSHIFT_13 = SHIFTL0(SN(64, 0xffffffff), op_MUL_12); + RzILOpPure *op_NOT_14 = LOGNOT(op_LSHIFT_13); + RzILOpPure *op_AND_15 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_14); + RzILOpPure *op_MUL_79 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_RSHIFT_80 = SHIFTRA(DUP(Rss), op_MUL_79); + RzILOpPure *op_AND_82 = LOGAND(op_RSHIFT_80, SN(64, 0xffffffff)); + RzILOpPure *op_LT_87 = SLT(CAST(64, MSB(CAST(32, MSB(op_AND_82), DUP(op_AND_82))), CAST(32, MSB(DUP(op_AND_82)), DUP(op_AND_82))), CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_MUL_89 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_RSHIFT_90 = SHIFTRA(DUP(Rss), op_MUL_89); + RzILOpPure *op_AND_92 = LOGAND(op_RSHIFT_90, SN(64, 0xffffffff)); + RzILOpPure *op_NEG_95 = NEG(CAST(64, MSB(CAST(32, MSB(op_AND_92), DUP(op_AND_92))), CAST(32, MSB(DUP(op_AND_92)), DUP(op_AND_92)))); + RzILOpPure *op_MUL_97 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_RSHIFT_98 = SHIFTRA(DUP(Rss), op_MUL_97); + RzILOpPure *op_AND_100 = LOGAND(op_RSHIFT_98, SN(64, 0xffffffff)); + RzILOpPure *cond_103 = ITE(op_LT_87, op_NEG_95, CAST(64, MSB(CAST(32, MSB(op_AND_100), DUP(op_AND_100))), CAST(32, MSB(DUP(op_AND_100)), DUP(op_AND_100)))); + RzILOpPure *cond_155 = ITE(DUP(op_EQ_77), cond_103, VARL("h_tmp39")); + RzILOpPure *op_AND_157 = LOGAND(cond_155, SN(64, 0xffffffff)); + RzILOpPure *op_MUL_159 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_LSHIFT_160 = SHIFTL0(op_AND_157, op_MUL_159); + RzILOpPure *op_OR_161 = LOGOR(op_AND_15, op_LSHIFT_160); + RzILOpEffect *op_ASSIGN_162 = WRITE_REG(bundle, Rdd_op, op_OR_161); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) ((st32) ( ...; + RzILOpEffect *seq_163 = SEQN(2, seq_154, op_ASSIGN_162); + + // seq(h_tmp38; seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) 
((((st ...; + RzILOpEffect *seq_165 = seq_163; + + // seq(seq(h_tmp38; seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) (( ...; + RzILOpEffect *seq_166 = SEQN(2, seq_165, seq_8); + + // while ((i < 0x2)) { seq(seq(h_tmp38; seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) (( ... }; + RzILOpPure *op_LT_4 = SLT(VARL("i"), SN(32, 2)); + RzILOpEffect *for_167 = REPEAT(op_LT_4, seq_166); + + // seq(i = 0x0; while ((i < 0x2)) { seq(seq(h_tmp38; seq(seq(HYB(gc ...; + RzILOpEffect *seq_168 = SEQN(2, op_ASSIGN_2, for_167); + + RzILOpEffect *instruction_sequence = seq_168; + return instruction_sequence; +} + +// Rdd = vaddh(Rss,Rtt) +RzILOpEffect *hex_il_op_a2_vaddh(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: st32 i; + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_2 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_5 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp40 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_7 = SETL("h_tmp40", VARL("i")); + + // seq(h_tmp40 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_8 = SEQN(2, op_ASSIGN_hybrid_tmp_7, op_INC_5); + + // Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * 0x10)))) | (((ut64) (((st32) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff)))) + ((st32) ((st16) ((Rtt >> i * 0x10) & ((st64) 0xffff)))) & 0xffff)) << i * 0x10))); + RzILOpPure *op_MUL_12 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_13 = SHIFTL0(SN(64, 0xffff), op_MUL_12); + RzILOpPure *op_NOT_14 = LOGNOT(op_LSHIFT_13); + RzILOpPure *op_AND_15 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_14); + RzILOpPure *op_MUL_18 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_19 = SHIFTRA(Rss, op_MUL_18); + RzILOpPure *op_AND_22 = LOGAND(op_RSHIFT_19, CAST(64, 
MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_26 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_27 = SHIFTRA(Rtt, op_MUL_26); + RzILOpPure *op_AND_30 = LOGAND(op_RSHIFT_27, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_ADD_34 = ADD(CAST(32, MSB(CAST(16, MSB(op_AND_22), DUP(op_AND_22))), CAST(16, MSB(DUP(op_AND_22)), DUP(op_AND_22))), CAST(32, MSB(CAST(16, MSB(op_AND_30), DUP(op_AND_30))), CAST(16, MSB(DUP(op_AND_30)), DUP(op_AND_30)))); + RzILOpPure *op_AND_36 = LOGAND(op_ADD_34, SN(32, 0xffff)); + RzILOpPure *op_MUL_39 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_40 = SHIFTL0(CAST(64, IL_FALSE, op_AND_36), op_MUL_39); + RzILOpPure *op_OR_42 = LOGOR(CAST(64, IL_FALSE, op_AND_15), op_LSHIFT_40); + RzILOpEffect *op_ASSIGN_44 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_42)); + + // seq(h_tmp40; Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * 0x10 ...; + RzILOpEffect *seq_46 = op_ASSIGN_44; + + // seq(seq(h_tmp40; Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * ...; + RzILOpEffect *seq_47 = SEQN(2, seq_46, seq_8); + + // while ((i < 0x4)) { seq(seq(h_tmp40; Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * ... 
}; + RzILOpPure *op_LT_4 = SLT(VARL("i"), SN(32, 4)); + RzILOpEffect *for_48 = REPEAT(op_LT_4, seq_47); + + // seq(i = 0x0; while ((i < 0x4)) { seq(seq(h_tmp40; Rdd = ((st64) ...; + RzILOpEffect *seq_49 = SEQN(2, op_ASSIGN_2, for_48); + + RzILOpEffect *instruction_sequence = seq_49; + return instruction_sequence; +} + +// Rdd = vaddh(Rss,Rtt):sat +RzILOpEffect *hex_il_op_a2_vaddhs(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: st32 i; + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_2 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_5 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp41 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_7 = SETL("h_tmp41", VARL("i")); + + // seq(h_tmp41 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_8 = SEQN(2, op_ASSIGN_hybrid_tmp_7, op_INC_5); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_81 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((st32) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff)))) + ((st32) ((st16) ((Rtt >> i * 0x10) & ((st64) 0xffff))))), 0x0, 0x10) == ((st64) ((st32) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff)))) + ((st32) ((st16) ((Rtt >> i * 0x10) & ((st64) 0xffff))))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st32) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff)))) + ((st32) ((st16) ((Rtt >> i * 0x10) & ((st64) 0xffff)))) < 0x0) ? 
(-(0x1 << 0xf)) : (0x1 << 0xf) - ((st64) 0x1))); + RzILOpPure *op_MUL_21 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_22 = SHIFTRA(Rss, op_MUL_21); + RzILOpPure *op_AND_25 = LOGAND(op_RSHIFT_22, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_29 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_30 = SHIFTRA(Rtt, op_MUL_29); + RzILOpPure *op_AND_33 = LOGAND(op_RSHIFT_30, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_ADD_37 = ADD(CAST(32, MSB(CAST(16, MSB(op_AND_25), DUP(op_AND_25))), CAST(16, MSB(DUP(op_AND_25)), DUP(op_AND_25))), CAST(32, MSB(CAST(16, MSB(op_AND_33), DUP(op_AND_33))), CAST(16, MSB(DUP(op_AND_33)), DUP(op_AND_33)))); + RzILOpPure *op_MUL_44 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_45 = SHIFTRA(DUP(Rss), op_MUL_44); + RzILOpPure *op_AND_48 = LOGAND(op_RSHIFT_45, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_51 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_52 = SHIFTRA(DUP(Rtt), op_MUL_51); + RzILOpPure *op_AND_55 = LOGAND(op_RSHIFT_52, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_ADD_59 = ADD(CAST(32, MSB(CAST(16, MSB(op_AND_48), DUP(op_AND_48))), CAST(16, MSB(DUP(op_AND_48)), DUP(op_AND_48))), CAST(32, MSB(CAST(16, MSB(op_AND_55), DUP(op_AND_55))), CAST(16, MSB(DUP(op_AND_55)), DUP(op_AND_55)))); + RzILOpPure *op_EQ_61 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_ADD_37), SN(32, 0), SN(32, 16)), CAST(64, MSB(op_ADD_59), DUP(op_ADD_59))); + RzILOpPure *op_MUL_83 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_84 = SHIFTRA(DUP(Rss), op_MUL_83); + RzILOpPure *op_AND_87 = LOGAND(op_RSHIFT_84, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_90 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_91 = SHIFTRA(DUP(Rtt), op_MUL_90); + RzILOpPure *op_AND_94 = LOGAND(op_RSHIFT_91, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_ADD_98 = ADD(CAST(32, MSB(CAST(16, MSB(op_AND_87), DUP(op_AND_87))), CAST(16, 
MSB(DUP(op_AND_87)), DUP(op_AND_87))), CAST(32, MSB(CAST(16, MSB(op_AND_94), DUP(op_AND_94))), CAST(16, MSB(DUP(op_AND_94)), DUP(op_AND_94)))); + RzILOpPure *op_LT_100 = SLT(op_ADD_98, SN(32, 0)); + RzILOpPure *op_LSHIFT_105 = SHIFTL0(SN(64, 1), SN(32, 15)); + RzILOpPure *op_NEG_106 = NEG(op_LSHIFT_105); + RzILOpPure *op_LSHIFT_111 = SHIFTL0(SN(64, 1), SN(32, 15)); + RzILOpPure *op_SUB_114 = SUB(op_LSHIFT_111, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_115 = ITE(op_LT_100, op_NEG_106, op_SUB_114); + RzILOpEffect *gcc_expr_116 = BRANCH(op_EQ_61, EMPTY(), set_usr_field_call_81); + + // h_tmp42 = HYB(gcc_expr_if ((sextract64(((ut64) ((st32) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff)))) + ((st32) ((st16) ((Rtt >> i * 0x10) & ((st64) 0xffff))))), 0x0, 0x10) == ((st64) ((st32) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff)))) + ((st32) ((st16) ((Rtt >> i * 0x10) & ((st64) 0xffff))))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st32) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff)))) + ((st32) ((st16) ((Rtt >> i * 0x10) & ((st64) 0xffff)))) < 0x0) ? (-(0x1 << 0xf)) : (0x1 << 0xf) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_118 = SETL("h_tmp42", cond_115); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st32) ((st16) ((Rss > ...; + RzILOpEffect *seq_119 = SEQN(2, gcc_expr_116, op_ASSIGN_hybrid_tmp_118); + + // Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * 0x10)))) | (((ut64) (((sextract64(((ut64) ((st32) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff)))) + ((st32) ((st16) ((Rtt >> i * 0x10) & ((st64) 0xffff))))), 0x0, 0x10) == ((st64) ((st32) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff)))) + ((st32) ((st16) ((Rtt >> i * 0x10) & ((st64) 0xffff)))))) ? 
((st64) ((st32) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff)))) + ((st32) ((st16) ((Rtt >> i * 0x10) & ((st64) 0xffff))))) : h_tmp42) & ((st64) 0xffff))) << i * 0x10))); + RzILOpPure *op_MUL_12 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_13 = SHIFTL0(SN(64, 0xffff), op_MUL_12); + RzILOpPure *op_NOT_14 = LOGNOT(op_LSHIFT_13); + RzILOpPure *op_AND_15 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_14); + RzILOpPure *op_MUL_63 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_64 = SHIFTRA(DUP(Rss), op_MUL_63); + RzILOpPure *op_AND_67 = LOGAND(op_RSHIFT_64, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_70 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_71 = SHIFTRA(DUP(Rtt), op_MUL_70); + RzILOpPure *op_AND_74 = LOGAND(op_RSHIFT_71, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_ADD_78 = ADD(CAST(32, MSB(CAST(16, MSB(op_AND_67), DUP(op_AND_67))), CAST(16, MSB(DUP(op_AND_67)), DUP(op_AND_67))), CAST(32, MSB(CAST(16, MSB(op_AND_74), DUP(op_AND_74))), CAST(16, MSB(DUP(op_AND_74)), DUP(op_AND_74)))); + RzILOpPure *cond_121 = ITE(DUP(op_EQ_61), CAST(64, MSB(op_ADD_78), DUP(op_ADD_78)), VARL("h_tmp42")); + RzILOpPure *op_AND_124 = LOGAND(cond_121, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_127 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_128 = SHIFTL0(CAST(64, IL_FALSE, op_AND_124), op_MUL_127); + RzILOpPure *op_OR_130 = LOGOR(CAST(64, IL_FALSE, op_AND_15), op_LSHIFT_128); + RzILOpEffect *op_ASSIGN_132 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_130)); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st32) ((st16) ((R ...; + RzILOpEffect *seq_133 = SEQN(2, seq_119, op_ASSIGN_132); + + // seq(h_tmp41; seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st32 ...; + RzILOpEffect *seq_135 = seq_133; + + // seq(seq(h_tmp41; seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) (( ...; + RzILOpEffect *seq_136 = SEQN(2, seq_135, seq_8); + + // while ((i < 0x4)) { seq(seq(h_tmp41; 
seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) (( ... }; + RzILOpPure *op_LT_4 = SLT(VARL("i"), SN(32, 4)); + RzILOpEffect *for_137 = REPEAT(op_LT_4, seq_136); + + // seq(i = 0x0; while ((i < 0x4)) { seq(seq(h_tmp41; seq(seq(HYB(gc ...; + RzILOpEffect *seq_138 = SEQN(2, op_ASSIGN_2, for_137); + + RzILOpEffect *instruction_sequence = seq_138; + return instruction_sequence; +} + +// Rdd = vaddub(Rss,Rtt) +RzILOpEffect *hex_il_op_a2_vaddub(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: st32 i; + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_2 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_5 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp43 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_7 = SETL("h_tmp43", VARL("i")); + + // seq(h_tmp43 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_8 = SEQN(2, op_ASSIGN_hybrid_tmp_7, op_INC_5); + + // Rdd = ((st64) (((ut64) (Rdd & (~(0xff << i * 0x8)))) | (((ut64) (((st64) ((st32) ((ut8) ((Rss >> i * 0x8) & ((st64) 0xff)))) + ((st32) ((ut8) ((Rtt >> i * 0x8) & ((st64) 0xff))))) & 0xff)) << i * 0x8))); + RzILOpPure *op_MUL_12 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_LSHIFT_13 = SHIFTL0(SN(64, 0xff), op_MUL_12); + RzILOpPure *op_NOT_14 = LOGNOT(op_LSHIFT_13); + RzILOpPure *op_AND_15 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_14); + RzILOpPure *op_MUL_18 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_RSHIFT_19 = SHIFTRA(Rss, op_MUL_18); + RzILOpPure *op_AND_22 = LOGAND(op_RSHIFT_19, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_MUL_26 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_RSHIFT_27 = SHIFTRA(Rtt, op_MUL_26); + RzILOpPure *op_AND_30 = LOGAND(op_RSHIFT_27, CAST(64, MSB(SN(32, 0xff)), SN(32, 
0xff))); + RzILOpPure *op_ADD_34 = ADD(CAST(32, IL_FALSE, CAST(8, IL_FALSE, op_AND_22)), CAST(32, IL_FALSE, CAST(8, IL_FALSE, op_AND_30))); + RzILOpPure *op_AND_37 = LOGAND(CAST(64, MSB(op_ADD_34), DUP(op_ADD_34)), SN(64, 0xff)); + RzILOpPure *op_MUL_40 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_LSHIFT_41 = SHIFTL0(CAST(64, IL_FALSE, op_AND_37), op_MUL_40); + RzILOpPure *op_OR_43 = LOGOR(CAST(64, IL_FALSE, op_AND_15), op_LSHIFT_41); + RzILOpEffect *op_ASSIGN_45 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_43)); + + // seq(h_tmp43; Rdd = ((st64) (((ut64) (Rdd & (~(0xff << i * 0x8))) ...; + RzILOpEffect *seq_47 = op_ASSIGN_45; + + // seq(seq(h_tmp43; Rdd = ((st64) (((ut64) (Rdd & (~(0xff << i * 0x ...; + RzILOpEffect *seq_48 = SEQN(2, seq_47, seq_8); + + // while ((i < 0x8)) { seq(seq(h_tmp43; Rdd = ((st64) (((ut64) (Rdd & (~(0xff << i * 0x ... }; + RzILOpPure *op_LT_4 = SLT(VARL("i"), SN(32, 8)); + RzILOpEffect *for_49 = REPEAT(op_LT_4, seq_48); + + // seq(i = 0x0; while ((i < 0x8)) { seq(seq(h_tmp43; Rdd = ((st64) ...; + RzILOpEffect *seq_50 = SEQN(2, op_ASSIGN_2, for_49); + + RzILOpEffect *instruction_sequence = seq_50; + return instruction_sequence; +} + +// Rdd = vaddub(Rss,Rtt):sat +RzILOpEffect *hex_il_op_a2_vaddubs(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: st32 i; + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_2 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_5 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp44 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_7 = SETL("h_tmp44", VARL("i")); + + // seq(h_tmp44 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_8 = SEQN(2, op_ASSIGN_hybrid_tmp_7, op_INC_5); + + // 
set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_81 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((extract64(((ut64) ((st32) ((ut8) ((Rss >> i * 0x8) & ((st64) 0xff)))) + ((st32) ((ut8) ((Rtt >> i * 0x8) & ((st64) 0xff))))), 0x0, 0x8) == ((ut64) ((st32) ((ut8) ((Rss >> i * 0x8) & ((st64) 0xff)))) + ((st32) ((ut8) ((Rtt >> i * 0x8) & ((st64) 0xff))))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st32) ((ut8) ((Rss >> i * 0x8) & ((st64) 0xff)))) + ((st32) ((ut8) ((Rtt >> i * 0x8) & ((st64) 0xff)))) < 0x0) ? ((st64) 0x0) : (0x1 << 0x8) - ((st64) 0x1))); + RzILOpPure *op_MUL_21 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_RSHIFT_22 = SHIFTRA(Rss, op_MUL_21); + RzILOpPure *op_AND_25 = LOGAND(op_RSHIFT_22, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_MUL_29 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_RSHIFT_30 = SHIFTRA(Rtt, op_MUL_29); + RzILOpPure *op_AND_33 = LOGAND(op_RSHIFT_30, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_ADD_37 = ADD(CAST(32, IL_FALSE, CAST(8, IL_FALSE, op_AND_25)), CAST(32, IL_FALSE, CAST(8, IL_FALSE, op_AND_33))); + RzILOpPure *op_MUL_44 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_RSHIFT_45 = SHIFTRA(DUP(Rss), op_MUL_44); + RzILOpPure *op_AND_48 = LOGAND(op_RSHIFT_45, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_MUL_51 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_RSHIFT_52 = SHIFTRA(DUP(Rtt), op_MUL_51); + RzILOpPure *op_AND_55 = LOGAND(op_RSHIFT_52, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_ADD_59 = ADD(CAST(32, IL_FALSE, CAST(8, IL_FALSE, op_AND_48)), CAST(32, IL_FALSE, CAST(8, IL_FALSE, op_AND_55))); + RzILOpPure *op_EQ_61 = EQ(EXTRACT64(CAST(64, IL_FALSE, op_ADD_37), SN(32, 0), SN(32, 8)), CAST(64, IL_FALSE, op_ADD_59)); + RzILOpPure *op_MUL_83 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_RSHIFT_84 = SHIFTRA(DUP(Rss), op_MUL_83); + 
RzILOpPure *op_AND_87 = LOGAND(op_RSHIFT_84, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_MUL_90 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_RSHIFT_91 = SHIFTRA(DUP(Rtt), op_MUL_90); + RzILOpPure *op_AND_94 = LOGAND(op_RSHIFT_91, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_ADD_98 = ADD(CAST(32, IL_FALSE, CAST(8, IL_FALSE, op_AND_87)), CAST(32, IL_FALSE, CAST(8, IL_FALSE, op_AND_94))); + RzILOpPure *op_LT_100 = SLT(op_ADD_98, SN(32, 0)); + RzILOpPure *op_LSHIFT_104 = SHIFTL0(SN(64, 1), SN(32, 8)); + RzILOpPure *op_SUB_107 = SUB(op_LSHIFT_104, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_109 = ITE(op_LT_100, CAST(64, MSB(SN(32, 0)), SN(32, 0)), op_SUB_107); + RzILOpEffect *gcc_expr_110 = BRANCH(op_EQ_61, EMPTY(), set_usr_field_call_81); + + // h_tmp45 = HYB(gcc_expr_if ((extract64(((ut64) ((st32) ((ut8) ((Rss >> i * 0x8) & ((st64) 0xff)))) + ((st32) ((ut8) ((Rtt >> i * 0x8) & ((st64) 0xff))))), 0x0, 0x8) == ((ut64) ((st32) ((ut8) ((Rss >> i * 0x8) & ((st64) 0xff)))) + ((st32) ((ut8) ((Rtt >> i * 0x8) & ((st64) 0xff))))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st32) ((ut8) ((Rss >> i * 0x8) & ((st64) 0xff)))) + ((st32) ((ut8) ((Rtt >> i * 0x8) & ((st64) 0xff)))) < 0x0) ? ((st64) 0x0) : (0x1 << 0x8) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_112 = SETL("h_tmp45", cond_109); + + // seq(HYB(gcc_expr_if ((extract64(((ut64) ((st32) ((ut8) ((Rss >> ...; + RzILOpEffect *seq_113 = SEQN(2, gcc_expr_110, op_ASSIGN_hybrid_tmp_112); + + // Rdd = ((st64) (((ut64) (Rdd & (~(0xff << i * 0x8)))) | (((ut64) (((extract64(((ut64) ((st32) ((ut8) ((Rss >> i * 0x8) & ((st64) 0xff)))) + ((st32) ((ut8) ((Rtt >> i * 0x8) & ((st64) 0xff))))), 0x0, 0x8) == ((ut64) ((st32) ((ut8) ((Rss >> i * 0x8) & ((st64) 0xff)))) + ((st32) ((ut8) ((Rtt >> i * 0x8) & ((st64) 0xff)))))) ? 
((st64) ((st32) ((ut8) ((Rss >> i * 0x8) & ((st64) 0xff)))) + ((st32) ((ut8) ((Rtt >> i * 0x8) & ((st64) 0xff))))) : h_tmp45) & 0xff)) << i * 0x8))); + RzILOpPure *op_MUL_12 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_LSHIFT_13 = SHIFTL0(SN(64, 0xff), op_MUL_12); + RzILOpPure *op_NOT_14 = LOGNOT(op_LSHIFT_13); + RzILOpPure *op_AND_15 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_14); + RzILOpPure *op_MUL_63 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_RSHIFT_64 = SHIFTRA(DUP(Rss), op_MUL_63); + RzILOpPure *op_AND_67 = LOGAND(op_RSHIFT_64, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_MUL_70 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_RSHIFT_71 = SHIFTRA(DUP(Rtt), op_MUL_70); + RzILOpPure *op_AND_74 = LOGAND(op_RSHIFT_71, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_ADD_78 = ADD(CAST(32, IL_FALSE, CAST(8, IL_FALSE, op_AND_67)), CAST(32, IL_FALSE, CAST(8, IL_FALSE, op_AND_74))); + RzILOpPure *cond_115 = ITE(DUP(op_EQ_61), CAST(64, MSB(op_ADD_78), DUP(op_ADD_78)), VARL("h_tmp45")); + RzILOpPure *op_AND_117 = LOGAND(cond_115, SN(64, 0xff)); + RzILOpPure *op_MUL_120 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_LSHIFT_121 = SHIFTL0(CAST(64, IL_FALSE, op_AND_117), op_MUL_120); + RzILOpPure *op_OR_123 = LOGOR(CAST(64, IL_FALSE, op_AND_15), op_LSHIFT_121); + RzILOpEffect *op_ASSIGN_125 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_123)); + + // seq(seq(HYB(gcc_expr_if ((extract64(((ut64) ((st32) ((ut8) ((Rss ...; + RzILOpEffect *seq_126 = SEQN(2, seq_113, op_ASSIGN_125); + + // seq(h_tmp44; seq(seq(HYB(gcc_expr_if ((extract64(((ut64) ((st32) ...; + RzILOpEffect *seq_128 = seq_126; + + // seq(seq(h_tmp44; seq(seq(HYB(gcc_expr_if ((extract64(((ut64) ((s ...; + RzILOpEffect *seq_129 = SEQN(2, seq_128, seq_8); + + // while ((i < 0x8)) { seq(seq(h_tmp44; seq(seq(HYB(gcc_expr_if ((extract64(((ut64) ((s ... 
}; + RzILOpPure *op_LT_4 = SLT(VARL("i"), SN(32, 8)); + RzILOpEffect *for_130 = REPEAT(op_LT_4, seq_129); + + // seq(i = 0x0; while ((i < 0x8)) { seq(seq(h_tmp44; seq(seq(HYB(gc ...; + RzILOpEffect *seq_131 = SEQN(2, op_ASSIGN_2, for_130); + + RzILOpEffect *instruction_sequence = seq_131; + return instruction_sequence; +} + +// Rdd = vadduh(Rss,Rtt):sat +RzILOpEffect *hex_il_op_a2_vadduhs(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: st32 i; + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_2 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_5 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp46 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_7 = SETL("h_tmp46", VARL("i")); + + // seq(h_tmp46 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_8 = SEQN(2, op_ASSIGN_hybrid_tmp_7, op_INC_5); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_81 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((extract64(((ut64) ((st32) ((ut16) ((Rss >> i * 0x10) & ((st64) 0xffff)))) + ((st32) ((ut16) ((Rtt >> i * 0x10) & ((st64) 0xffff))))), 0x0, 0x10) == ((ut64) ((st32) ((ut16) ((Rss >> i * 0x10) & ((st64) 0xffff)))) + ((st32) ((ut16) ((Rtt >> i * 0x10) & ((st64) 0xffff))))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st32) ((ut16) ((Rss >> i * 0x10) & ((st64) 0xffff)))) + ((st32) ((ut16) ((Rtt >> i * 0x10) & ((st64) 0xffff)))) < 0x0) ? 
((st64) 0x0) : (0x1 << 0x10) - ((st64) 0x1))); + RzILOpPure *op_MUL_21 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_22 = SHIFTRA(Rss, op_MUL_21); + RzILOpPure *op_AND_25 = LOGAND(op_RSHIFT_22, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_29 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_30 = SHIFTRA(Rtt, op_MUL_29); + RzILOpPure *op_AND_33 = LOGAND(op_RSHIFT_30, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_ADD_37 = ADD(CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_25)), CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_33))); + RzILOpPure *op_MUL_44 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_45 = SHIFTRA(DUP(Rss), op_MUL_44); + RzILOpPure *op_AND_48 = LOGAND(op_RSHIFT_45, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_51 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_52 = SHIFTRA(DUP(Rtt), op_MUL_51); + RzILOpPure *op_AND_55 = LOGAND(op_RSHIFT_52, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_ADD_59 = ADD(CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_48)), CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_55))); + RzILOpPure *op_EQ_61 = EQ(EXTRACT64(CAST(64, IL_FALSE, op_ADD_37), SN(32, 0), SN(32, 16)), CAST(64, IL_FALSE, op_ADD_59)); + RzILOpPure *op_MUL_83 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_84 = SHIFTRA(DUP(Rss), op_MUL_83); + RzILOpPure *op_AND_87 = LOGAND(op_RSHIFT_84, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_90 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_91 = SHIFTRA(DUP(Rtt), op_MUL_90); + RzILOpPure *op_AND_94 = LOGAND(op_RSHIFT_91, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_ADD_98 = ADD(CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_87)), CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_94))); + RzILOpPure *op_LT_100 = SLT(op_ADD_98, SN(32, 0)); + RzILOpPure *op_LSHIFT_104 = SHIFTL0(SN(64, 1), SN(32, 16)); + RzILOpPure *op_SUB_107 = SUB(op_LSHIFT_104, CAST(64, MSB(SN(32, 1)), 
SN(32, 1))); + RzILOpPure *cond_109 = ITE(op_LT_100, CAST(64, MSB(SN(32, 0)), SN(32, 0)), op_SUB_107); + RzILOpEffect *gcc_expr_110 = BRANCH(op_EQ_61, EMPTY(), set_usr_field_call_81); + + // h_tmp47 = HYB(gcc_expr_if ((extract64(((ut64) ((st32) ((ut16) ((Rss >> i * 0x10) & ((st64) 0xffff)))) + ((st32) ((ut16) ((Rtt >> i * 0x10) & ((st64) 0xffff))))), 0x0, 0x10) == ((ut64) ((st32) ((ut16) ((Rss >> i * 0x10) & ((st64) 0xffff)))) + ((st32) ((ut16) ((Rtt >> i * 0x10) & ((st64) 0xffff))))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st32) ((ut16) ((Rss >> i * 0x10) & ((st64) 0xffff)))) + ((st32) ((ut16) ((Rtt >> i * 0x10) & ((st64) 0xffff)))) < 0x0) ? ((st64) 0x0) : (0x1 << 0x10) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_112 = SETL("h_tmp47", cond_109); + + // seq(HYB(gcc_expr_if ((extract64(((ut64) ((st32) ((ut16) ((Rss >> ...; + RzILOpEffect *seq_113 = SEQN(2, gcc_expr_110, op_ASSIGN_hybrid_tmp_112); + + // Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * 0x10)))) | (((ut64) (((extract64(((ut64) ((st32) ((ut16) ((Rss >> i * 0x10) & ((st64) 0xffff)))) + ((st32) ((ut16) ((Rtt >> i * 0x10) & ((st64) 0xffff))))), 0x0, 0x10) == ((ut64) ((st32) ((ut16) ((Rss >> i * 0x10) & ((st64) 0xffff)))) + ((st32) ((ut16) ((Rtt >> i * 0x10) & ((st64) 0xffff)))))) ? 
((st64) ((st32) ((ut16) ((Rss >> i * 0x10) & ((st64) 0xffff)))) + ((st32) ((ut16) ((Rtt >> i * 0x10) & ((st64) 0xffff))))) : h_tmp47) & ((st64) 0xffff))) << i * 0x10))); + RzILOpPure *op_MUL_12 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_13 = SHIFTL0(SN(64, 0xffff), op_MUL_12); + RzILOpPure *op_NOT_14 = LOGNOT(op_LSHIFT_13); + RzILOpPure *op_AND_15 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_14); + RzILOpPure *op_MUL_63 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_64 = SHIFTRA(DUP(Rss), op_MUL_63); + RzILOpPure *op_AND_67 = LOGAND(op_RSHIFT_64, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_70 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_71 = SHIFTRA(DUP(Rtt), op_MUL_70); + RzILOpPure *op_AND_74 = LOGAND(op_RSHIFT_71, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_ADD_78 = ADD(CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_67)), CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_74))); + RzILOpPure *cond_115 = ITE(DUP(op_EQ_61), CAST(64, MSB(op_ADD_78), DUP(op_ADD_78)), VARL("h_tmp47")); + RzILOpPure *op_AND_118 = LOGAND(cond_115, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_121 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_122 = SHIFTL0(CAST(64, IL_FALSE, op_AND_118), op_MUL_121); + RzILOpPure *op_OR_124 = LOGOR(CAST(64, IL_FALSE, op_AND_15), op_LSHIFT_122); + RzILOpEffect *op_ASSIGN_126 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_124)); + + // seq(seq(HYB(gcc_expr_if ((extract64(((ut64) ((st32) ((ut16) ((Rs ...; + RzILOpEffect *seq_127 = SEQN(2, seq_113, op_ASSIGN_126); + + // seq(h_tmp46; seq(seq(HYB(gcc_expr_if ((extract64(((ut64) ((st32) ...; + RzILOpEffect *seq_129 = seq_127; + + // seq(seq(h_tmp46; seq(seq(HYB(gcc_expr_if ((extract64(((ut64) ((s ...; + RzILOpEffect *seq_130 = SEQN(2, seq_129, seq_8); + + // while ((i < 0x4)) { seq(seq(h_tmp46; seq(seq(HYB(gcc_expr_if ((extract64(((ut64) ((s ... 
}; + RzILOpPure *op_LT_4 = SLT(VARL("i"), SN(32, 4)); + RzILOpEffect *for_131 = REPEAT(op_LT_4, seq_130); + + // seq(i = 0x0; while ((i < 0x4)) { seq(seq(h_tmp46; seq(seq(HYB(gc ...; + RzILOpEffect *seq_132 = SEQN(2, op_ASSIGN_2, for_131); + + RzILOpEffect *instruction_sequence = seq_132; + return instruction_sequence; +} + +// Rdd = vaddw(Rss,Rtt) +RzILOpEffect *hex_il_op_a2_vaddw(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: st32 i; + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_2 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_5 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp48 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_7 = SETL("h_tmp48", VARL("i")); + + // seq(h_tmp48 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_8 = SEQN(2, op_ASSIGN_hybrid_tmp_7, op_INC_5); + + // Rdd = ((Rdd & (~(0xffffffff << i * 0x20))) | ((((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff))) + ((st64) ((st32) ((Rtt >> i * 0x20) & 0xffffffff))) & 0xffffffff) << i * 0x20)); + RzILOpPure *op_MUL_12 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_LSHIFT_13 = SHIFTL0(SN(64, 0xffffffff), op_MUL_12); + RzILOpPure *op_NOT_14 = LOGNOT(op_LSHIFT_13); + RzILOpPure *op_AND_15 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_14); + RzILOpPure *op_MUL_18 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_RSHIFT_19 = SHIFTRA(Rss, op_MUL_18); + RzILOpPure *op_AND_21 = LOGAND(op_RSHIFT_19, SN(64, 0xffffffff)); + RzILOpPure *op_MUL_26 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_RSHIFT_27 = SHIFTRA(Rtt, op_MUL_26); + RzILOpPure *op_AND_29 = LOGAND(op_RSHIFT_27, SN(64, 0xffffffff)); + RzILOpPure *op_ADD_32 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_21), DUP(op_AND_21))), CAST(32, 
MSB(DUP(op_AND_21)), DUP(op_AND_21))), CAST(64, MSB(CAST(32, MSB(op_AND_29), DUP(op_AND_29))), CAST(32, MSB(DUP(op_AND_29)), DUP(op_AND_29)))); + RzILOpPure *op_AND_34 = LOGAND(op_ADD_32, SN(64, 0xffffffff)); + RzILOpPure *op_MUL_36 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_LSHIFT_37 = SHIFTL0(op_AND_34, op_MUL_36); + RzILOpPure *op_OR_38 = LOGOR(op_AND_15, op_LSHIFT_37); + RzILOpEffect *op_ASSIGN_39 = WRITE_REG(bundle, Rdd_op, op_OR_38); + + // seq(h_tmp48; Rdd = ((Rdd & (~(0xffffffff << i * 0x20))) | ((((st ...; + RzILOpEffect *seq_41 = op_ASSIGN_39; + + // seq(seq(h_tmp48; Rdd = ((Rdd & (~(0xffffffff << i * 0x20))) | (( ...; + RzILOpEffect *seq_42 = SEQN(2, seq_41, seq_8); + + // while ((i < 0x2)) { seq(seq(h_tmp48; Rdd = ((Rdd & (~(0xffffffff << i * 0x20))) | (( ... }; + RzILOpPure *op_LT_4 = SLT(VARL("i"), SN(32, 2)); + RzILOpEffect *for_43 = REPEAT(op_LT_4, seq_42); + + // seq(i = 0x0; while ((i < 0x2)) { seq(seq(h_tmp48; Rdd = ((Rdd & ...; + RzILOpEffect *seq_44 = SEQN(2, op_ASSIGN_2, for_43); + + RzILOpEffect *instruction_sequence = seq_44; + return instruction_sequence; +} + +// Rdd = vaddw(Rss,Rtt):sat +RzILOpEffect *hex_il_op_a2_vaddws(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: st32 i; + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_2 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_5 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp49 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_7 = SETL("h_tmp49", VARL("i")); + + // seq(h_tmp49 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_8 = SEQN(2, op_ASSIGN_hybrid_tmp_7, op_INC_5); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_74 = 
hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff))) + ((st64) ((st32) ((Rtt >> i * 0x20) & 0xffffffff)))), 0x0, 0x20) == ((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff))) + ((st64) ((st32) ((Rtt >> i * 0x20) & 0xffffffff))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff))) + ((st64) ((st32) ((Rtt >> i * 0x20) & 0xffffffff))) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_MUL_21 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_RSHIFT_22 = SHIFTRA(Rss, op_MUL_21); + RzILOpPure *op_AND_24 = LOGAND(op_RSHIFT_22, SN(64, 0xffffffff)); + RzILOpPure *op_MUL_29 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_RSHIFT_30 = SHIFTRA(Rtt, op_MUL_29); + RzILOpPure *op_AND_32 = LOGAND(op_RSHIFT_30, SN(64, 0xffffffff)); + RzILOpPure *op_ADD_35 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_24), DUP(op_AND_24))), CAST(32, MSB(DUP(op_AND_24)), DUP(op_AND_24))), CAST(64, MSB(CAST(32, MSB(op_AND_32), DUP(op_AND_32))), CAST(32, MSB(DUP(op_AND_32)), DUP(op_AND_32)))); + RzILOpPure *op_MUL_42 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_RSHIFT_43 = SHIFTRA(DUP(Rss), op_MUL_42); + RzILOpPure *op_AND_45 = LOGAND(op_RSHIFT_43, SN(64, 0xffffffff)); + RzILOpPure *op_MUL_49 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_RSHIFT_50 = SHIFTRA(DUP(Rtt), op_MUL_49); + RzILOpPure *op_AND_52 = LOGAND(op_RSHIFT_50, SN(64, 0xffffffff)); + RzILOpPure *op_ADD_55 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_45), DUP(op_AND_45))), CAST(32, MSB(DUP(op_AND_45)), DUP(op_AND_45))), CAST(64, MSB(CAST(32, MSB(op_AND_52), DUP(op_AND_52))), CAST(32, MSB(DUP(op_AND_52)), DUP(op_AND_52)))); + RzILOpPure *op_EQ_56 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_ADD_35), SN(32, 0), SN(32, 0x20)), op_ADD_55); + RzILOpPure *op_MUL_76 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_RSHIFT_77 = 
SHIFTRA(DUP(Rss), op_MUL_76); + RzILOpPure *op_AND_79 = LOGAND(op_RSHIFT_77, SN(64, 0xffffffff)); + RzILOpPure *op_MUL_83 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_RSHIFT_84 = SHIFTRA(DUP(Rtt), op_MUL_83); + RzILOpPure *op_AND_86 = LOGAND(op_RSHIFT_84, SN(64, 0xffffffff)); + RzILOpPure *op_ADD_89 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_79), DUP(op_AND_79))), CAST(32, MSB(DUP(op_AND_79)), DUP(op_AND_79))), CAST(64, MSB(CAST(32, MSB(op_AND_86), DUP(op_AND_86))), CAST(32, MSB(DUP(op_AND_86)), DUP(op_AND_86)))); + RzILOpPure *op_LT_92 = SLT(op_ADD_89, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_97 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_98 = NEG(op_LSHIFT_97); + RzILOpPure *op_LSHIFT_103 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_106 = SUB(op_LSHIFT_103, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_107 = ITE(op_LT_92, op_NEG_98, op_SUB_106); + RzILOpEffect *gcc_expr_108 = BRANCH(op_EQ_56, EMPTY(), set_usr_field_call_74); + + // h_tmp50 = HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff))) + ((st64) ((st32) ((Rtt >> i * 0x20) & 0xffffffff)))), 0x0, 0x20) == ((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff))) + ((st64) ((st32) ((Rtt >> i * 0x20) & 0xffffffff))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff))) + ((st64) ((st32) ((Rtt >> i * 0x20) & 0xffffffff))) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_110 = SETL("h_tmp50", cond_107); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rss > ...; + RzILOpEffect *seq_111 = SEQN(2, gcc_expr_108, op_ASSIGN_hybrid_tmp_110); + + // Rdd = ((Rdd & (~(0xffffffff << i * 0x20))) | ((((sextract64(((ut64) ((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff))) + ((st64) ((st32) ((Rtt >> i * 0x20) & 0xffffffff)))), 0x0, 0x20) == ((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff))) + ((st64) ((st32) ((Rtt >> i * 0x20) & 0xffffffff)))) ? ((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff))) + ((st64) ((st32) ((Rtt >> i * 0x20) & 0xffffffff))) : h_tmp50) & 0xffffffff) << i * 0x20)); + RzILOpPure *op_MUL_12 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_LSHIFT_13 = SHIFTL0(SN(64, 0xffffffff), op_MUL_12); + RzILOpPure *op_NOT_14 = LOGNOT(op_LSHIFT_13); + RzILOpPure *op_AND_15 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_14); + RzILOpPure *op_MUL_58 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_RSHIFT_59 = SHIFTRA(DUP(Rss), op_MUL_58); + RzILOpPure *op_AND_61 = LOGAND(op_RSHIFT_59, SN(64, 0xffffffff)); + RzILOpPure *op_MUL_65 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_RSHIFT_66 = SHIFTRA(DUP(Rtt), op_MUL_65); + RzILOpPure *op_AND_68 = LOGAND(op_RSHIFT_66, SN(64, 0xffffffff)); + RzILOpPure *op_ADD_71 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_61), DUP(op_AND_61))), CAST(32, MSB(DUP(op_AND_61)), DUP(op_AND_61))), CAST(64, MSB(CAST(32, MSB(op_AND_68), DUP(op_AND_68))), CAST(32, MSB(DUP(op_AND_68)), DUP(op_AND_68)))); + RzILOpPure *cond_112 = ITE(DUP(op_EQ_56), op_ADD_71, VARL("h_tmp50")); + RzILOpPure *op_AND_114 = LOGAND(cond_112, SN(64, 0xffffffff)); + RzILOpPure *op_MUL_116 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_LSHIFT_117 = SHIFTL0(op_AND_114, op_MUL_116); + RzILOpPure *op_OR_118 = LOGOR(op_AND_15, op_LSHIFT_117); + RzILOpEffect *op_ASSIGN_119 = WRITE_REG(bundle, Rdd_op, op_OR_118); + + // seq(seq(HYB(gcc_expr_if 
((sextract64(((ut64) ((st64) ((st32) ((R ...; + RzILOpEffect *seq_120 = SEQN(2, seq_111, op_ASSIGN_119); + + // seq(h_tmp49; seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64 ...; + RzILOpEffect *seq_122 = seq_120; + + // seq(seq(h_tmp49; seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) (( ...; + RzILOpEffect *seq_123 = SEQN(2, seq_122, seq_8); + + // while ((i < 0x2)) { seq(seq(h_tmp49; seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) (( ... }; + RzILOpPure *op_LT_4 = SLT(VARL("i"), SN(32, 2)); + RzILOpEffect *for_124 = REPEAT(op_LT_4, seq_123); + + // seq(i = 0x0; while ((i < 0x2)) { seq(seq(h_tmp49; seq(seq(HYB(gc ...; + RzILOpEffect *seq_125 = SEQN(2, op_ASSIGN_2, for_124); + + RzILOpEffect *instruction_sequence = seq_125; + return instruction_sequence; +} + +// Rdd = vavgh(Rss,Rtt) +RzILOpEffect *hex_il_op_a2_vavgh(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: st32 i; + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_2 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_5 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp51 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_7 = SETL("h_tmp51", VARL("i")); + + // seq(h_tmp51 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_8 = SEQN(2, op_ASSIGN_hybrid_tmp_7, op_INC_5); + + // Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * 0x10)))) | (((ut64) ((((st32) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff)))) + ((st32) ((st16) ((Rtt >> i * 0x10) & ((st64) 0xffff)))) >> 0x1) & 0xffff)) << i * 0x10))); + RzILOpPure *op_MUL_12 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_13 = SHIFTL0(SN(64, 0xffff), op_MUL_12); + RzILOpPure *op_NOT_14 = LOGNOT(op_LSHIFT_13); + RzILOpPure *op_AND_15 = LOGAND(READ_REG(pkt, Rdd_op, true), 
op_NOT_14); + RzILOpPure *op_MUL_18 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_19 = SHIFTRA(Rss, op_MUL_18); + RzILOpPure *op_AND_22 = LOGAND(op_RSHIFT_19, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_26 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_27 = SHIFTRA(Rtt, op_MUL_26); + RzILOpPure *op_AND_30 = LOGAND(op_RSHIFT_27, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_ADD_34 = ADD(CAST(32, MSB(CAST(16, MSB(op_AND_22), DUP(op_AND_22))), CAST(16, MSB(DUP(op_AND_22)), DUP(op_AND_22))), CAST(32, MSB(CAST(16, MSB(op_AND_30), DUP(op_AND_30))), CAST(16, MSB(DUP(op_AND_30)), DUP(op_AND_30)))); + RzILOpPure *op_RSHIFT_36 = SHIFTRA(op_ADD_34, SN(32, 1)); + RzILOpPure *op_AND_38 = LOGAND(op_RSHIFT_36, SN(32, 0xffff)); + RzILOpPure *op_MUL_41 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_42 = SHIFTL0(CAST(64, IL_FALSE, op_AND_38), op_MUL_41); + RzILOpPure *op_OR_44 = LOGOR(CAST(64, IL_FALSE, op_AND_15), op_LSHIFT_42); + RzILOpEffect *op_ASSIGN_46 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_44)); + + // seq(h_tmp51; Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * 0x10 ...; + RzILOpEffect *seq_48 = op_ASSIGN_46; + + // seq(seq(h_tmp51; Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * ...; + RzILOpEffect *seq_49 = SEQN(2, seq_48, seq_8); + + // while ((i < 0x4)) { seq(seq(h_tmp51; Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * ... 
}; + RzILOpPure *op_LT_4 = SLT(VARL("i"), SN(32, 4)); + RzILOpEffect *for_50 = REPEAT(op_LT_4, seq_49); + + // seq(i = 0x0; while ((i < 0x4)) { seq(seq(h_tmp51; Rdd = ((st64) ...; + RzILOpEffect *seq_51 = SEQN(2, op_ASSIGN_2, for_50); + + RzILOpEffect *instruction_sequence = seq_51; + return instruction_sequence; +} + +// Rdd = vavgh(Rss,Rtt):crnd +RzILOpEffect *hex_il_op_a2_vavghcr(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: st32 i; + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_2 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_5 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp52 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_7 = SETL("h_tmp52", VARL("i")); + + // seq(h_tmp52 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_8 = SEQN(2, op_ASSIGN_hybrid_tmp_7, op_INC_5); + + // Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * 0x10)))) | (((ut64) (((((((st32) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff)))) + ((st32) ((st16) ((Rtt >> i * 0x10) & ((st64) 0xffff)))) & 0x3) == 0x3) ? 
((st32) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff)))) + ((st32) ((st16) ((Rtt >> i * 0x10) & ((st64) 0xffff)))) + 0x1 : ((st32) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff)))) + ((st32) ((st16) ((Rtt >> i * 0x10) & ((st64) 0xffff))))) >> 0x1) & 0xffff)) << i * 0x10))); + RzILOpPure *op_MUL_12 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_13 = SHIFTL0(SN(64, 0xffff), op_MUL_12); + RzILOpPure *op_NOT_14 = LOGNOT(op_LSHIFT_13); + RzILOpPure *op_AND_15 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_14); + RzILOpPure *op_MUL_18 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_19 = SHIFTRA(Rss, op_MUL_18); + RzILOpPure *op_AND_22 = LOGAND(op_RSHIFT_19, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_26 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_27 = SHIFTRA(Rtt, op_MUL_26); + RzILOpPure *op_AND_30 = LOGAND(op_RSHIFT_27, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_ADD_34 = ADD(CAST(32, MSB(CAST(16, MSB(op_AND_22), DUP(op_AND_22))), CAST(16, MSB(DUP(op_AND_22)), DUP(op_AND_22))), CAST(32, MSB(CAST(16, MSB(op_AND_30), DUP(op_AND_30))), CAST(16, MSB(DUP(op_AND_30)), DUP(op_AND_30)))); + RzILOpPure *op_AND_36 = LOGAND(op_ADD_34, SN(32, 3)); + RzILOpPure *op_EQ_38 = EQ(op_AND_36, SN(32, 3)); + RzILOpPure *op_MUL_40 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_41 = SHIFTRA(DUP(Rss), op_MUL_40); + RzILOpPure *op_AND_44 = LOGAND(op_RSHIFT_41, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_47 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_48 = SHIFTRA(DUP(Rtt), op_MUL_47); + RzILOpPure *op_AND_51 = LOGAND(op_RSHIFT_48, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_ADD_55 = ADD(CAST(32, MSB(CAST(16, MSB(op_AND_44), DUP(op_AND_44))), CAST(16, MSB(DUP(op_AND_44)), DUP(op_AND_44))), CAST(32, MSB(CAST(16, MSB(op_AND_51), DUP(op_AND_51))), CAST(16, MSB(DUP(op_AND_51)), DUP(op_AND_51)))); + RzILOpPure *op_ADD_57 = ADD(op_ADD_55, SN(32, 1)); + RzILOpPure *op_MUL_59 = 
MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_60 = SHIFTRA(DUP(Rss), op_MUL_59); + RzILOpPure *op_AND_63 = LOGAND(op_RSHIFT_60, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_66 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_67 = SHIFTRA(DUP(Rtt), op_MUL_66); + RzILOpPure *op_AND_70 = LOGAND(op_RSHIFT_67, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_ADD_74 = ADD(CAST(32, MSB(CAST(16, MSB(op_AND_63), DUP(op_AND_63))), CAST(16, MSB(DUP(op_AND_63)), DUP(op_AND_63))), CAST(32, MSB(CAST(16, MSB(op_AND_70), DUP(op_AND_70))), CAST(16, MSB(DUP(op_AND_70)), DUP(op_AND_70)))); + RzILOpPure *cond_75 = ITE(op_EQ_38, op_ADD_57, op_ADD_74); + RzILOpPure *op_RSHIFT_77 = SHIFTRA(cond_75, SN(32, 1)); + RzILOpPure *op_AND_79 = LOGAND(op_RSHIFT_77, SN(32, 0xffff)); + RzILOpPure *op_MUL_82 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_83 = SHIFTL0(CAST(64, IL_FALSE, op_AND_79), op_MUL_82); + RzILOpPure *op_OR_85 = LOGOR(CAST(64, IL_FALSE, op_AND_15), op_LSHIFT_83); + RzILOpEffect *op_ASSIGN_87 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_85)); + + // seq(h_tmp52; Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * 0x10 ...; + RzILOpEffect *seq_89 = op_ASSIGN_87; + + // seq(seq(h_tmp52; Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * ...; + RzILOpEffect *seq_90 = SEQN(2, seq_89, seq_8); + + // while ((i < 0x4)) { seq(seq(h_tmp52; Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * ... 
}; + RzILOpPure *op_LT_4 = SLT(VARL("i"), SN(32, 4)); + RzILOpEffect *for_91 = REPEAT(op_LT_4, seq_90); + + // seq(i = 0x0; while ((i < 0x4)) { seq(seq(h_tmp52; Rdd = ((st64) ...; + RzILOpEffect *seq_92 = SEQN(2, op_ASSIGN_2, for_91); + + RzILOpEffect *instruction_sequence = seq_92; + return instruction_sequence; +} + +// Rdd = vavgh(Rss,Rtt):rnd +RzILOpEffect *hex_il_op_a2_vavghr(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: st32 i; + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_2 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_5 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp53 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_7 = SETL("h_tmp53", VARL("i")); + + // seq(h_tmp53 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_8 = SEQN(2, op_ASSIGN_hybrid_tmp_7, op_INC_5); + + // Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * 0x10)))) | (((ut64) ((((st32) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff)))) + ((st32) ((st16) ((Rtt >> i * 0x10) & ((st64) 0xffff)))) + 0x1 >> 0x1) & 0xffff)) << i * 0x10))); + RzILOpPure *op_MUL_12 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_13 = SHIFTL0(SN(64, 0xffff), op_MUL_12); + RzILOpPure *op_NOT_14 = LOGNOT(op_LSHIFT_13); + RzILOpPure *op_AND_15 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_14); + RzILOpPure *op_MUL_18 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_19 = SHIFTRA(Rss, op_MUL_18); + RzILOpPure *op_AND_22 = LOGAND(op_RSHIFT_19, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_26 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_27 = SHIFTRA(Rtt, op_MUL_26); + RzILOpPure *op_AND_30 = LOGAND(op_RSHIFT_27, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure 
*op_ADD_34 = ADD(CAST(32, MSB(CAST(16, MSB(op_AND_22), DUP(op_AND_22))), CAST(16, MSB(DUP(op_AND_22)), DUP(op_AND_22))), CAST(32, MSB(CAST(16, MSB(op_AND_30), DUP(op_AND_30))), CAST(16, MSB(DUP(op_AND_30)), DUP(op_AND_30)))); + RzILOpPure *op_ADD_36 = ADD(op_ADD_34, SN(32, 1)); + RzILOpPure *op_RSHIFT_38 = SHIFTRA(op_ADD_36, SN(32, 1)); + RzILOpPure *op_AND_40 = LOGAND(op_RSHIFT_38, SN(32, 0xffff)); + RzILOpPure *op_MUL_43 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_44 = SHIFTL0(CAST(64, IL_FALSE, op_AND_40), op_MUL_43); + RzILOpPure *op_OR_46 = LOGOR(CAST(64, IL_FALSE, op_AND_15), op_LSHIFT_44); + RzILOpEffect *op_ASSIGN_48 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_46)); + + // seq(h_tmp53; Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * 0x10 ...; + RzILOpEffect *seq_50 = op_ASSIGN_48; + + // seq(seq(h_tmp53; Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * ...; + RzILOpEffect *seq_51 = SEQN(2, seq_50, seq_8); + + // while ((i < 0x4)) { seq(seq(h_tmp53; Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * ... 
}; + RzILOpPure *op_LT_4 = SLT(VARL("i"), SN(32, 4)); + RzILOpEffect *for_52 = REPEAT(op_LT_4, seq_51); + + // seq(i = 0x0; while ((i < 0x4)) { seq(seq(h_tmp53; Rdd = ((st64) ...; + RzILOpEffect *seq_53 = SEQN(2, op_ASSIGN_2, for_52); + + RzILOpEffect *instruction_sequence = seq_53; + return instruction_sequence; +} + +// Rdd = vavgub(Rss,Rtt) +RzILOpEffect *hex_il_op_a2_vavgub(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: st32 i; + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_2 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_5 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp54 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_7 = SETL("h_tmp54", VARL("i")); + + // seq(h_tmp54 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_8 = SEQN(2, op_ASSIGN_hybrid_tmp_7, op_INC_5); + + // Rdd = ((st64) (((ut64) (Rdd & (~(0xff << i * 0x8)))) | (((ut64) (((st64) (((st32) ((ut8) ((Rss >> i * 0x8) & ((st64) 0xff)))) + ((st32) ((ut8) ((Rtt >> i * 0x8) & ((st64) 0xff)))) >> 0x1)) & 0xff)) << i * 0x8))); + RzILOpPure *op_MUL_12 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_LSHIFT_13 = SHIFTL0(SN(64, 0xff), op_MUL_12); + RzILOpPure *op_NOT_14 = LOGNOT(op_LSHIFT_13); + RzILOpPure *op_AND_15 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_14); + RzILOpPure *op_MUL_18 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_RSHIFT_19 = SHIFTRA(Rss, op_MUL_18); + RzILOpPure *op_AND_22 = LOGAND(op_RSHIFT_19, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_MUL_26 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_RSHIFT_27 = SHIFTRA(Rtt, op_MUL_26); + RzILOpPure *op_AND_30 = LOGAND(op_RSHIFT_27, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_ADD_34 = ADD(CAST(32, 
IL_FALSE, CAST(8, IL_FALSE, op_AND_22)), CAST(32, IL_FALSE, CAST(8, IL_FALSE, op_AND_30))); + RzILOpPure *op_RSHIFT_36 = SHIFTRA(op_ADD_34, SN(32, 1)); + RzILOpPure *op_AND_39 = LOGAND(CAST(64, MSB(op_RSHIFT_36), DUP(op_RSHIFT_36)), SN(64, 0xff)); + RzILOpPure *op_MUL_42 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_LSHIFT_43 = SHIFTL0(CAST(64, IL_FALSE, op_AND_39), op_MUL_42); + RzILOpPure *op_OR_45 = LOGOR(CAST(64, IL_FALSE, op_AND_15), op_LSHIFT_43); + RzILOpEffect *op_ASSIGN_47 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_45)); + + // seq(h_tmp54; Rdd = ((st64) (((ut64) (Rdd & (~(0xff << i * 0x8))) ...; + RzILOpEffect *seq_49 = op_ASSIGN_47; + + // seq(seq(h_tmp54; Rdd = ((st64) (((ut64) (Rdd & (~(0xff << i * 0x ...; + RzILOpEffect *seq_50 = SEQN(2, seq_49, seq_8); + + // while ((i < 0x8)) { seq(seq(h_tmp54; Rdd = ((st64) (((ut64) (Rdd & (~(0xff << i * 0x ... }; + RzILOpPure *op_LT_4 = SLT(VARL("i"), SN(32, 8)); + RzILOpEffect *for_51 = REPEAT(op_LT_4, seq_50); + + // seq(i = 0x0; while ((i < 0x8)) { seq(seq(h_tmp54; Rdd = ((st64) ...; + RzILOpEffect *seq_52 = SEQN(2, op_ASSIGN_2, for_51); + + RzILOpEffect *instruction_sequence = seq_52; + return instruction_sequence; +} + +// Rdd = vavgub(Rss,Rtt):rnd +RzILOpEffect *hex_il_op_a2_vavgubr(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: st32 i; + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_2 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_5 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp55 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_7 = SETL("h_tmp55", VARL("i")); + + // seq(h_tmp55 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_8 = SEQN(2, op_ASSIGN_hybrid_tmp_7, op_INC_5); + + // 
Rdd = ((st64) (((ut64) (Rdd & (~(0xff << i * 0x8)))) | (((ut64) (((st64) (((st32) ((ut8) ((Rss >> i * 0x8) & ((st64) 0xff)))) + ((st32) ((ut8) ((Rtt >> i * 0x8) & ((st64) 0xff)))) + 0x1 >> 0x1)) & 0xff)) << i * 0x8))); + RzILOpPure *op_MUL_12 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_LSHIFT_13 = SHIFTL0(SN(64, 0xff), op_MUL_12); + RzILOpPure *op_NOT_14 = LOGNOT(op_LSHIFT_13); + RzILOpPure *op_AND_15 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_14); + RzILOpPure *op_MUL_18 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_RSHIFT_19 = SHIFTRA(Rss, op_MUL_18); + RzILOpPure *op_AND_22 = LOGAND(op_RSHIFT_19, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_MUL_26 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_RSHIFT_27 = SHIFTRA(Rtt, op_MUL_26); + RzILOpPure *op_AND_30 = LOGAND(op_RSHIFT_27, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_ADD_34 = ADD(CAST(32, IL_FALSE, CAST(8, IL_FALSE, op_AND_22)), CAST(32, IL_FALSE, CAST(8, IL_FALSE, op_AND_30))); + RzILOpPure *op_ADD_36 = ADD(op_ADD_34, SN(32, 1)); + RzILOpPure *op_RSHIFT_38 = SHIFTRA(op_ADD_36, SN(32, 1)); + RzILOpPure *op_AND_41 = LOGAND(CAST(64, MSB(op_RSHIFT_38), DUP(op_RSHIFT_38)), SN(64, 0xff)); + RzILOpPure *op_MUL_44 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_LSHIFT_45 = SHIFTL0(CAST(64, IL_FALSE, op_AND_41), op_MUL_44); + RzILOpPure *op_OR_47 = LOGOR(CAST(64, IL_FALSE, op_AND_15), op_LSHIFT_45); + RzILOpEffect *op_ASSIGN_49 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_47)); + + // seq(h_tmp55; Rdd = ((st64) (((ut64) (Rdd & (~(0xff << i * 0x8))) ...; + RzILOpEffect *seq_51 = op_ASSIGN_49; + + // seq(seq(h_tmp55; Rdd = ((st64) (((ut64) (Rdd & (~(0xff << i * 0x ...; + RzILOpEffect *seq_52 = SEQN(2, seq_51, seq_8); + + // while ((i < 0x8)) { seq(seq(h_tmp55; Rdd = ((st64) (((ut64) (Rdd & (~(0xff << i * 0x ... 
}; + RzILOpPure *op_LT_4 = SLT(VARL("i"), SN(32, 8)); + RzILOpEffect *for_53 = REPEAT(op_LT_4, seq_52); + + // seq(i = 0x0; while ((i < 0x8)) { seq(seq(h_tmp55; Rdd = ((st64) ...; + RzILOpEffect *seq_54 = SEQN(2, op_ASSIGN_2, for_53); + + RzILOpEffect *instruction_sequence = seq_54; + return instruction_sequence; +} + +// Rdd = vavguh(Rss,Rtt) +RzILOpEffect *hex_il_op_a2_vavguh(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: st32 i; + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_2 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_5 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp56 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_7 = SETL("h_tmp56", VARL("i")); + + // seq(h_tmp56 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_8 = SEQN(2, op_ASSIGN_hybrid_tmp_7, op_INC_5); + + // Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * 0x10)))) | (((ut64) ((((st32) ((ut16) ((Rss >> i * 0x10) & ((st64) 0xffff)))) + ((st32) ((ut16) ((Rtt >> i * 0x10) & ((st64) 0xffff)))) >> 0x1) & 0xffff)) << i * 0x10))); + RzILOpPure *op_MUL_12 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_13 = SHIFTL0(SN(64, 0xffff), op_MUL_12); + RzILOpPure *op_NOT_14 = LOGNOT(op_LSHIFT_13); + RzILOpPure *op_AND_15 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_14); + RzILOpPure *op_MUL_18 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_19 = SHIFTRA(Rss, op_MUL_18); + RzILOpPure *op_AND_22 = LOGAND(op_RSHIFT_19, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_26 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_27 = SHIFTRA(Rtt, op_MUL_26); + RzILOpPure *op_AND_30 = LOGAND(op_RSHIFT_27, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_ADD_34 = 
ADD(CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_22)), CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_30))); + RzILOpPure *op_RSHIFT_36 = SHIFTRA(op_ADD_34, SN(32, 1)); + RzILOpPure *op_AND_38 = LOGAND(op_RSHIFT_36, SN(32, 0xffff)); + RzILOpPure *op_MUL_41 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_42 = SHIFTL0(CAST(64, IL_FALSE, op_AND_38), op_MUL_41); + RzILOpPure *op_OR_44 = LOGOR(CAST(64, IL_FALSE, op_AND_15), op_LSHIFT_42); + RzILOpEffect *op_ASSIGN_46 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_44)); + + // seq(h_tmp56; Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * 0x10 ...; + RzILOpEffect *seq_48 = op_ASSIGN_46; + + // seq(seq(h_tmp56; Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * ...; + RzILOpEffect *seq_49 = SEQN(2, seq_48, seq_8); + + // while ((i < 0x4)) { seq(seq(h_tmp56; Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * ... }; + RzILOpPure *op_LT_4 = SLT(VARL("i"), SN(32, 4)); + RzILOpEffect *for_50 = REPEAT(op_LT_4, seq_49); + + // seq(i = 0x0; while ((i < 0x4)) { seq(seq(h_tmp56; Rdd = ((st64) ...; + RzILOpEffect *seq_51 = SEQN(2, op_ASSIGN_2, for_50); + + RzILOpEffect *instruction_sequence = seq_51; + return instruction_sequence; +} + +// Rdd = vavguh(Rss,Rtt):rnd +RzILOpEffect *hex_il_op_a2_vavguhr(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: st32 i; + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_2 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_5 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp57 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_7 = SETL("h_tmp57", VARL("i")); + + // seq(h_tmp57 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_8 = SEQN(2, op_ASSIGN_hybrid_tmp_7, op_INC_5); + + // Rdd = ((st64) 
(((ut64) (Rdd & (~(0xffff << i * 0x10)))) | (((ut64) ((((st32) ((ut16) ((Rss >> i * 0x10) & ((st64) 0xffff)))) + ((st32) ((ut16) ((Rtt >> i * 0x10) & ((st64) 0xffff)))) + 0x1 >> 0x1) & 0xffff)) << i * 0x10))); + RzILOpPure *op_MUL_12 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_13 = SHIFTL0(SN(64, 0xffff), op_MUL_12); + RzILOpPure *op_NOT_14 = LOGNOT(op_LSHIFT_13); + RzILOpPure *op_AND_15 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_14); + RzILOpPure *op_MUL_18 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_19 = SHIFTRA(Rss, op_MUL_18); + RzILOpPure *op_AND_22 = LOGAND(op_RSHIFT_19, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_26 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_27 = SHIFTRA(Rtt, op_MUL_26); + RzILOpPure *op_AND_30 = LOGAND(op_RSHIFT_27, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_ADD_34 = ADD(CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_22)), CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_30))); + RzILOpPure *op_ADD_36 = ADD(op_ADD_34, SN(32, 1)); + RzILOpPure *op_RSHIFT_38 = SHIFTRA(op_ADD_36, SN(32, 1)); + RzILOpPure *op_AND_40 = LOGAND(op_RSHIFT_38, SN(32, 0xffff)); + RzILOpPure *op_MUL_43 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_44 = SHIFTL0(CAST(64, IL_FALSE, op_AND_40), op_MUL_43); + RzILOpPure *op_OR_46 = LOGOR(CAST(64, IL_FALSE, op_AND_15), op_LSHIFT_44); + RzILOpEffect *op_ASSIGN_48 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_46)); + + // seq(h_tmp57; Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * 0x10 ...; + RzILOpEffect *seq_50 = op_ASSIGN_48; + + // seq(seq(h_tmp57; Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * ...; + RzILOpEffect *seq_51 = SEQN(2, seq_50, seq_8); + + // while ((i < 0x4)) { seq(seq(h_tmp57; Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * ... 
}; + RzILOpPure *op_LT_4 = SLT(VARL("i"), SN(32, 4)); + RzILOpEffect *for_52 = REPEAT(op_LT_4, seq_51); + + // seq(i = 0x0; while ((i < 0x4)) { seq(seq(h_tmp57; Rdd = ((st64) ...; + RzILOpEffect *seq_53 = SEQN(2, op_ASSIGN_2, for_52); + + RzILOpEffect *instruction_sequence = seq_53; + return instruction_sequence; +} + +// Rdd = vavguw(Rss,Rtt) +RzILOpEffect *hex_il_op_a2_vavguw(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: st32 i; + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_2 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_5 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp58 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_7 = SETL("h_tmp58", VARL("i")); + + // seq(h_tmp58 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_8 = SEQN(2, op_ASSIGN_hybrid_tmp_7, op_INC_5); + + // Rdd = ((st64) (((ut64) (Rdd & (~(0xffffffff << i * 0x20)))) | (((extract64(((ut64) ((ut32) ((Rss >> i * 0x20) & 0xffffffff))), 0x0, 0x20) + extract64(((ut64) ((ut32) ((Rtt >> i * 0x20) & 0xffffffff))), 0x0, 0x20) >> 0x1) & ((ut64) 0xffffffff)) << i * 0x20))); + RzILOpPure *op_MUL_12 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_LSHIFT_13 = SHIFTL0(SN(64, 0xffffffff), op_MUL_12); + RzILOpPure *op_NOT_14 = LOGNOT(op_LSHIFT_13); + RzILOpPure *op_AND_15 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_14); + RzILOpPure *op_MUL_21 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_RSHIFT_22 = SHIFTRA(Rss, op_MUL_21); + RzILOpPure *op_AND_24 = LOGAND(op_RSHIFT_22, SN(64, 0xffffffff)); + RzILOpPure *op_MUL_36 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_RSHIFT_37 = SHIFTRA(Rtt, op_MUL_36); + RzILOpPure *op_AND_39 = LOGAND(op_RSHIFT_37, SN(64, 0xffffffff)); + RzILOpPure *op_ADD_46 = 
ADD(EXTRACT64(CAST(64, IL_FALSE, CAST(32, IL_FALSE, op_AND_24)), SN(32, 0), SN(32, 0x20)), EXTRACT64(CAST(64, IL_FALSE, CAST(32, IL_FALSE, op_AND_39)), SN(32, 0), SN(32, 0x20))); + RzILOpPure *op_RSHIFT_48 = SHIFTR0(op_ADD_46, SN(32, 1)); + RzILOpPure *op_AND_51 = LOGAND(op_RSHIFT_48, CAST(64, IL_FALSE, SN(64, 0xffffffff))); + RzILOpPure *op_MUL_53 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_LSHIFT_54 = SHIFTL0(op_AND_51, op_MUL_53); + RzILOpPure *op_OR_56 = LOGOR(CAST(64, IL_FALSE, op_AND_15), op_LSHIFT_54); + RzILOpEffect *op_ASSIGN_58 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_56)); + + // seq(h_tmp58; Rdd = ((st64) (((ut64) (Rdd & (~(0xffffffff << i * ...; + RzILOpEffect *seq_60 = op_ASSIGN_58; + + // seq(seq(h_tmp58; Rdd = ((st64) (((ut64) (Rdd & (~(0xffffffff << ...; + RzILOpEffect *seq_61 = SEQN(2, seq_60, seq_8); + + // while ((i < 0x2)) { seq(seq(h_tmp58; Rdd = ((st64) (((ut64) (Rdd & (~(0xffffffff << ... }; + RzILOpPure *op_LT_4 = SLT(VARL("i"), SN(32, 2)); + RzILOpEffect *for_62 = REPEAT(op_LT_4, seq_61); + + // seq(i = 0x0; while ((i < 0x2)) { seq(seq(h_tmp58; Rdd = ((st64) ...; + RzILOpEffect *seq_63 = SEQN(2, op_ASSIGN_2, for_62); + + RzILOpEffect *instruction_sequence = seq_63; + return instruction_sequence; +} + +// Rdd = vavguw(Rss,Rtt):rnd +RzILOpEffect *hex_il_op_a2_vavguwr(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: st32 i; + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_2 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_5 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp59 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_7 = SETL("h_tmp59", VARL("i")); + + // seq(h_tmp59 = HYB(++i); HYB(++i)); + RzILOpEffect 
*seq_8 = SEQN(2, op_ASSIGN_hybrid_tmp_7, op_INC_5); + + // Rdd = ((st64) (((ut64) (Rdd & (~(0xffffffff << i * 0x20)))) | (((extract64(((ut64) ((ut32) ((Rss >> i * 0x20) & 0xffffffff))), 0x0, 0x20) + extract64(((ut64) ((ut32) ((Rtt >> i * 0x20) & 0xffffffff))), 0x0, 0x20) + ((ut64) 0x1) >> 0x1) & ((ut64) 0xffffffff)) << i * 0x20))); + RzILOpPure *op_MUL_12 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_LSHIFT_13 = SHIFTL0(SN(64, 0xffffffff), op_MUL_12); + RzILOpPure *op_NOT_14 = LOGNOT(op_LSHIFT_13); + RzILOpPure *op_AND_15 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_14); + RzILOpPure *op_MUL_21 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_RSHIFT_22 = SHIFTRA(Rss, op_MUL_21); + RzILOpPure *op_AND_24 = LOGAND(op_RSHIFT_22, SN(64, 0xffffffff)); + RzILOpPure *op_MUL_36 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_RSHIFT_37 = SHIFTRA(Rtt, op_MUL_36); + RzILOpPure *op_AND_39 = LOGAND(op_RSHIFT_37, SN(64, 0xffffffff)); + RzILOpPure *op_ADD_46 = ADD(EXTRACT64(CAST(64, IL_FALSE, CAST(32, IL_FALSE, op_AND_24)), SN(32, 0), SN(32, 0x20)), EXTRACT64(CAST(64, IL_FALSE, CAST(32, IL_FALSE, op_AND_39)), SN(32, 0), SN(32, 0x20))); + RzILOpPure *op_ADD_49 = ADD(op_ADD_46, CAST(64, IL_FALSE, SN(32, 1))); + RzILOpPure *op_RSHIFT_51 = SHIFTR0(op_ADD_49, SN(32, 1)); + RzILOpPure *op_AND_54 = LOGAND(op_RSHIFT_51, CAST(64, IL_FALSE, SN(64, 0xffffffff))); + RzILOpPure *op_MUL_56 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_LSHIFT_57 = SHIFTL0(op_AND_54, op_MUL_56); + RzILOpPure *op_OR_59 = LOGOR(CAST(64, IL_FALSE, op_AND_15), op_LSHIFT_57); + RzILOpEffect *op_ASSIGN_61 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_59)); + + // seq(h_tmp59; Rdd = ((st64) (((ut64) (Rdd & (~(0xffffffff << i * ...; + RzILOpEffect *seq_63 = op_ASSIGN_61; + + // seq(seq(h_tmp59; Rdd = ((st64) (((ut64) (Rdd & (~(0xffffffff << ...; + RzILOpEffect *seq_64 = SEQN(2, seq_63, seq_8); + + // while ((i < 0x2)) { seq(seq(h_tmp59; Rdd = ((st64) (((ut64) (Rdd & (~(0xffffffff << ... 
}; + RzILOpPure *op_LT_4 = SLT(VARL("i"), SN(32, 2)); + RzILOpEffect *for_65 = REPEAT(op_LT_4, seq_64); + + // seq(i = 0x0; while ((i < 0x2)) { seq(seq(h_tmp59; Rdd = ((st64) ...; + RzILOpEffect *seq_66 = SEQN(2, op_ASSIGN_2, for_65); + + RzILOpEffect *instruction_sequence = seq_66; + return instruction_sequence; +} + +// Rdd = vavgw(Rss,Rtt) +RzILOpEffect *hex_il_op_a2_vavgw(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: st32 i; + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_2 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_5 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp60 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_7 = SETL("h_tmp60", VARL("i")); + + // seq(h_tmp60 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_8 = SEQN(2, op_ASSIGN_hybrid_tmp_7, op_INC_5); + + // Rdd = ((Rdd & (~(0xffffffff << i * 0x20))) | (((sextract64(((ut64) ((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff)))), 0x0, 0x20) + sextract64(((ut64) ((st64) ((st32) ((Rtt >> i * 0x20) & 0xffffffff)))), 0x0, 0x20) >> 0x1) & 0xffffffff) << i * 0x20)); + RzILOpPure *op_MUL_12 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_LSHIFT_13 = SHIFTL0(SN(64, 0xffffffff), op_MUL_12); + RzILOpPure *op_NOT_14 = LOGNOT(op_LSHIFT_13); + RzILOpPure *op_AND_15 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_14); + RzILOpPure *op_MUL_21 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_RSHIFT_22 = SHIFTRA(Rss, op_MUL_21); + RzILOpPure *op_AND_24 = LOGAND(op_RSHIFT_22, SN(64, 0xffffffff)); + RzILOpPure *op_MUL_37 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_RSHIFT_38 = SHIFTRA(Rtt, op_MUL_37); + RzILOpPure *op_AND_40 = LOGAND(op_RSHIFT_38, SN(64, 0xffffffff)); + RzILOpPure *op_ADD_48 = 
ADD(SEXTRACT64(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(op_AND_24), DUP(op_AND_24))), CAST(32, MSB(DUP(op_AND_24)), DUP(op_AND_24)))), SN(32, 0), SN(32, 0x20)), SEXTRACT64(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(op_AND_40), DUP(op_AND_40))), CAST(32, MSB(DUP(op_AND_40)), DUP(op_AND_40)))), SN(32, 0), SN(32, 0x20))); + RzILOpPure *op_RSHIFT_50 = SHIFTRA(op_ADD_48, SN(32, 1)); + RzILOpPure *op_AND_52 = LOGAND(op_RSHIFT_50, SN(64, 0xffffffff)); + RzILOpPure *op_MUL_54 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_LSHIFT_55 = SHIFTL0(op_AND_52, op_MUL_54); + RzILOpPure *op_OR_56 = LOGOR(op_AND_15, op_LSHIFT_55); + RzILOpEffect *op_ASSIGN_57 = WRITE_REG(bundle, Rdd_op, op_OR_56); + + // seq(h_tmp60; Rdd = ((Rdd & (~(0xffffffff << i * 0x20))) | (((sex ...; + RzILOpEffect *seq_59 = op_ASSIGN_57; + + // seq(seq(h_tmp60; Rdd = ((Rdd & (~(0xffffffff << i * 0x20))) | (( ...; + RzILOpEffect *seq_60 = SEQN(2, seq_59, seq_8); + + // while ((i < 0x2)) { seq(seq(h_tmp60; Rdd = ((Rdd & (~(0xffffffff << i * 0x20))) | (( ... 
}; + RzILOpPure *op_LT_4 = SLT(VARL("i"), SN(32, 2)); + RzILOpEffect *for_61 = REPEAT(op_LT_4, seq_60); + + // seq(i = 0x0; while ((i < 0x2)) { seq(seq(h_tmp60; Rdd = ((Rdd & ...; + RzILOpEffect *seq_62 = SEQN(2, op_ASSIGN_2, for_61); + + RzILOpEffect *instruction_sequence = seq_62; + return instruction_sequence; +} + +// Rdd = vavgw(Rss,Rtt):crnd +RzILOpEffect *hex_il_op_a2_vavgwcr(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: st32 i; + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_2 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_5 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp61 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_7 = SETL("h_tmp61", VARL("i")); + + // seq(h_tmp61 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_8 = SEQN(2, op_ASSIGN_hybrid_tmp_7, op_INC_5); + + // Rdd = ((Rdd & (~(0xffffffff << i * 0x20))) | ((((((sextract64(((ut64) ((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff)))), 0x0, 0x20) + sextract64(((ut64) ((st64) ((st32) ((Rtt >> i * 0x20) & 0xffffffff)))), 0x0, 0x20) & ((st64) 0x3)) == ((st64) 0x3)) ? 
sextract64(((ut64) ((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff)))), 0x0, 0x20) + sextract64(((ut64) ((st64) ((st32) ((Rtt >> i * 0x20) & 0xffffffff)))), 0x0, 0x20) + ((st64) 0x1) : sextract64(((ut64) ((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff)))), 0x0, 0x20) + sextract64(((ut64) ((st64) ((st32) ((Rtt >> i * 0x20) & 0xffffffff)))), 0x0, 0x20)) >> 0x1) & 0xffffffff) << i * 0x20)); + RzILOpPure *op_MUL_12 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_LSHIFT_13 = SHIFTL0(SN(64, 0xffffffff), op_MUL_12); + RzILOpPure *op_NOT_14 = LOGNOT(op_LSHIFT_13); + RzILOpPure *op_AND_15 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_14); + RzILOpPure *op_MUL_21 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_RSHIFT_22 = SHIFTRA(Rss, op_MUL_21); + RzILOpPure *op_AND_24 = LOGAND(op_RSHIFT_22, SN(64, 0xffffffff)); + RzILOpPure *op_MUL_37 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_RSHIFT_38 = SHIFTRA(Rtt, op_MUL_37); + RzILOpPure *op_AND_40 = LOGAND(op_RSHIFT_38, SN(64, 0xffffffff)); + RzILOpPure *op_ADD_48 = ADD(SEXTRACT64(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(op_AND_24), DUP(op_AND_24))), CAST(32, MSB(DUP(op_AND_24)), DUP(op_AND_24)))), SN(32, 0), SN(32, 0x20)), SEXTRACT64(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(op_AND_40), DUP(op_AND_40))), CAST(32, MSB(DUP(op_AND_40)), DUP(op_AND_40)))), SN(32, 0), SN(32, 0x20))); + RzILOpPure *op_AND_51 = LOGAND(op_ADD_48, CAST(64, MSB(SN(32, 3)), SN(32, 3))); + RzILOpPure *op_EQ_54 = EQ(op_AND_51, CAST(64, MSB(SN(32, 3)), SN(32, 3))); + RzILOpPure *op_MUL_59 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_RSHIFT_60 = SHIFTRA(DUP(Rss), op_MUL_59); + RzILOpPure *op_AND_62 = LOGAND(op_RSHIFT_60, SN(64, 0xffffffff)); + RzILOpPure *op_MUL_74 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_RSHIFT_75 = SHIFTRA(DUP(Rtt), op_MUL_74); + RzILOpPure *op_AND_77 = LOGAND(op_RSHIFT_75, SN(64, 0xffffffff)); + RzILOpPure *op_ADD_85 = ADD(SEXTRACT64(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(op_AND_62), DUP(op_AND_62))), CAST(32, 
MSB(DUP(op_AND_62)), DUP(op_AND_62)))), SN(32, 0), SN(32, 0x20)), SEXTRACT64(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(op_AND_77), DUP(op_AND_77))), CAST(32, MSB(DUP(op_AND_77)), DUP(op_AND_77)))), SN(32, 0), SN(32, 0x20))); + RzILOpPure *op_ADD_88 = ADD(op_ADD_85, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *op_MUL_93 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_RSHIFT_94 = SHIFTRA(DUP(Rss), op_MUL_93); + RzILOpPure *op_AND_96 = LOGAND(op_RSHIFT_94, SN(64, 0xffffffff)); + RzILOpPure *op_MUL_108 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_RSHIFT_109 = SHIFTRA(DUP(Rtt), op_MUL_108); + RzILOpPure *op_AND_111 = LOGAND(op_RSHIFT_109, SN(64, 0xffffffff)); + RzILOpPure *op_ADD_119 = ADD(SEXTRACT64(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(op_AND_96), DUP(op_AND_96))), CAST(32, MSB(DUP(op_AND_96)), DUP(op_AND_96)))), SN(32, 0), SN(32, 0x20)), SEXTRACT64(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(op_AND_111), DUP(op_AND_111))), CAST(32, MSB(DUP(op_AND_111)), DUP(op_AND_111)))), SN(32, 0), SN(32, 0x20))); + RzILOpPure *cond_120 = ITE(op_EQ_54, op_ADD_88, op_ADD_119); + RzILOpPure *op_RSHIFT_122 = SHIFTRA(cond_120, SN(32, 1)); + RzILOpPure *op_AND_124 = LOGAND(op_RSHIFT_122, SN(64, 0xffffffff)); + RzILOpPure *op_MUL_126 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_LSHIFT_127 = SHIFTL0(op_AND_124, op_MUL_126); + RzILOpPure *op_OR_128 = LOGOR(op_AND_15, op_LSHIFT_127); + RzILOpEffect *op_ASSIGN_129 = WRITE_REG(bundle, Rdd_op, op_OR_128); + + // seq(h_tmp61; Rdd = ((Rdd & (~(0xffffffff << i * 0x20))) | (((((( ...; + RzILOpEffect *seq_131 = op_ASSIGN_129; + + // seq(seq(h_tmp61; Rdd = ((Rdd & (~(0xffffffff << i * 0x20))) | (( ...; + RzILOpEffect *seq_132 = SEQN(2, seq_131, seq_8); + + // while ((i < 0x2)) { seq(seq(h_tmp61; Rdd = ((Rdd & (~(0xffffffff << i * 0x20))) | (( ... 
}; + RzILOpPure *op_LT_4 = SLT(VARL("i"), SN(32, 2)); + RzILOpEffect *for_133 = REPEAT(op_LT_4, seq_132); + + // seq(i = 0x0; while ((i < 0x2)) { seq(seq(h_tmp61; Rdd = ((Rdd & ...; + RzILOpEffect *seq_134 = SEQN(2, op_ASSIGN_2, for_133); + + RzILOpEffect *instruction_sequence = seq_134; + return instruction_sequence; +} + +// Rdd = vavgw(Rss,Rtt):rnd +RzILOpEffect *hex_il_op_a2_vavgwr(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: st32 i; + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_2 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_5 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp62 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_7 = SETL("h_tmp62", VARL("i")); + + // seq(h_tmp62 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_8 = SEQN(2, op_ASSIGN_hybrid_tmp_7, op_INC_5); + + // Rdd = ((Rdd & (~(0xffffffff << i * 0x20))) | (((sextract64(((ut64) ((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff)))), 0x0, 0x20) + sextract64(((ut64) ((st64) ((st32) ((Rtt >> i * 0x20) & 0xffffffff)))), 0x0, 0x20) + ((st64) 0x1) >> 0x1) & 0xffffffff) << i * 0x20)); + RzILOpPure *op_MUL_12 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_LSHIFT_13 = SHIFTL0(SN(64, 0xffffffff), op_MUL_12); + RzILOpPure *op_NOT_14 = LOGNOT(op_LSHIFT_13); + RzILOpPure *op_AND_15 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_14); + RzILOpPure *op_MUL_21 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_RSHIFT_22 = SHIFTRA(Rss, op_MUL_21); + RzILOpPure *op_AND_24 = LOGAND(op_RSHIFT_22, SN(64, 0xffffffff)); + RzILOpPure *op_MUL_37 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_RSHIFT_38 = SHIFTRA(Rtt, op_MUL_37); + RzILOpPure *op_AND_40 = LOGAND(op_RSHIFT_38, SN(64, 0xffffffff)); + RzILOpPure 
*op_ADD_48 = ADD(SEXTRACT64(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(op_AND_24), DUP(op_AND_24))), CAST(32, MSB(DUP(op_AND_24)), DUP(op_AND_24)))), SN(32, 0), SN(32, 0x20)), SEXTRACT64(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(op_AND_40), DUP(op_AND_40))), CAST(32, MSB(DUP(op_AND_40)), DUP(op_AND_40)))), SN(32, 0), SN(32, 0x20))); + RzILOpPure *op_ADD_51 = ADD(op_ADD_48, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *op_RSHIFT_53 = SHIFTRA(op_ADD_51, SN(32, 1)); + RzILOpPure *op_AND_55 = LOGAND(op_RSHIFT_53, SN(64, 0xffffffff)); + RzILOpPure *op_MUL_57 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_LSHIFT_58 = SHIFTL0(op_AND_55, op_MUL_57); + RzILOpPure *op_OR_59 = LOGOR(op_AND_15, op_LSHIFT_58); + RzILOpEffect *op_ASSIGN_60 = WRITE_REG(bundle, Rdd_op, op_OR_59); + + // seq(h_tmp62; Rdd = ((Rdd & (~(0xffffffff << i * 0x20))) | (((sex ...; + RzILOpEffect *seq_62 = op_ASSIGN_60; + + // seq(seq(h_tmp62; Rdd = ((Rdd & (~(0xffffffff << i * 0x20))) | (( ...; + RzILOpEffect *seq_63 = SEQN(2, seq_62, seq_8); + + // while ((i < 0x2)) { seq(seq(h_tmp62; Rdd = ((Rdd & (~(0xffffffff << i * 0x20))) | (( ... 
}; + RzILOpPure *op_LT_4 = SLT(VARL("i"), SN(32, 2)); + RzILOpEffect *for_64 = REPEAT(op_LT_4, seq_63); + + // seq(i = 0x0; while ((i < 0x2)) { seq(seq(h_tmp62; Rdd = ((Rdd & ...; + RzILOpEffect *seq_65 = SEQN(2, op_ASSIGN_2, for_64); + + RzILOpEffect *instruction_sequence = seq_65; + return instruction_sequence; +} + +// Pd = vcmpb.eq(Rss,Rtt) +RzILOpEffect *hex_il_op_a2_vcmpbeq(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: st32 i; + const HexOp *Pd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_2 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_5 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp63 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_7 = SETL("h_tmp63", VARL("i")); + + // seq(h_tmp63 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_8 = SEQN(2, op_ASSIGN_hybrid_tmp_7, op_INC_5); + + // Pd = ((st8) ((((ut64) ((st32) Pd)) & (~(0x1 << i))) | (((((st8) ((Rss >> i * 0x8) & ((st64) 0xff))) == ((st8) ((Rtt >> i * 0x8) & ((st64) 0xff)))) ? 
0x1 : 0x0) << i))); + RzILOpPure *op_LSHIFT_11 = SHIFTL0(UN(64, 1), VARL("i")); + RzILOpPure *op_NOT_12 = LOGNOT(op_LSHIFT_11); + RzILOpPure *op_AND_15 = LOGAND(CAST(64, IL_FALSE, CAST(32, MSB(READ_REG(pkt, Pd_op, true)), READ_REG(pkt, Pd_op, true))), op_NOT_12); + RzILOpPure *op_MUL_18 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_RSHIFT_19 = SHIFTRA(Rss, op_MUL_18); + RzILOpPure *op_AND_22 = LOGAND(op_RSHIFT_19, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_MUL_26 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_RSHIFT_27 = SHIFTRA(Rtt, op_MUL_26); + RzILOpPure *op_AND_30 = LOGAND(op_RSHIFT_27, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_EQ_32 = EQ(CAST(8, MSB(op_AND_22), DUP(op_AND_22)), CAST(8, MSB(op_AND_30), DUP(op_AND_30))); + RzILOpPure *ite_cast_ut64_33 = ITE(op_EQ_32, UN(64, 1), UN(64, 0)); + RzILOpPure *op_LSHIFT_34 = SHIFTL0(ite_cast_ut64_33, VARL("i")); + RzILOpPure *op_OR_35 = LOGOR(op_AND_15, op_LSHIFT_34); + RzILOpEffect *op_ASSIGN_37 = WRITE_REG(bundle, Pd_op, CAST(8, IL_FALSE, op_OR_35)); + + // seq(h_tmp63; Pd = ((st8) ((((ut64) ((st32) Pd)) & (~(0x1 << i))) ...; + RzILOpEffect *seq_39 = op_ASSIGN_37; + + // seq(seq(h_tmp63; Pd = ((st8) ((((ut64) ((st32) Pd)) & (~(0x1 << ...; + RzILOpEffect *seq_40 = SEQN(2, seq_39, seq_8); + + // while ((i < 0x8)) { seq(seq(h_tmp63; Pd = ((st8) ((((ut64) ((st32) Pd)) & (~(0x1 << ... 
}; + RzILOpPure *op_LT_4 = SLT(VARL("i"), SN(32, 8)); + RzILOpEffect *for_41 = REPEAT(op_LT_4, seq_40); + + // seq(i = 0x0; while ((i < 0x8)) { seq(seq(h_tmp63; Pd = ((st8) (( ...; + RzILOpEffect *seq_42 = SEQN(2, op_ASSIGN_2, for_41); + + RzILOpEffect *instruction_sequence = seq_42; + return instruction_sequence; +} + +// Pd = vcmpb.gtu(Rss,Rtt) +RzILOpEffect *hex_il_op_a2_vcmpbgtu(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: st32 i; + const HexOp *Pd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_2 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_5 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp64 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_7 = SETL("h_tmp64", VARL("i")); + + // seq(h_tmp64 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_8 = SEQN(2, op_ASSIGN_hybrid_tmp_7, op_INC_5); + + // Pd = ((st8) ((((ut64) ((st32) Pd)) & (~(0x1 << i))) | (((((ut8) ((Rss >> i * 0x8) & ((st64) 0xff))) > ((ut8) ((Rtt >> i * 0x8) & ((st64) 0xff)))) ? 
0x1 : 0x0) << i))); + RzILOpPure *op_LSHIFT_11 = SHIFTL0(UN(64, 1), VARL("i")); + RzILOpPure *op_NOT_12 = LOGNOT(op_LSHIFT_11); + RzILOpPure *op_AND_15 = LOGAND(CAST(64, IL_FALSE, CAST(32, MSB(READ_REG(pkt, Pd_op, true)), READ_REG(pkt, Pd_op, true))), op_NOT_12); + RzILOpPure *op_MUL_18 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_RSHIFT_19 = SHIFTRA(Rss, op_MUL_18); + RzILOpPure *op_AND_22 = LOGAND(op_RSHIFT_19, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_MUL_26 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_RSHIFT_27 = SHIFTRA(Rtt, op_MUL_26); + RzILOpPure *op_AND_30 = LOGAND(op_RSHIFT_27, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_GT_32 = UGT(CAST(8, IL_FALSE, op_AND_22), CAST(8, IL_FALSE, op_AND_30)); + RzILOpPure *ite_cast_ut64_33 = ITE(op_GT_32, UN(64, 1), UN(64, 0)); + RzILOpPure *op_LSHIFT_34 = SHIFTL0(ite_cast_ut64_33, VARL("i")); + RzILOpPure *op_OR_35 = LOGOR(op_AND_15, op_LSHIFT_34); + RzILOpEffect *op_ASSIGN_37 = WRITE_REG(bundle, Pd_op, CAST(8, IL_FALSE, op_OR_35)); + + // seq(h_tmp64; Pd = ((st8) ((((ut64) ((st32) Pd)) & (~(0x1 << i))) ...; + RzILOpEffect *seq_39 = op_ASSIGN_37; + + // seq(seq(h_tmp64; Pd = ((st8) ((((ut64) ((st32) Pd)) & (~(0x1 << ...; + RzILOpEffect *seq_40 = SEQN(2, seq_39, seq_8); + + // while ((i < 0x8)) { seq(seq(h_tmp64; Pd = ((st8) ((((ut64) ((st32) Pd)) & (~(0x1 << ... 
}; + RzILOpPure *op_LT_4 = SLT(VARL("i"), SN(32, 8)); + RzILOpEffect *for_41 = REPEAT(op_LT_4, seq_40); + + // seq(i = 0x0; while ((i < 0x8)) { seq(seq(h_tmp64; Pd = ((st8) (( ...; + RzILOpEffect *seq_42 = SEQN(2, op_ASSIGN_2, for_41); + + RzILOpEffect *instruction_sequence = seq_42; + return instruction_sequence; +} + +// Pd = vcmph.eq(Rss,Rtt) +RzILOpEffect *hex_il_op_a2_vcmpheq(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: st32 i; + const HexOp *Pd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_2 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_5 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp65 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_7 = SETL("h_tmp65", VARL("i")); + + // seq(h_tmp65 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_8 = SEQN(2, op_ASSIGN_hybrid_tmp_7, op_INC_5); + + // Pd = ((st8) ((((ut64) ((st32) Pd)) & (~(0x1 << i * 0x2))) | (((((st16) ((Rss >> i * 0x10) & ((st64) 0xffff))) == ((st16) ((Rtt >> i * 0x10) & ((st64) 0xffff)))) ? 
0x1 : 0x0) << i * 0x2))); + RzILOpPure *op_MUL_12 = MUL(VARL("i"), SN(32, 2)); + RzILOpPure *op_LSHIFT_13 = SHIFTL0(UN(64, 1), op_MUL_12); + RzILOpPure *op_NOT_14 = LOGNOT(op_LSHIFT_13); + RzILOpPure *op_AND_17 = LOGAND(CAST(64, IL_FALSE, CAST(32, MSB(READ_REG(pkt, Pd_op, true)), READ_REG(pkt, Pd_op, true))), op_NOT_14); + RzILOpPure *op_MUL_20 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_21 = SHIFTRA(Rss, op_MUL_20); + RzILOpPure *op_AND_24 = LOGAND(op_RSHIFT_21, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_28 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_29 = SHIFTRA(Rtt, op_MUL_28); + RzILOpPure *op_AND_32 = LOGAND(op_RSHIFT_29, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_EQ_34 = EQ(CAST(16, MSB(op_AND_24), DUP(op_AND_24)), CAST(16, MSB(op_AND_32), DUP(op_AND_32))); + RzILOpPure *ite_cast_ut64_35 = ITE(op_EQ_34, UN(64, 1), UN(64, 0)); + RzILOpPure *op_MUL_37 = MUL(VARL("i"), SN(32, 2)); + RzILOpPure *op_LSHIFT_38 = SHIFTL0(ite_cast_ut64_35, op_MUL_37); + RzILOpPure *op_OR_39 = LOGOR(op_AND_17, op_LSHIFT_38); + RzILOpEffect *op_ASSIGN_41 = WRITE_REG(bundle, Pd_op, CAST(8, IL_FALSE, op_OR_39)); + + // Pd = ((st8) ((((ut64) ((st32) Pd)) & (~(0x1 << i * 0x2 + 0x1))) | (((((st16) ((Rss >> i * 0x10) & ((st64) 0xffff))) == ((st16) ((Rtt >> i * 0x10) & ((st64) 0xffff)))) ? 
0x1 : 0x0) << i * 0x2 + 0x1))); + RzILOpPure *op_MUL_45 = MUL(VARL("i"), SN(32, 2)); + RzILOpPure *op_ADD_47 = ADD(op_MUL_45, SN(32, 1)); + RzILOpPure *op_LSHIFT_48 = SHIFTL0(UN(64, 1), op_ADD_47); + RzILOpPure *op_NOT_49 = LOGNOT(op_LSHIFT_48); + RzILOpPure *op_AND_52 = LOGAND(CAST(64, IL_FALSE, CAST(32, MSB(READ_REG(pkt, Pd_op, true)), READ_REG(pkt, Pd_op, true))), op_NOT_49); + RzILOpPure *op_MUL_54 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_55 = SHIFTRA(DUP(Rss), op_MUL_54); + RzILOpPure *op_AND_58 = LOGAND(op_RSHIFT_55, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_61 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_62 = SHIFTRA(DUP(Rtt), op_MUL_61); + RzILOpPure *op_AND_65 = LOGAND(op_RSHIFT_62, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_EQ_67 = EQ(CAST(16, MSB(op_AND_58), DUP(op_AND_58)), CAST(16, MSB(op_AND_65), DUP(op_AND_65))); + RzILOpPure *ite_cast_ut64_68 = ITE(op_EQ_67, UN(64, 1), UN(64, 0)); + RzILOpPure *op_MUL_70 = MUL(VARL("i"), SN(32, 2)); + RzILOpPure *op_ADD_72 = ADD(op_MUL_70, SN(32, 1)); + RzILOpPure *op_LSHIFT_73 = SHIFTL0(ite_cast_ut64_68, op_ADD_72); + RzILOpPure *op_OR_74 = LOGOR(op_AND_52, op_LSHIFT_73); + RzILOpEffect *op_ASSIGN_76 = WRITE_REG(bundle, Pd_op, CAST(8, IL_FALSE, op_OR_74)); + + // seq(h_tmp65; Pd = ((st8) ((((ut64) ((st32) Pd)) & (~(0x1 << i * ...; + RzILOpEffect *seq_78 = SEQN(2, op_ASSIGN_41, op_ASSIGN_76); + + // seq(seq(h_tmp65; Pd = ((st8) ((((ut64) ((st32) Pd)) & (~(0x1 << ...; + RzILOpEffect *seq_79 = SEQN(2, seq_78, seq_8); + + // while ((i < 0x4)) { seq(seq(h_tmp65; Pd = ((st8) ((((ut64) ((st32) Pd)) & (~(0x1 << ... 
}; + RzILOpPure *op_LT_4 = SLT(VARL("i"), SN(32, 4)); + RzILOpEffect *for_80 = REPEAT(op_LT_4, seq_79); + + // seq(i = 0x0; while ((i < 0x4)) { seq(seq(h_tmp65; Pd = ((st8) (( ...; + RzILOpEffect *seq_81 = SEQN(2, op_ASSIGN_2, for_80); + + RzILOpEffect *instruction_sequence = seq_81; + return instruction_sequence; +} + +// Pd = vcmph.gt(Rss,Rtt) +RzILOpEffect *hex_il_op_a2_vcmphgt(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: st32 i; + const HexOp *Pd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_2 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_5 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp66 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_7 = SETL("h_tmp66", VARL("i")); + + // seq(h_tmp66 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_8 = SEQN(2, op_ASSIGN_hybrid_tmp_7, op_INC_5); + + // Pd = ((st8) ((((ut64) ((st32) Pd)) & (~(0x1 << i * 0x2))) | (((((st16) ((Rss >> i * 0x10) & ((st64) 0xffff))) > ((st16) ((Rtt >> i * 0x10) & ((st64) 0xffff)))) ? 
0x1 : 0x0) << i * 0x2))); + RzILOpPure *op_MUL_12 = MUL(VARL("i"), SN(32, 2)); + RzILOpPure *op_LSHIFT_13 = SHIFTL0(UN(64, 1), op_MUL_12); + RzILOpPure *op_NOT_14 = LOGNOT(op_LSHIFT_13); + RzILOpPure *op_AND_17 = LOGAND(CAST(64, IL_FALSE, CAST(32, MSB(READ_REG(pkt, Pd_op, true)), READ_REG(pkt, Pd_op, true))), op_NOT_14); + RzILOpPure *op_MUL_20 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_21 = SHIFTRA(Rss, op_MUL_20); + RzILOpPure *op_AND_24 = LOGAND(op_RSHIFT_21, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_28 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_29 = SHIFTRA(Rtt, op_MUL_28); + RzILOpPure *op_AND_32 = LOGAND(op_RSHIFT_29, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_GT_34 = SGT(CAST(16, MSB(op_AND_24), DUP(op_AND_24)), CAST(16, MSB(op_AND_32), DUP(op_AND_32))); + RzILOpPure *ite_cast_ut64_35 = ITE(op_GT_34, UN(64, 1), UN(64, 0)); + RzILOpPure *op_MUL_37 = MUL(VARL("i"), SN(32, 2)); + RzILOpPure *op_LSHIFT_38 = SHIFTL0(ite_cast_ut64_35, op_MUL_37); + RzILOpPure *op_OR_39 = LOGOR(op_AND_17, op_LSHIFT_38); + RzILOpEffect *op_ASSIGN_41 = WRITE_REG(bundle, Pd_op, CAST(8, IL_FALSE, op_OR_39)); + + // Pd = ((st8) ((((ut64) ((st32) Pd)) & (~(0x1 << i * 0x2 + 0x1))) | (((((st16) ((Rss >> i * 0x10) & ((st64) 0xffff))) > ((st16) ((Rtt >> i * 0x10) & ((st64) 0xffff)))) ? 
0x1 : 0x0) << i * 0x2 + 0x1))); + RzILOpPure *op_MUL_45 = MUL(VARL("i"), SN(32, 2)); + RzILOpPure *op_ADD_47 = ADD(op_MUL_45, SN(32, 1)); + RzILOpPure *op_LSHIFT_48 = SHIFTL0(UN(64, 1), op_ADD_47); + RzILOpPure *op_NOT_49 = LOGNOT(op_LSHIFT_48); + RzILOpPure *op_AND_52 = LOGAND(CAST(64, IL_FALSE, CAST(32, MSB(READ_REG(pkt, Pd_op, true)), READ_REG(pkt, Pd_op, true))), op_NOT_49); + RzILOpPure *op_MUL_54 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_55 = SHIFTRA(DUP(Rss), op_MUL_54); + RzILOpPure *op_AND_58 = LOGAND(op_RSHIFT_55, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_61 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_62 = SHIFTRA(DUP(Rtt), op_MUL_61); + RzILOpPure *op_AND_65 = LOGAND(op_RSHIFT_62, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_GT_67 = SGT(CAST(16, MSB(op_AND_58), DUP(op_AND_58)), CAST(16, MSB(op_AND_65), DUP(op_AND_65))); + RzILOpPure *ite_cast_ut64_68 = ITE(op_GT_67, UN(64, 1), UN(64, 0)); + RzILOpPure *op_MUL_70 = MUL(VARL("i"), SN(32, 2)); + RzILOpPure *op_ADD_72 = ADD(op_MUL_70, SN(32, 1)); + RzILOpPure *op_LSHIFT_73 = SHIFTL0(ite_cast_ut64_68, op_ADD_72); + RzILOpPure *op_OR_74 = LOGOR(op_AND_52, op_LSHIFT_73); + RzILOpEffect *op_ASSIGN_76 = WRITE_REG(bundle, Pd_op, CAST(8, IL_FALSE, op_OR_74)); + + // seq(h_tmp66; Pd = ((st8) ((((ut64) ((st32) Pd)) & (~(0x1 << i * ...; + RzILOpEffect *seq_78 = SEQN(2, op_ASSIGN_41, op_ASSIGN_76); + + // seq(seq(h_tmp66; Pd = ((st8) ((((ut64) ((st32) Pd)) & (~(0x1 << ...; + RzILOpEffect *seq_79 = SEQN(2, seq_78, seq_8); + + // while ((i < 0x4)) { seq(seq(h_tmp66; Pd = ((st8) ((((ut64) ((st32) Pd)) & (~(0x1 << ... 
}; + RzILOpPure *op_LT_4 = SLT(VARL("i"), SN(32, 4)); + RzILOpEffect *for_80 = REPEAT(op_LT_4, seq_79); + + // seq(i = 0x0; while ((i < 0x4)) { seq(seq(h_tmp66; Pd = ((st8) (( ...; + RzILOpEffect *seq_81 = SEQN(2, op_ASSIGN_2, for_80); + + RzILOpEffect *instruction_sequence = seq_81; + return instruction_sequence; +} + +// Pd = vcmph.gtu(Rss,Rtt) +RzILOpEffect *hex_il_op_a2_vcmphgtu(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: st32 i; + const HexOp *Pd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_2 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_5 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp67 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_7 = SETL("h_tmp67", VARL("i")); + + // seq(h_tmp67 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_8 = SEQN(2, op_ASSIGN_hybrid_tmp_7, op_INC_5); + + // Pd = ((st8) ((((ut64) ((st32) Pd)) & (~(0x1 << i * 0x2))) | (((((ut16) ((Rss >> i * 0x10) & ((st64) 0xffff))) > ((ut16) ((Rtt >> i * 0x10) & ((st64) 0xffff)))) ? 
0x1 : 0x0) << i * 0x2))); + RzILOpPure *op_MUL_12 = MUL(VARL("i"), SN(32, 2)); + RzILOpPure *op_LSHIFT_13 = SHIFTL0(UN(64, 1), op_MUL_12); + RzILOpPure *op_NOT_14 = LOGNOT(op_LSHIFT_13); + RzILOpPure *op_AND_17 = LOGAND(CAST(64, IL_FALSE, CAST(32, MSB(READ_REG(pkt, Pd_op, true)), READ_REG(pkt, Pd_op, true))), op_NOT_14); + RzILOpPure *op_MUL_20 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_21 = SHIFTRA(Rss, op_MUL_20); + RzILOpPure *op_AND_24 = LOGAND(op_RSHIFT_21, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_28 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_29 = SHIFTRA(Rtt, op_MUL_28); + RzILOpPure *op_AND_32 = LOGAND(op_RSHIFT_29, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_GT_34 = UGT(CAST(16, IL_FALSE, op_AND_24), CAST(16, IL_FALSE, op_AND_32)); + RzILOpPure *ite_cast_ut64_35 = ITE(op_GT_34, UN(64, 1), UN(64, 0)); + RzILOpPure *op_MUL_37 = MUL(VARL("i"), SN(32, 2)); + RzILOpPure *op_LSHIFT_38 = SHIFTL0(ite_cast_ut64_35, op_MUL_37); + RzILOpPure *op_OR_39 = LOGOR(op_AND_17, op_LSHIFT_38); + RzILOpEffect *op_ASSIGN_41 = WRITE_REG(bundle, Pd_op, CAST(8, IL_FALSE, op_OR_39)); + + // Pd = ((st8) ((((ut64) ((st32) Pd)) & (~(0x1 << i * 0x2 + 0x1))) | (((((ut16) ((Rss >> i * 0x10) & ((st64) 0xffff))) > ((ut16) ((Rtt >> i * 0x10) & ((st64) 0xffff)))) ? 
0x1 : 0x0) << i * 0x2 + 0x1))); + RzILOpPure *op_MUL_45 = MUL(VARL("i"), SN(32, 2)); + RzILOpPure *op_ADD_47 = ADD(op_MUL_45, SN(32, 1)); + RzILOpPure *op_LSHIFT_48 = SHIFTL0(UN(64, 1), op_ADD_47); + RzILOpPure *op_NOT_49 = LOGNOT(op_LSHIFT_48); + RzILOpPure *op_AND_52 = LOGAND(CAST(64, IL_FALSE, CAST(32, MSB(READ_REG(pkt, Pd_op, true)), READ_REG(pkt, Pd_op, true))), op_NOT_49); + RzILOpPure *op_MUL_54 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_55 = SHIFTRA(DUP(Rss), op_MUL_54); + RzILOpPure *op_AND_58 = LOGAND(op_RSHIFT_55, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_61 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_62 = SHIFTRA(DUP(Rtt), op_MUL_61); + RzILOpPure *op_AND_65 = LOGAND(op_RSHIFT_62, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_GT_67 = UGT(CAST(16, IL_FALSE, op_AND_58), CAST(16, IL_FALSE, op_AND_65)); + RzILOpPure *ite_cast_ut64_68 = ITE(op_GT_67, UN(64, 1), UN(64, 0)); + RzILOpPure *op_MUL_70 = MUL(VARL("i"), SN(32, 2)); + RzILOpPure *op_ADD_72 = ADD(op_MUL_70, SN(32, 1)); + RzILOpPure *op_LSHIFT_73 = SHIFTL0(ite_cast_ut64_68, op_ADD_72); + RzILOpPure *op_OR_74 = LOGOR(op_AND_52, op_LSHIFT_73); + RzILOpEffect *op_ASSIGN_76 = WRITE_REG(bundle, Pd_op, CAST(8, IL_FALSE, op_OR_74)); + + // seq(h_tmp67; Pd = ((st8) ((((ut64) ((st32) Pd)) & (~(0x1 << i * ...; + RzILOpEffect *seq_78 = SEQN(2, op_ASSIGN_41, op_ASSIGN_76); + + // seq(seq(h_tmp67; Pd = ((st8) ((((ut64) ((st32) Pd)) & (~(0x1 << ...; + RzILOpEffect *seq_79 = SEQN(2, seq_78, seq_8); + + // while ((i < 0x4)) { seq(seq(h_tmp67; Pd = ((st8) ((((ut64) ((st32) Pd)) & (~(0x1 << ... 
// NOTE(review): the first lines below are the tail of the preceding generated
// handler, whose definition starts before this chunk; the leading "... };" is
// the wrapped end of its generated loop comment.
// ... };
	RzILOpPure *op_LT_4 = SLT(VARL("i"), SN(32, 4));
	RzILOpEffect *for_80 = REPEAT(op_LT_4, seq_79);

	// seq(i = 0x0; while ((i < 0x4)) { seq(seq(h_tmp67; Pd = ((st8) (( ...;
	RzILOpEffect *seq_81 = SEQN(2, op_ASSIGN_2, for_80);

	RzILOpEffect *instruction_sequence = seq_81;
	return instruction_sequence;
}

// Pd = vcmpw.eq(Rss,Rtt)
// Word-wise equality compare: the low 32-bit words of Rss/Rtt set predicate
// bits 0..3 (first loop, lane shift 0x0), the high words set bits 4..7
// (second loop, lane shift 0x20).
RzILOpEffect *hex_il_op_a2_vcmpweq(HexInsnPktBundle *bundle) {
	const HexInsn *hi = bundle->insn;
	HexPkt *pkt = bundle->pkt;
	// READ
	// Declare: st32 j;
	const HexOp *Pd_op = ISA2REG(hi, 'd', false);
	const HexOp *Rss_op = ISA2REG(hi, 's', false);
	RzILOpPure *Rss = READ_REG(pkt, Rss_op, false);
	const HexOp *Rtt_op = ISA2REG(hi, 't', false);
	RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false);

	// j = 0x0;
	RzILOpEffect *op_ASSIGN_2 = SETL("j", SN(32, 0));

	// HYB(++j);  (advance the loop counter)
	RzILOpEffect *op_INC_5 = SETL("j", INC(VARL("j"), 32));

	// h_tmp68 = HYB(++j);
	RzILOpEffect *op_ASSIGN_hybrid_tmp_7 = SETL("h_tmp68", VARL("j"));

	// seq(h_tmp68 = HYB(++j); HYB(++j));
	RzILOpEffect *seq_8 = SEQN(2, op_ASSIGN_hybrid_tmp_7, op_INC_5);

	// Pd = ((st8) ((((ut64) ((st32) Pd)) & (~(0x1 << j))) | (((((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))) == ((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff)))) ? 0x1 : 0x0) << j)));
	RzILOpPure *op_LSHIFT_11 = SHIFTL0(UN(64, 1), VARL("j"));
	RzILOpPure *op_NOT_12 = LOGNOT(op_LSHIFT_11);
	RzILOpPure *op_AND_15 = LOGAND(CAST(64, IL_FALSE, CAST(32, MSB(READ_REG(pkt, Pd_op, true)), READ_REG(pkt, Pd_op, true))), op_NOT_12);
	RzILOpPure *op_RSHIFT_20 = SHIFTRA(Rss, SN(32, 0));
	RzILOpPure *op_AND_22 = LOGAND(op_RSHIFT_20, SN(64, 0xffffffff));
	RzILOpPure *op_RSHIFT_29 = SHIFTRA(Rtt, SN(32, 0));
	RzILOpPure *op_AND_31 = LOGAND(op_RSHIFT_29, SN(64, 0xffffffff));
	RzILOpPure *op_EQ_34 = EQ(CAST(64, MSB(CAST(32, MSB(op_AND_22), DUP(op_AND_22))), CAST(32, MSB(DUP(op_AND_22)), DUP(op_AND_22))), CAST(64, MSB(CAST(32, MSB(op_AND_31), DUP(op_AND_31))), CAST(32, MSB(DUP(op_AND_31)), DUP(op_AND_31))));
	RzILOpPure *ite_cast_ut64_35 = ITE(op_EQ_34, UN(64, 1), UN(64, 0));
	RzILOpPure *op_LSHIFT_36 = SHIFTL0(ite_cast_ut64_35, VARL("j"));
	RzILOpPure *op_OR_37 = LOGOR(op_AND_15, op_LSHIFT_36);
	RzILOpEffect *op_ASSIGN_39 = WRITE_REG(bundle, Pd_op, CAST(8, IL_FALSE, op_OR_37));

	// seq(h_tmp68; Pd = ((st8) ((((ut64) ((st32) Pd)) & (~(0x1 << j))) ...;
	RzILOpEffect *seq_41 = op_ASSIGN_39;

	// seq(seq(h_tmp68; Pd = ((st8) ((((ut64) ((st32) Pd)) & (~(0x1 << ...;
	RzILOpEffect *seq_42 = SEQN(2, seq_41, seq_8);

	// while ((j <= 0x3)) { seq(seq(h_tmp68; Pd = ((st8) ((((ut64) ((st32) Pd)) & (~(0x1 << ... };
	RzILOpPure *op_LE_4 = SLE(VARL("j"), SN(32, 3));
	RzILOpEffect *for_43 = REPEAT(op_LE_4, seq_42);

	// seq(j = 0x0; while ((j <= 0x3)) { seq(seq(h_tmp68; Pd = ((st8) ( ...;
	RzILOpEffect *seq_44 = SEQN(2, op_ASSIGN_2, for_43);

	// j = 0x4;  (second loop: high word sets predicate bits 4..7)
	RzILOpEffect *op_ASSIGN_47 = SETL("j", SN(32, 4));

	// HYB(++j);
	RzILOpEffect *op_INC_50 = SETL("j", INC(VARL("j"), 32));

	// h_tmp69 = HYB(++j);
	RzILOpEffect *op_ASSIGN_hybrid_tmp_52 = SETL("h_tmp69", VARL("j"));

	// seq(h_tmp69 = HYB(++j); HYB(++j));
	RzILOpEffect *seq_53 = SEQN(2, op_ASSIGN_hybrid_tmp_52, op_INC_50);

	// Pd = ((st8) ((((ut64) ((st32) Pd)) & (~(0x1 << j))) | (((((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))) == ((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff)))) ? 0x1 : 0x0) << j)));
	RzILOpPure *op_LSHIFT_55 = SHIFTL0(UN(64, 1), VARL("j"));
	RzILOpPure *op_NOT_56 = LOGNOT(op_LSHIFT_55);
	RzILOpPure *op_AND_59 = LOGAND(CAST(64, IL_FALSE, CAST(32, MSB(READ_REG(pkt, Pd_op, true)), READ_REG(pkt, Pd_op, true))), op_NOT_56);
	RzILOpPure *op_RSHIFT_63 = SHIFTRA(DUP(Rss), SN(32, 0x20));
	RzILOpPure *op_AND_65 = LOGAND(op_RSHIFT_63, SN(64, 0xffffffff));
	RzILOpPure *op_RSHIFT_71 = SHIFTRA(DUP(Rtt), SN(32, 0x20));
	RzILOpPure *op_AND_73 = LOGAND(op_RSHIFT_71, SN(64, 0xffffffff));
	RzILOpPure *op_EQ_76 = EQ(CAST(64, MSB(CAST(32, MSB(op_AND_65), DUP(op_AND_65))), CAST(32, MSB(DUP(op_AND_65)), DUP(op_AND_65))), CAST(64, MSB(CAST(32, MSB(op_AND_73), DUP(op_AND_73))), CAST(32, MSB(DUP(op_AND_73)), DUP(op_AND_73))));
	RzILOpPure *ite_cast_ut64_77 = ITE(op_EQ_76, UN(64, 1), UN(64, 0));
	RzILOpPure *op_LSHIFT_78 = SHIFTL0(ite_cast_ut64_77, VARL("j"));
	RzILOpPure *op_OR_79 = LOGOR(op_AND_59, op_LSHIFT_78);
	RzILOpEffect *op_ASSIGN_81 = WRITE_REG(bundle, Pd_op, CAST(8, IL_FALSE, op_OR_79));

	// seq(h_tmp69; Pd = ((st8) ((((ut64) ((st32) Pd)) & (~(0x1 << j))) ...;
	RzILOpEffect *seq_83 = op_ASSIGN_81;

	// seq(seq(h_tmp69; Pd = ((st8) ((((ut64) ((st32) Pd)) & (~(0x1 << ...;
	RzILOpEffect *seq_84 = SEQN(2, seq_83, seq_53);

	// while ((j <= 0x7)) { seq(seq(h_tmp69; Pd = ((st8) ((((ut64) ((st32) Pd)) & (~(0x1 << ... };
	RzILOpPure *op_LE_49 = SLE(VARL("j"), SN(32, 7));
	RzILOpEffect *for_85 = REPEAT(op_LE_49, seq_84);

	// seq(j = 0x4; while ((j <= 0x7)) { seq(seq(h_tmp69; Pd = ((st8) ( ...;
	RzILOpEffect *seq_86 = SEQN(2, op_ASSIGN_47, for_85);

	RzILOpEffect *instruction_sequence = SEQN(2, seq_44, seq_86);
	return instruction_sequence;
}

// Pd = vcmpw.gt(Rss,Rtt)
// Word-wise signed greater-than compare (SGT on sign-extended words);
// same predicate-bit layout as vcmpw.eq: bits 0..3 low word, 4..7 high word.
RzILOpEffect *hex_il_op_a2_vcmpwgt(HexInsnPktBundle *bundle) {
	const HexInsn *hi = bundle->insn;
	HexPkt *pkt = bundle->pkt;
	// READ
	// Declare: st32 j;
	const HexOp *Pd_op = ISA2REG(hi, 'd', false);
	const HexOp *Rss_op = ISA2REG(hi, 's', false);
	RzILOpPure *Rss = READ_REG(pkt, Rss_op, false);
	const HexOp *Rtt_op = ISA2REG(hi, 't', false);
	RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false);

	// j = 0x0;
	RzILOpEffect *op_ASSIGN_2 = SETL("j", SN(32, 0));

	// HYB(++j);
	RzILOpEffect *op_INC_5 = SETL("j", INC(VARL("j"), 32));

	// h_tmp70 = HYB(++j);
	RzILOpEffect *op_ASSIGN_hybrid_tmp_7 = SETL("h_tmp70", VARL("j"));

	// seq(h_tmp70 = HYB(++j); HYB(++j));
	RzILOpEffect *seq_8 = SEQN(2, op_ASSIGN_hybrid_tmp_7, op_INC_5);

	// Pd = ((st8) ((((ut64) ((st32) Pd)) & (~(0x1 << j))) | (((((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))) > ((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff)))) ? 0x1 : 0x0) << j)));
	RzILOpPure *op_LSHIFT_11 = SHIFTL0(UN(64, 1), VARL("j"));
	RzILOpPure *op_NOT_12 = LOGNOT(op_LSHIFT_11);
	RzILOpPure *op_AND_15 = LOGAND(CAST(64, IL_FALSE, CAST(32, MSB(READ_REG(pkt, Pd_op, true)), READ_REG(pkt, Pd_op, true))), op_NOT_12);
	RzILOpPure *op_RSHIFT_20 = SHIFTRA(Rss, SN(32, 0));
	RzILOpPure *op_AND_22 = LOGAND(op_RSHIFT_20, SN(64, 0xffffffff));
	RzILOpPure *op_RSHIFT_29 = SHIFTRA(Rtt, SN(32, 0));
	RzILOpPure *op_AND_31 = LOGAND(op_RSHIFT_29, SN(64, 0xffffffff));
	RzILOpPure *op_GT_34 = SGT(CAST(64, MSB(CAST(32, MSB(op_AND_22), DUP(op_AND_22))), CAST(32, MSB(DUP(op_AND_22)), DUP(op_AND_22))), CAST(64, MSB(CAST(32, MSB(op_AND_31), DUP(op_AND_31))), CAST(32, MSB(DUP(op_AND_31)), DUP(op_AND_31))));
	RzILOpPure *ite_cast_ut64_35 = ITE(op_GT_34, UN(64, 1), UN(64, 0));
	RzILOpPure *op_LSHIFT_36 = SHIFTL0(ite_cast_ut64_35, VARL("j"));
	RzILOpPure *op_OR_37 = LOGOR(op_AND_15, op_LSHIFT_36);
	RzILOpEffect *op_ASSIGN_39 = WRITE_REG(bundle, Pd_op, CAST(8, IL_FALSE, op_OR_37));

	// seq(h_tmp70; Pd = ((st8) ((((ut64) ((st32) Pd)) & (~(0x1 << j))) ...;
	RzILOpEffect *seq_41 = op_ASSIGN_39;

	// seq(seq(h_tmp70; Pd = ((st8) ((((ut64) ((st32) Pd)) & (~(0x1 << ...;
	RzILOpEffect *seq_42 = SEQN(2, seq_41, seq_8);

	// while ((j <= 0x3)) { seq(seq(h_tmp70; Pd = ((st8) ((((ut64) ((st32) Pd)) & (~(0x1 << ... };
	RzILOpPure *op_LE_4 = SLE(VARL("j"), SN(32, 3));
	RzILOpEffect *for_43 = REPEAT(op_LE_4, seq_42);

	// seq(j = 0x0; while ((j <= 0x3)) { seq(seq(h_tmp70; Pd = ((st8) ( ...;
	RzILOpEffect *seq_44 = SEQN(2, op_ASSIGN_2, for_43);

	// j = 0x4;
	RzILOpEffect *op_ASSIGN_46 = SETL("j", SN(32, 4));

	// HYB(++j);
	RzILOpEffect *op_INC_49 = SETL("j", INC(VARL("j"), 32));

	// h_tmp71 = HYB(++j);
	RzILOpEffect *op_ASSIGN_hybrid_tmp_51 = SETL("h_tmp71", VARL("j"));

	// seq(h_tmp71 = HYB(++j); HYB(++j));
	RzILOpEffect *seq_52 = SEQN(2, op_ASSIGN_hybrid_tmp_51, op_INC_49);

	// Pd = ((st8) ((((ut64) ((st32) Pd)) & (~(0x1 << j))) | (((((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))) > ((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff)))) ? 0x1 : 0x0) << j)));
	RzILOpPure *op_LSHIFT_54 = SHIFTL0(UN(64, 1), VARL("j"));
	RzILOpPure *op_NOT_55 = LOGNOT(op_LSHIFT_54);
	RzILOpPure *op_AND_58 = LOGAND(CAST(64, IL_FALSE, CAST(32, MSB(READ_REG(pkt, Pd_op, true)), READ_REG(pkt, Pd_op, true))), op_NOT_55);
	RzILOpPure *op_RSHIFT_62 = SHIFTRA(DUP(Rss), SN(32, 0x20));
	RzILOpPure *op_AND_64 = LOGAND(op_RSHIFT_62, SN(64, 0xffffffff));
	RzILOpPure *op_RSHIFT_70 = SHIFTRA(DUP(Rtt), SN(32, 0x20));
	RzILOpPure *op_AND_72 = LOGAND(op_RSHIFT_70, SN(64, 0xffffffff));
	RzILOpPure *op_GT_75 = SGT(CAST(64, MSB(CAST(32, MSB(op_AND_64), DUP(op_AND_64))), CAST(32, MSB(DUP(op_AND_64)), DUP(op_AND_64))), CAST(64, MSB(CAST(32, MSB(op_AND_72), DUP(op_AND_72))), CAST(32, MSB(DUP(op_AND_72)), DUP(op_AND_72))));
	RzILOpPure *ite_cast_ut64_76 = ITE(op_GT_75, UN(64, 1), UN(64, 0));
	RzILOpPure *op_LSHIFT_77 = SHIFTL0(ite_cast_ut64_76, VARL("j"));
	RzILOpPure *op_OR_78 = LOGOR(op_AND_58, op_LSHIFT_77);
	RzILOpEffect *op_ASSIGN_80 = WRITE_REG(bundle, Pd_op, CAST(8, IL_FALSE, op_OR_78));

	// seq(h_tmp71; Pd = ((st8) ((((ut64) ((st32) Pd)) & (~(0x1 << j))) ...;
	RzILOpEffect *seq_82 = op_ASSIGN_80;

	// seq(seq(h_tmp71; Pd = ((st8) ((((ut64) ((st32) Pd)) & (~(0x1 << ...;
	RzILOpEffect *seq_83 = SEQN(2, seq_82, seq_52);

	// while ((j <= 0x7)) { seq(seq(h_tmp71; Pd = ((st8) ((((ut64) ((st32) Pd)) & (~(0x1 << ... };
	RzILOpPure *op_LE_48 = SLE(VARL("j"), SN(32, 7));
	RzILOpEffect *for_84 = REPEAT(op_LE_48, seq_83);

	// seq(j = 0x4; while ((j <= 0x7)) { seq(seq(h_tmp71; Pd = ((st8) ( ...;
	RzILOpEffect *seq_85 = SEQN(2, op_ASSIGN_46, for_84);

	RzILOpEffect *instruction_sequence = SEQN(2, seq_44, seq_85);
	return instruction_sequence;
}

// Pd = vcmpw.gtu(Rss,Rtt)
// Word-wise unsigned greater-than compare (UGT on zero-extended words);
// same predicate-bit layout as vcmpw.eq.
RzILOpEffect *hex_il_op_a2_vcmpwgtu(HexInsnPktBundle *bundle) {
	const HexInsn *hi = bundle->insn;
	HexPkt *pkt = bundle->pkt;
	// READ
	// Declare: st32 j;
	const HexOp *Pd_op = ISA2REG(hi, 'd', false);
	const HexOp *Rss_op = ISA2REG(hi, 's', false);
	RzILOpPure *Rss = READ_REG(pkt, Rss_op, false);
	const HexOp *Rtt_op = ISA2REG(hi, 't', false);
	RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false);

	// j = 0x0;
	RzILOpEffect *op_ASSIGN_2 = SETL("j", SN(32, 0));

	// HYB(++j);
	RzILOpEffect *op_INC_5 = SETL("j", INC(VARL("j"), 32));

	// h_tmp72 = HYB(++j);
	RzILOpEffect *op_ASSIGN_hybrid_tmp_7 = SETL("h_tmp72", VARL("j"));

	// seq(h_tmp72 = HYB(++j); HYB(++j));
	RzILOpEffect *seq_8 = SEQN(2, op_ASSIGN_hybrid_tmp_7, op_INC_5);

	// Pd = ((st8) ((((ut64) ((st32) Pd)) & (~(0x1 << j))) | (((((ut64) ((ut32) ((Rss >> 0x0) & 0xffffffff))) > ((ut64) ((ut32) ((Rtt >> 0x0) & 0xffffffff)))) ? 0x1 : 0x0) << j)));
	RzILOpPure *op_LSHIFT_11 = SHIFTL0(UN(64, 1), VARL("j"));
	RzILOpPure *op_NOT_12 = LOGNOT(op_LSHIFT_11);
	RzILOpPure *op_AND_15 = LOGAND(CAST(64, IL_FALSE, CAST(32, MSB(READ_REG(pkt, Pd_op, true)), READ_REG(pkt, Pd_op, true))), op_NOT_12);
	RzILOpPure *op_RSHIFT_20 = SHIFTRA(Rss, SN(32, 0));
	RzILOpPure *op_AND_22 = LOGAND(op_RSHIFT_20, SN(64, 0xffffffff));
	RzILOpPure *op_RSHIFT_29 = SHIFTRA(Rtt, SN(32, 0));
	RzILOpPure *op_AND_31 = LOGAND(op_RSHIFT_29, SN(64, 0xffffffff));
	RzILOpPure *op_GT_34 = UGT(CAST(64, IL_FALSE, CAST(32, IL_FALSE, op_AND_22)), CAST(64, IL_FALSE, CAST(32, IL_FALSE, op_AND_31)));
	RzILOpPure *ite_cast_ut64_35 = ITE(op_GT_34, UN(64, 1), UN(64, 0));
	RzILOpPure *op_LSHIFT_36 = SHIFTL0(ite_cast_ut64_35, VARL("j"));
	RzILOpPure *op_OR_37 = LOGOR(op_AND_15, op_LSHIFT_36);
	RzILOpEffect *op_ASSIGN_39 = WRITE_REG(bundle, Pd_op, CAST(8, IL_FALSE, op_OR_37));

	// seq(h_tmp72; Pd = ((st8) ((((ut64) ((st32) Pd)) & (~(0x1 << j))) ...;
	RzILOpEffect *seq_41 = op_ASSIGN_39;

	// seq(seq(h_tmp72; Pd = ((st8) ((((ut64) ((st32) Pd)) & (~(0x1 << ...;
	RzILOpEffect *seq_42 = SEQN(2, seq_41, seq_8);

	// while ((j <= 0x3)) { seq(seq(h_tmp72; Pd = ((st8) ((((ut64) ((st32) Pd)) & (~(0x1 << ... };
	RzILOpPure *op_LE_4 = SLE(VARL("j"), SN(32, 3));
	RzILOpEffect *for_43 = REPEAT(op_LE_4, seq_42);

	// seq(j = 0x0; while ((j <= 0x3)) { seq(seq(h_tmp72; Pd = ((st8) ( ...;
	RzILOpEffect *seq_44 = SEQN(2, op_ASSIGN_2, for_43);

	// j = 0x4;
	RzILOpEffect *op_ASSIGN_47 = SETL("j", SN(32, 4));

	// HYB(++j);
	RzILOpEffect *op_INC_50 = SETL("j", INC(VARL("j"), 32));

	// h_tmp73 = HYB(++j);
	RzILOpEffect *op_ASSIGN_hybrid_tmp_52 = SETL("h_tmp73", VARL("j"));

	// seq(h_tmp73 = HYB(++j); HYB(++j));
	RzILOpEffect *seq_53 = SEQN(2, op_ASSIGN_hybrid_tmp_52, op_INC_50);

	// Pd = ((st8) ((((ut64) ((st32) Pd)) & (~(0x1 << j))) | (((((ut64) ((ut32) ((Rss >> 0x20) & 0xffffffff))) > ((ut64) ((ut32) ((Rtt >> 0x20) & 0xffffffff)))) ? 0x1 : 0x0) << j)));
	RzILOpPure *op_LSHIFT_55 = SHIFTL0(UN(64, 1), VARL("j"));
	RzILOpPure *op_NOT_56 = LOGNOT(op_LSHIFT_55);
	RzILOpPure *op_AND_59 = LOGAND(CAST(64, IL_FALSE, CAST(32, MSB(READ_REG(pkt, Pd_op, true)), READ_REG(pkt, Pd_op, true))), op_NOT_56);
	RzILOpPure *op_RSHIFT_63 = SHIFTRA(DUP(Rss), SN(32, 0x20));
	RzILOpPure *op_AND_65 = LOGAND(op_RSHIFT_63, SN(64, 0xffffffff));
	RzILOpPure *op_RSHIFT_71 = SHIFTRA(DUP(Rtt), SN(32, 0x20));
	RzILOpPure *op_AND_73 = LOGAND(op_RSHIFT_71, SN(64, 0xffffffff));
	RzILOpPure *op_GT_76 = UGT(CAST(64, IL_FALSE, CAST(32, IL_FALSE, op_AND_65)), CAST(64, IL_FALSE, CAST(32, IL_FALSE, op_AND_73)));
	RzILOpPure *ite_cast_ut64_77 = ITE(op_GT_76, UN(64, 1), UN(64, 0));
	RzILOpPure *op_LSHIFT_78 = SHIFTL0(ite_cast_ut64_77, VARL("j"));
	RzILOpPure *op_OR_79 = LOGOR(op_AND_59, op_LSHIFT_78);
	RzILOpEffect *op_ASSIGN_81 = WRITE_REG(bundle, Pd_op, CAST(8, IL_FALSE, op_OR_79));

	// seq(h_tmp73; Pd = ((st8) ((((ut64) ((st32) Pd)) & (~(0x1 << j))) ...;
	RzILOpEffect *seq_83 = op_ASSIGN_81;

	// seq(seq(h_tmp73; Pd = ((st8) ((((ut64) ((st32) Pd)) & (~(0x1 << ...;
	RzILOpEffect *seq_84 = SEQN(2, seq_83, seq_53);

	// while ((j <= 0x7)) { seq(seq(h_tmp73; Pd = ((st8) ((((ut64) ((st32) Pd)) & (~(0x1 << ... };
	RzILOpPure *op_LE_49 = SLE(VARL("j"), SN(32, 7));
	RzILOpEffect *for_85 = REPEAT(op_LE_49, seq_84);

	// seq(j = 0x4; while ((j <= 0x7)) { seq(seq(h_tmp73; Pd = ((st8) ( ...;
	RzILOpEffect *seq_86 = SEQN(2, op_ASSIGN_47, for_85);

	RzILOpEffect *instruction_sequence = SEQN(2, seq_44, seq_86);
	return instruction_sequence;
}

// Rdd = vconj(Rss):sat
// Complex conjugate: negates halfwords 1 and 3 of Rss (lane shifts 0x10 and
// 0x30, presumably the imaginary parts — TODO confirm against the ISA manual)
// with signed 16-bit saturation, setting USR.OVF when the negation does not
// fit in 16 bits; halfwords 0 and 2 are copied through unchanged.
RzILOpEffect *hex_il_op_a2_vconj(HexInsnPktBundle *bundle) {
	const HexInsn *hi = bundle->insn;
	HexPkt *pkt = bundle->pkt;
	// READ
	const HexOp *Rdd_op = ISA2REG(hi, 'd', false);
	const HexOp *Rss_op = ISA2REG(hi, 's', false);
	RzILOpPure *Rss = READ_REG(pkt, Rss_op, false);

	// set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1));
	RzILOpEffect *set_usr_field_call_51 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1)));

	// HYB(gcc_expr_if ((sextract64(((ut64) (-((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))))), 0x0, 0x10) == ((st64) (-((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((-((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff))))) < 0x0) ? (-(0x1 << 0xf)) : (0x1 << 0xf) - ((st64) 0x1)));
	RzILOpPure *op_RSHIFT_15 = SHIFTRA(Rss, SN(32, 16));
	RzILOpPure *op_AND_18 = LOGAND(op_RSHIFT_15, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff)));
	RzILOpPure *op_NEG_21 = NEG(CAST(32, MSB(CAST(16, MSB(op_AND_18), DUP(op_AND_18))), CAST(16, MSB(DUP(op_AND_18)), DUP(op_AND_18))));
	RzILOpPure *op_RSHIFT_30 = SHIFTRA(DUP(Rss), SN(32, 16));
	RzILOpPure *op_AND_33 = LOGAND(op_RSHIFT_30, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff)));
	RzILOpPure *op_NEG_36 = NEG(CAST(32, MSB(CAST(16, MSB(op_AND_33), DUP(op_AND_33))), CAST(16, MSB(DUP(op_AND_33)), DUP(op_AND_33))));
	RzILOpPure *op_EQ_38 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_NEG_21), SN(32, 0), SN(32, 16)), CAST(64, MSB(op_NEG_36), DUP(op_NEG_36)));
	RzILOpPure *op_RSHIFT_55 = SHIFTRA(DUP(Rss), SN(32, 16));
	RzILOpPure *op_AND_58 = LOGAND(op_RSHIFT_55, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff)));
	RzILOpPure *op_NEG_61 = NEG(CAST(32, MSB(CAST(16, MSB(op_AND_58), DUP(op_AND_58))), CAST(16, MSB(DUP(op_AND_58)), DUP(op_AND_58))));
	RzILOpPure *op_LT_63 = SLT(op_NEG_61, SN(32, 0));
	RzILOpPure *op_LSHIFT_68 = SHIFTL0(SN(64, 1), SN(32, 15));
	RzILOpPure *op_NEG_69 = NEG(op_LSHIFT_68);
	RzILOpPure *op_LSHIFT_74 = SHIFTL0(SN(64, 1), SN(32, 15));
	RzILOpPure *op_SUB_77 = SUB(op_LSHIFT_74, CAST(64, MSB(SN(32, 1)), SN(32, 1)));
	RzILOpPure *cond_78 = ITE(op_LT_63, op_NEG_69, op_SUB_77);
	RzILOpEffect *gcc_expr_79 = BRANCH(op_EQ_38, EMPTY(), set_usr_field_call_51);

	// h_tmp74 = HYB(gcc_expr_if ((sextract64(((ut64) (-((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))))), 0x0, 0x10) == ((st64) (-((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((-((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff))))) < 0x0) ? (-(0x1 << 0xf)) : (0x1 << 0xf) - ((st64) 0x1)));
	RzILOpEffect *op_ASSIGN_hybrid_tmp_81 = SETL("h_tmp74", cond_78);

	// seq(HYB(gcc_expr_if ((sextract64(((ut64) (-((st32) ((st16) ((Rss ...;
	RzILOpEffect *seq_82 = SEQN(2, gcc_expr_79, op_ASSIGN_hybrid_tmp_81);

	// Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << 0x10)))) | (((ut64) (((sextract64(((ut64) (-((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))))), 0x0, 0x10) == ((st64) (-((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff))))))) ? ((st64) (-((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))))) : h_tmp74) & ((st64) 0xffff))) << 0x10)));
	RzILOpPure *op_LSHIFT_5 = SHIFTL0(SN(64, 0xffff), SN(32, 16));
	RzILOpPure *op_NOT_6 = LOGNOT(op_LSHIFT_5);
	RzILOpPure *op_AND_7 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_6);
	RzILOpPure *op_RSHIFT_42 = SHIFTRA(DUP(Rss), SN(32, 16));
	RzILOpPure *op_AND_45 = LOGAND(op_RSHIFT_42, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff)));
	RzILOpPure *op_NEG_48 = NEG(CAST(32, MSB(CAST(16, MSB(op_AND_45), DUP(op_AND_45))), CAST(16, MSB(DUP(op_AND_45)), DUP(op_AND_45))));
	RzILOpPure *cond_84 = ITE(DUP(op_EQ_38), CAST(64, MSB(op_NEG_48), DUP(op_NEG_48)), VARL("h_tmp74"));
	RzILOpPure *op_AND_87 = LOGAND(cond_84, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff)));
	RzILOpPure *op_LSHIFT_92 = SHIFTL0(CAST(64, IL_FALSE, op_AND_87), SN(32, 16));
	RzILOpPure *op_OR_94 = LOGOR(CAST(64, IL_FALSE, op_AND_7), op_LSHIFT_92);
	RzILOpEffect *op_ASSIGN_96 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_94));

	// seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) (-((st32) ((st16) ( ...;
	RzILOpEffect *seq_97 = SEQN(2, seq_82, op_ASSIGN_96);

	// Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << 0x0)))) | (((ut64) (((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) & 0xffff)) << 0x0)));
	RzILOpPure *op_LSHIFT_103 = SHIFTL0(SN(64, 0xffff), SN(32, 0));
	RzILOpPure *op_NOT_104 = LOGNOT(op_LSHIFT_103);
	RzILOpPure *op_AND_105 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_104);
	RzILOpPure *op_RSHIFT_109 = SHIFTRA(DUP(Rss), SN(32, 0));
	RzILOpPure *op_AND_112 = LOGAND(op_RSHIFT_109, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff)));
	RzILOpPure *op_AND_116 = LOGAND(CAST(32, MSB(CAST(16, MSB(op_AND_112), DUP(op_AND_112))), CAST(16, MSB(DUP(op_AND_112)), DUP(op_AND_112))), SN(32, 0xffff));
	RzILOpPure *op_LSHIFT_121 = SHIFTL0(CAST(64, IL_FALSE, op_AND_116), SN(32, 0));
	RzILOpPure *op_OR_123 = LOGOR(CAST(64, IL_FALSE, op_AND_105), op_LSHIFT_121);
	RzILOpEffect *op_ASSIGN_125 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_123));

	// set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1));
	RzILOpEffect *set_usr_field_call_176 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1)));

	// HYB(gcc_expr_if ((sextract64(((ut64) (-((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))))), 0x0, 0x10) == ((st64) (-((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((-((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff))))) < 0x0) ? (-(0x1 << 0xf)) : (0x1 << 0xf) - ((st64) 0x1)));
	RzILOpPure *op_RSHIFT_140 = SHIFTRA(DUP(Rss), SN(32, 0x30));
	RzILOpPure *op_AND_143 = LOGAND(op_RSHIFT_140, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff)));
	RzILOpPure *op_NEG_146 = NEG(CAST(32, MSB(CAST(16, MSB(op_AND_143), DUP(op_AND_143))), CAST(16, MSB(DUP(op_AND_143)), DUP(op_AND_143))));
	RzILOpPure *op_RSHIFT_155 = SHIFTRA(DUP(Rss), SN(32, 0x30));
	RzILOpPure *op_AND_158 = LOGAND(op_RSHIFT_155, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff)));
	RzILOpPure *op_NEG_161 = NEG(CAST(32, MSB(CAST(16, MSB(op_AND_158), DUP(op_AND_158))), CAST(16, MSB(DUP(op_AND_158)), DUP(op_AND_158))));
	RzILOpPure *op_EQ_163 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_NEG_146), SN(32, 0), SN(32, 16)), CAST(64, MSB(op_NEG_161), DUP(op_NEG_161)));
	RzILOpPure *op_RSHIFT_180 = SHIFTRA(DUP(Rss), SN(32, 0x30));
	RzILOpPure *op_AND_183 = LOGAND(op_RSHIFT_180, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff)));
	RzILOpPure *op_NEG_186 = NEG(CAST(32, MSB(CAST(16, MSB(op_AND_183), DUP(op_AND_183))), CAST(16, MSB(DUP(op_AND_183)), DUP(op_AND_183))));
	RzILOpPure *op_LT_188 = SLT(op_NEG_186, SN(32, 0));
	RzILOpPure *op_LSHIFT_193 = SHIFTL0(SN(64, 1), SN(32, 15));
	RzILOpPure *op_NEG_194 = NEG(op_LSHIFT_193);
	RzILOpPure *op_LSHIFT_199 = SHIFTL0(SN(64, 1), SN(32, 15));
	RzILOpPure *op_SUB_202 = SUB(op_LSHIFT_199, CAST(64, MSB(SN(32, 1)), SN(32, 1)));
	RzILOpPure *cond_203 = ITE(op_LT_188, op_NEG_194, op_SUB_202);
	RzILOpEffect *gcc_expr_204 = BRANCH(op_EQ_163, EMPTY(), set_usr_field_call_176);

	// h_tmp75 = HYB(gcc_expr_if ((sextract64(((ut64) (-((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))))), 0x0, 0x10) == ((st64) (-((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((-((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff))))) < 0x0) ? (-(0x1 << 0xf)) : (0x1 << 0xf) - ((st64) 0x1)));
	RzILOpEffect *op_ASSIGN_hybrid_tmp_206 = SETL("h_tmp75", cond_203);

	// seq(HYB(gcc_expr_if ((sextract64(((ut64) (-((st32) ((st16) ((Rss ...;
	RzILOpEffect *seq_207 = SEQN(2, gcc_expr_204, op_ASSIGN_hybrid_tmp_206);

	// Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << 0x30)))) | (((ut64) (((sextract64(((ut64) (-((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))))), 0x0, 0x10) == ((st64) (-((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff))))))) ? ((st64) (-((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))))) : h_tmp75) & ((st64) 0xffff))) << 0x30)));
	RzILOpPure *op_LSHIFT_131 = SHIFTL0(SN(64, 0xffff), SN(32, 0x30));
	RzILOpPure *op_NOT_132 = LOGNOT(op_LSHIFT_131);
	RzILOpPure *op_AND_133 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_132);
	RzILOpPure *op_RSHIFT_167 = SHIFTRA(DUP(Rss), SN(32, 0x30));
	RzILOpPure *op_AND_170 = LOGAND(op_RSHIFT_167, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff)));
	RzILOpPure *op_NEG_173 = NEG(CAST(32, MSB(CAST(16, MSB(op_AND_170), DUP(op_AND_170))), CAST(16, MSB(DUP(op_AND_170)), DUP(op_AND_170))));
	RzILOpPure *cond_209 = ITE(DUP(op_EQ_163), CAST(64, MSB(op_NEG_173), DUP(op_NEG_173)), VARL("h_tmp75"));
	RzILOpPure *op_AND_212 = LOGAND(cond_209, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff)));
	RzILOpPure *op_LSHIFT_217 = SHIFTL0(CAST(64, IL_FALSE, op_AND_212), SN(32, 0x30));
	RzILOpPure *op_OR_219 = LOGOR(CAST(64, IL_FALSE, op_AND_133), op_LSHIFT_217);
	RzILOpEffect *op_ASSIGN_221 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_219));

	// seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) (-((st32) ((st16) ( ...;
	RzILOpEffect *seq_222 = SEQN(2, seq_207, op_ASSIGN_221);

	// Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << 0x20)))) | (((ut64) (((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) & 0xffff)) << 0x20)));
	RzILOpPure *op_LSHIFT_228 = SHIFTL0(SN(64, 0xffff), SN(32, 0x20));
	RzILOpPure *op_NOT_229 = LOGNOT(op_LSHIFT_228);
	RzILOpPure *op_AND_230 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_229);
	RzILOpPure *op_RSHIFT_234 = SHIFTRA(DUP(Rss), SN(32, 0x20));
	RzILOpPure *op_AND_237 = LOGAND(op_RSHIFT_234, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff)));
	RzILOpPure *op_AND_241 = LOGAND(CAST(32, MSB(CAST(16, MSB(op_AND_237), DUP(op_AND_237))), CAST(16, MSB(DUP(op_AND_237)), DUP(op_AND_237))), SN(32, 0xffff));
	RzILOpPure *op_LSHIFT_246 = SHIFTL0(CAST(64, IL_FALSE, op_AND_241), SN(32, 0x20));
	RzILOpPure *op_OR_248 = LOGOR(CAST(64, IL_FALSE, op_AND_230), op_LSHIFT_246);
	RzILOpEffect *op_ASSIGN_250 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_248));

	RzILOpEffect *instruction_sequence = SEQN(4, seq_97, op_ASSIGN_125, seq_222, op_ASSIGN_250);
	return instruction_sequence;
}

// Rdd = vmaxb(Rtt,Rss)
// Byte-wise signed maximum of Rtt and Rss over all 8 byte lanes (SGT on
// sign-extended bytes).
RzILOpEffect *hex_il_op_a2_vmaxb(HexInsnPktBundle *bundle) {
	const HexInsn *hi = bundle->insn;
	HexPkt *pkt = bundle->pkt;
	// READ
	// Declare: st32 i;
	const HexOp *Rdd_op = ISA2REG(hi, 'd', false);
	const HexOp *Rtt_op = ISA2REG(hi, 't', false);
	RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false);
	const HexOp *Rss_op = ISA2REG(hi, 's', false);
	RzILOpPure *Rss = READ_REG(pkt, Rss_op, false);

	// i = 0x0;
	RzILOpEffect *op_ASSIGN_2 = SETL("i", SN(32, 0));

	// HYB(++i);
	RzILOpEffect *op_INC_5 = SETL("i", INC(VARL("i"), 32));

	// h_tmp76 = HYB(++i);
	RzILOpEffect *op_ASSIGN_hybrid_tmp_7 = SETL("h_tmp76", VARL("i"));

	// seq(h_tmp76 = HYB(++i); HYB(++i));
	RzILOpEffect *seq_8 = SEQN(2, op_ASSIGN_hybrid_tmp_7, op_INC_5);

	// Rdd = ((st64) (((ut64) (Rdd & (~(0xff << i * 0x8)))) | (((ut64) (((st64) ((st32) ((((st8) ((Rtt >> i * 0x8) & ((st64) 0xff))) > ((st8) ((Rss >> i * 0x8) & ((st64) 0xff)))) ? ((st8) ((Rtt >> i * 0x8) & ((st64) 0xff))) : ((st8) ((Rss >> i * 0x8) & ((st64) 0xff)))))) & 0xff)) << i * 0x8)));
	RzILOpPure *op_MUL_12 = MUL(VARL("i"), SN(32, 8));
	RzILOpPure *op_LSHIFT_13 = SHIFTL0(SN(64, 0xff), op_MUL_12);
	RzILOpPure *op_NOT_14 = LOGNOT(op_LSHIFT_13);
	RzILOpPure *op_AND_15 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_14);
	RzILOpPure *op_MUL_18 = MUL(VARL("i"), SN(32, 8));
	RzILOpPure *op_RSHIFT_19 = SHIFTRA(Rtt, op_MUL_18);
	RzILOpPure *op_AND_22 = LOGAND(op_RSHIFT_19, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff)));
	RzILOpPure *op_MUL_26 = MUL(VARL("i"), SN(32, 8));
	RzILOpPure *op_RSHIFT_27 = SHIFTRA(Rss, op_MUL_26);
	RzILOpPure *op_AND_30 = LOGAND(op_RSHIFT_27, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff)));
	RzILOpPure *op_GT_32 = SGT(CAST(8, MSB(op_AND_22), DUP(op_AND_22)), CAST(8, MSB(op_AND_30), DUP(op_AND_30)));
	RzILOpPure *op_MUL_34 = MUL(VARL("i"), SN(32, 8));
	RzILOpPure *op_RSHIFT_35 = SHIFTRA(DUP(Rtt), op_MUL_34);
	RzILOpPure *op_AND_38 = LOGAND(op_RSHIFT_35, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff)));
	RzILOpPure *op_MUL_41 = MUL(VARL("i"), SN(32, 8));
	RzILOpPure *op_RSHIFT_42 = SHIFTRA(DUP(Rss), op_MUL_41);
	RzILOpPure *op_AND_45 = LOGAND(op_RSHIFT_42, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff)));
	RzILOpPure *cond_47 = ITE(op_GT_32, CAST(8, MSB(op_AND_38), DUP(op_AND_38)), CAST(8, MSB(op_AND_45), DUP(op_AND_45)));
	RzILOpPure *op_AND_51 = LOGAND(CAST(64, MSB(CAST(32, MSB(cond_47), DUP(cond_47))), CAST(32, MSB(DUP(cond_47)), DUP(cond_47))), SN(64, 0xff));
	RzILOpPure *op_MUL_54 = MUL(VARL("i"), SN(32, 8));
	RzILOpPure *op_LSHIFT_55 = SHIFTL0(CAST(64, IL_FALSE, op_AND_51), op_MUL_54);
	RzILOpPure *op_OR_57 = LOGOR(CAST(64, IL_FALSE, op_AND_15), op_LSHIFT_55);
	RzILOpEffect *op_ASSIGN_59 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_57));

	// seq(h_tmp76; Rdd = ((st64) (((ut64) (Rdd & (~(0xff << i * 0x8))) ...;
	RzILOpEffect *seq_61 = op_ASSIGN_59;

	// seq(seq(h_tmp76; Rdd = ((st64) (((ut64) (Rdd & (~(0xff << i * 0x ...;
	RzILOpEffect *seq_62 = SEQN(2, seq_61, seq_8);

	// while ((i < 0x8)) { seq(seq(h_tmp76; Rdd = ((st64) (((ut64) (Rdd & (~(0xff << i * 0x ... };
	RzILOpPure *op_LT_4 = SLT(VARL("i"), SN(32, 8));
	RzILOpEffect *for_63 = REPEAT(op_LT_4, seq_62);

	// seq(i = 0x0; while ((i < 0x8)) { seq(seq(h_tmp76; Rdd = ((st64) ...;
	RzILOpEffect *seq_64 = SEQN(2, op_ASSIGN_2, for_63);

	RzILOpEffect *instruction_sequence = seq_64;
	return instruction_sequence;
}

// Rdd = vmaxh(Rtt,Rss)
// Halfword-wise signed maximum of Rtt and Rss over the 4 halfword lanes.
RzILOpEffect *hex_il_op_a2_vmaxh(HexInsnPktBundle *bundle) {
	const HexInsn *hi = bundle->insn;
	HexPkt *pkt = bundle->pkt;
	// READ
	// Declare: st32 i;
	const HexOp *Rdd_op = ISA2REG(hi, 'd', false);
	const HexOp *Rtt_op = ISA2REG(hi, 't', false);
	RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false);
	const HexOp *Rss_op = ISA2REG(hi, 's', false);
	RzILOpPure *Rss = READ_REG(pkt, Rss_op, false);

	// i = 0x0;
	RzILOpEffect *op_ASSIGN_2 = SETL("i", SN(32, 0));

	// HYB(++i);
	RzILOpEffect *op_INC_5 = SETL("i", INC(VARL("i"), 32));

	// h_tmp77 = HYB(++i);
	RzILOpEffect *op_ASSIGN_hybrid_tmp_7 = SETL("h_tmp77", VARL("i"));

	// seq(h_tmp77 = HYB(++i); HYB(++i));
	RzILOpEffect *seq_8 = SEQN(2, op_ASSIGN_hybrid_tmp_7, op_INC_5);

	// Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * 0x10)))) | (((ut64) (((st32) ((((st16) ((Rtt >> i * 0x10) & ((st64) 0xffff))) > ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff)))) ? ((st16) ((Rtt >> i * 0x10) & ((st64) 0xffff))) : ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff))))) & 0xffff)) << i * 0x10)));
	RzILOpPure *op_MUL_12 = MUL(VARL("i"), SN(32, 16));
	RzILOpPure *op_LSHIFT_13 = SHIFTL0(SN(64, 0xffff), op_MUL_12);
	RzILOpPure *op_NOT_14 = LOGNOT(op_LSHIFT_13);
	RzILOpPure *op_AND_15 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_14);
	RzILOpPure *op_MUL_18 = MUL(VARL("i"), SN(32, 16));
	RzILOpPure *op_RSHIFT_19 = SHIFTRA(Rtt, op_MUL_18);
	RzILOpPure *op_AND_22 = LOGAND(op_RSHIFT_19, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff)));
	RzILOpPure *op_MUL_26 = MUL(VARL("i"), SN(32, 16));
	RzILOpPure *op_RSHIFT_27 = SHIFTRA(Rss, op_MUL_26);
	RzILOpPure *op_AND_30 = LOGAND(op_RSHIFT_27, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff)));
	RzILOpPure *op_GT_32 = SGT(CAST(16, MSB(op_AND_22), DUP(op_AND_22)), CAST(16, MSB(op_AND_30), DUP(op_AND_30)));
	RzILOpPure *op_MUL_34 = MUL(VARL("i"), SN(32, 16));
	RzILOpPure *op_RSHIFT_35 = SHIFTRA(DUP(Rtt), op_MUL_34);
	RzILOpPure *op_AND_38 = LOGAND(op_RSHIFT_35, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff)));
	RzILOpPure *op_MUL_41 = MUL(VARL("i"), SN(32, 16));
	RzILOpPure *op_RSHIFT_42 = SHIFTRA(DUP(Rss), op_MUL_41);
	RzILOpPure *op_AND_45 = LOGAND(op_RSHIFT_42, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff)));
	RzILOpPure *cond_47 = ITE(op_GT_32, CAST(16, MSB(op_AND_38), DUP(op_AND_38)), CAST(16, MSB(op_AND_45), DUP(op_AND_45)));
	RzILOpPure *op_AND_50 = LOGAND(CAST(32, MSB(cond_47), DUP(cond_47)), SN(32, 0xffff));
	RzILOpPure *op_MUL_53 = MUL(VARL("i"), SN(32, 16));
	RzILOpPure *op_LSHIFT_54 = SHIFTL0(CAST(64, IL_FALSE, op_AND_50), op_MUL_53);
	RzILOpPure *op_OR_56 = LOGOR(CAST(64, IL_FALSE, op_AND_15), op_LSHIFT_54);
	RzILOpEffect *op_ASSIGN_58 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_56));

	// seq(h_tmp77; Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * 0x10 ...;
	RzILOpEffect *seq_60 = op_ASSIGN_58;

	// seq(seq(h_tmp77; Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * ...;
	RzILOpEffect *seq_61 = SEQN(2, seq_60, seq_8);

	// while ((i < 0x4)) { seq(seq(h_tmp77; Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * ... };
	RzILOpPure *op_LT_4 = SLT(VARL("i"), SN(32, 4));
	RzILOpEffect *for_62 = REPEAT(op_LT_4, seq_61);

	// seq(i = 0x0; while ((i < 0x4)) { seq(seq(h_tmp77; Rdd = ((st64) ...;
	RzILOpEffect *seq_63 = SEQN(2, op_ASSIGN_2, for_62);

	RzILOpEffect *instruction_sequence = seq_63;
	return instruction_sequence;
}

// Rdd = vmaxub(Rtt,Rss)
// Byte-wise unsigned maximum of Rtt and Rss over all 8 byte lanes (UGT on
// zero-extended bytes).
RzILOpEffect *hex_il_op_a2_vmaxub(HexInsnPktBundle *bundle) {
	const HexInsn *hi = bundle->insn;
	HexPkt *pkt = bundle->pkt;
	// READ
	// Declare: st32 i;
	const HexOp *Rdd_op = ISA2REG(hi, 'd', false);
	const HexOp *Rtt_op = ISA2REG(hi, 't', false);
	RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false);
	const HexOp *Rss_op = ISA2REG(hi, 's', false);
	RzILOpPure *Rss = READ_REG(pkt, Rss_op, false);

	// i = 0x0;
	RzILOpEffect *op_ASSIGN_2 = SETL("i", SN(32, 0));

	// HYB(++i);
	RzILOpEffect *op_INC_5 = SETL("i", INC(VARL("i"), 32));

	// h_tmp78 = HYB(++i);
	RzILOpEffect *op_ASSIGN_hybrid_tmp_7 = SETL("h_tmp78", VARL("i"));

	// seq(h_tmp78 = HYB(++i); HYB(++i));
	RzILOpEffect *seq_8 = SEQN(2, op_ASSIGN_hybrid_tmp_7, op_INC_5);

	// Rdd = ((st64) (((ut64) (Rdd & (~(0xff << i * 0x8)))) | (((ut64) (((st64) ((st32) ((((ut8) ((Rtt >> i * 0x8) & ((st64) 0xff))) > ((ut8) ((Rss >> i * 0x8) & ((st64) 0xff)))) ? ((ut8) ((Rtt >> i * 0x8) & ((st64) 0xff))) : ((ut8) ((Rss >> i * 0x8) & ((st64) 0xff)))))) & 0xff)) << i * 0x8)));
	RzILOpPure *op_MUL_12 = MUL(VARL("i"), SN(32, 8));
	RzILOpPure *op_LSHIFT_13 = SHIFTL0(SN(64, 0xff), op_MUL_12);
	RzILOpPure *op_NOT_14 = LOGNOT(op_LSHIFT_13);
	RzILOpPure *op_AND_15 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_14);
	RzILOpPure *op_MUL_18 = MUL(VARL("i"), SN(32, 8));
	RzILOpPure *op_RSHIFT_19 = SHIFTRA(Rtt, op_MUL_18);
	RzILOpPure *op_AND_22 = LOGAND(op_RSHIFT_19, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff)));
	RzILOpPure *op_MUL_26 = MUL(VARL("i"), SN(32, 8));
	RzILOpPure *op_RSHIFT_27 = SHIFTRA(Rss, op_MUL_26);
	RzILOpPure *op_AND_30 = LOGAND(op_RSHIFT_27, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff)));
	RzILOpPure *op_GT_32 = UGT(CAST(8, IL_FALSE, op_AND_22), CAST(8, IL_FALSE, op_AND_30));
	RzILOpPure *op_MUL_34 = MUL(VARL("i"), SN(32, 8));
	RzILOpPure *op_RSHIFT_35 = SHIFTRA(DUP(Rtt), op_MUL_34);
	RzILOpPure *op_AND_38 = LOGAND(op_RSHIFT_35, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff)));
	RzILOpPure *op_MUL_41 = MUL(VARL("i"), SN(32, 8));
	RzILOpPure *op_RSHIFT_42 = SHIFTRA(DUP(Rss), op_MUL_41);
	RzILOpPure *op_AND_45 = LOGAND(op_RSHIFT_42, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff)));
	RzILOpPure *cond_47 = ITE(op_GT_32, CAST(8, IL_FALSE, op_AND_38), CAST(8, IL_FALSE, op_AND_45));
	RzILOpPure *op_AND_51 = LOGAND(CAST(64, MSB(CAST(32, IL_FALSE, cond_47)), CAST(32, IL_FALSE, DUP(cond_47))), SN(64, 0xff));
	RzILOpPure *op_MUL_54 = MUL(VARL("i"), SN(32, 8));
	RzILOpPure *op_LSHIFT_55 = SHIFTL0(CAST(64, IL_FALSE, op_AND_51), op_MUL_54);
	RzILOpPure *op_OR_57 = LOGOR(CAST(64, IL_FALSE, op_AND_15), op_LSHIFT_55);
	RzILOpEffect *op_ASSIGN_59 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_57));

	// seq(h_tmp78; Rdd = ((st64) (((ut64) (Rdd & (~(0xff << i * 0x8))) ...;
	RzILOpEffect *seq_61 = op_ASSIGN_59;

	// seq(seq(h_tmp78; Rdd = ((st64) (((ut64) (Rdd & (~(0xff << i * 0x ...;
	RzILOpEffect *seq_62 = SEQN(2, seq_61, seq_8);

	// while ((i < 0x8)) { seq(seq(h_tmp78; Rdd = ((st64) (((ut64) (Rdd & (~(0xff << i * 0x ... };
	RzILOpPure *op_LT_4 = SLT(VARL("i"), SN(32, 8));
	RzILOpEffect *for_63 = REPEAT(op_LT_4, seq_62);

	// seq(i = 0x0; while ((i < 0x8)) { seq(seq(h_tmp78; Rdd = ((st64) ...;
	RzILOpEffect *seq_64 = SEQN(2, op_ASSIGN_2, for_63);

	RzILOpEffect *instruction_sequence = seq_64;
	return instruction_sequence;
}

// Rdd = vmaxuh(Rtt,Rss)
// Halfword-wise unsigned maximum (UGT on zero-extended halfwords).
// NOTE(review): this handler continues past the end of this chunk.
RzILOpEffect *hex_il_op_a2_vmaxuh(HexInsnPktBundle *bundle) {
	const HexInsn *hi = bundle->insn;
	HexPkt *pkt = bundle->pkt;
	// READ
	// Declare: st32 i;
	const HexOp *Rdd_op = ISA2REG(hi, 'd', false);
	const HexOp *Rtt_op = ISA2REG(hi, 't', false);
	RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false);
	const HexOp *Rss_op = ISA2REG(hi, 's', false);
	RzILOpPure *Rss = READ_REG(pkt, Rss_op, false);

	// i = 0x0;
	RzILOpEffect *op_ASSIGN_2 = SETL("i", SN(32, 0));

	// HYB(++i);
	RzILOpEffect *op_INC_5 = SETL("i", INC(VARL("i"), 32));

	// h_tmp79 = HYB(++i);
	RzILOpEffect *op_ASSIGN_hybrid_tmp_7 = SETL("h_tmp79", VARL("i"));

	// seq(h_tmp79 = HYB(++i); HYB(++i));
	RzILOpEffect *seq_8 = SEQN(2, op_ASSIGN_hybrid_tmp_7, op_INC_5);

	// Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * 0x10)))) | (((ut64) (((st32) ((((ut16) ((Rtt >> i * 0x10) & ((st64) 0xffff))) > ((ut16) ((Rss >> i * 0x10) & ((st64) 0xffff)))) ? ((ut16) ((Rtt >> i * 0x10) & ((st64) 0xffff))) : ((ut16) ((Rss >> i * 0x10) & ((st64) 0xffff))))) & 0xffff)) << i * 0x10)));
	RzILOpPure *op_MUL_12 = MUL(VARL("i"), SN(32, 16));
	RzILOpPure *op_LSHIFT_13 = SHIFTL0(SN(64, 0xffff), op_MUL_12);
	RzILOpPure *op_NOT_14 = LOGNOT(op_LSHIFT_13);
	RzILOpPure *op_AND_15 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_14);
	RzILOpPure *op_MUL_18 = MUL(VARL("i"), SN(32, 16));
	RzILOpPure *op_RSHIFT_19 = SHIFTRA(Rtt, op_MUL_18);
	RzILOpPure *op_AND_22 = LOGAND(op_RSHIFT_19, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff)));
	RzILOpPure *op_MUL_26 = MUL(VARL("i"), SN(32, 16));
	RzILOpPure *op_RSHIFT_27 = SHIFTRA(Rss, op_MUL_26);
	RzILOpPure *op_AND_30 = LOGAND(op_RSHIFT_27, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff)));
	RzILOpPure *op_GT_32 = UGT(CAST(16, IL_FALSE, op_AND_22), CAST(16, IL_FALSE, op_AND_30));
	RzILOpPure *op_MUL_34 = MUL(VARL("i"), SN(32, 16));
	RzILOpPure *op_RSHIFT_35 = SHIFTRA(DUP(Rtt), op_MUL_34);
	RzILOpPure *op_AND_38 = LOGAND(op_RSHIFT_35, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff)));
	RzILOpPure *op_MUL_41 = MUL(VARL("i"), SN(32, 16));
	RzILOpPure *op_RSHIFT_42 = SHIFTRA(DUP(Rss), op_MUL_41);
	RzILOpPure *op_AND_45 = LOGAND(op_RSHIFT_42, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff)));
	RzILOpPure *cond_47 = ITE(op_GT_32, CAST(16, IL_FALSE, op_AND_38), CAST(16, IL_FALSE, op_AND_45));
	RzILOpPure *op_AND_50 = LOGAND(CAST(32, IL_FALSE, cond_47), SN(32, 0xffff));
	RzILOpPure *op_MUL_53 = MUL(VARL("i"), SN(32, 16));
	RzILOpPure *op_LSHIFT_54 = SHIFTL0(CAST(64, IL_FALSE, op_AND_50), op_MUL_53);
	RzILOpPure *op_OR_56 = LOGOR(CAST(64, IL_FALSE, op_AND_15), op_LSHIFT_54);
	RzILOpEffect *op_ASSIGN_58 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_56));

	// seq(h_tmp79; Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * 0x10 ...;
	RzILOpEffect *seq_60 = op_ASSIGN_58;

	// seq(seq(h_tmp79; Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * ...;
	RzILOpEffect *seq_61 = SEQN(2, seq_60, seq_8);
+ + // while ((i < 0x4)) { seq(seq(h_tmp79; Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * ... }; + RzILOpPure *op_LT_4 = SLT(VARL("i"), SN(32, 4)); + RzILOpEffect *for_62 = REPEAT(op_LT_4, seq_61); + + // seq(i = 0x0; while ((i < 0x4)) { seq(seq(h_tmp79; Rdd = ((st64) ...; + RzILOpEffect *seq_63 = SEQN(2, op_ASSIGN_2, for_62); + + RzILOpEffect *instruction_sequence = seq_63; + return instruction_sequence; +} + +// Rdd = vmaxuw(Rtt,Rss) +RzILOpEffect *hex_il_op_a2_vmaxuw(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: st32 i; + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_2 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_5 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp80 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_7 = SETL("h_tmp80", VARL("i")); + + // seq(h_tmp80 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_8 = SEQN(2, op_ASSIGN_hybrid_tmp_7, op_INC_5); + + // Rdd = ((st64) (((ut64) (Rdd & (~(0xffffffff << i * 0x20)))) | ((((((ut64) ((ut32) ((Rtt >> i * 0x20) & 0xffffffff))) > ((ut64) ((ut32) ((Rss >> i * 0x20) & 0xffffffff)))) ? 
((ut64) ((ut32) ((Rtt >> i * 0x20) & 0xffffffff))) : ((ut64) ((ut32) ((Rss >> i * 0x20) & 0xffffffff)))) & ((ut64) 0xffffffff)) << i * 0x20))); + RzILOpPure *op_MUL_12 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_LSHIFT_13 = SHIFTL0(SN(64, 0xffffffff), op_MUL_12); + RzILOpPure *op_NOT_14 = LOGNOT(op_LSHIFT_13); + RzILOpPure *op_AND_15 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_14); + RzILOpPure *op_MUL_18 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_RSHIFT_19 = SHIFTRA(Rtt, op_MUL_18); + RzILOpPure *op_AND_21 = LOGAND(op_RSHIFT_19, SN(64, 0xffffffff)); + RzILOpPure *op_MUL_26 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_RSHIFT_27 = SHIFTRA(Rss, op_MUL_26); + RzILOpPure *op_AND_29 = LOGAND(op_RSHIFT_27, SN(64, 0xffffffff)); + RzILOpPure *op_GT_32 = UGT(CAST(64, IL_FALSE, CAST(32, IL_FALSE, op_AND_21)), CAST(64, IL_FALSE, CAST(32, IL_FALSE, op_AND_29))); + RzILOpPure *op_MUL_34 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_RSHIFT_35 = SHIFTRA(DUP(Rtt), op_MUL_34); + RzILOpPure *op_AND_37 = LOGAND(op_RSHIFT_35, SN(64, 0xffffffff)); + RzILOpPure *op_MUL_41 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_RSHIFT_42 = SHIFTRA(DUP(Rss), op_MUL_41); + RzILOpPure *op_AND_44 = LOGAND(op_RSHIFT_42, SN(64, 0xffffffff)); + RzILOpPure *cond_47 = ITE(op_GT_32, CAST(64, IL_FALSE, CAST(32, IL_FALSE, op_AND_37)), CAST(64, IL_FALSE, CAST(32, IL_FALSE, op_AND_44))); + RzILOpPure *op_AND_50 = LOGAND(cond_47, CAST(64, IL_FALSE, SN(64, 0xffffffff))); + RzILOpPure *op_MUL_52 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_LSHIFT_53 = SHIFTL0(op_AND_50, op_MUL_52); + RzILOpPure *op_OR_55 = LOGOR(CAST(64, IL_FALSE, op_AND_15), op_LSHIFT_53); + RzILOpEffect *op_ASSIGN_57 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_55)); + + // seq(h_tmp80; Rdd = ((st64) (((ut64) (Rdd & (~(0xffffffff << i * ...; + RzILOpEffect *seq_59 = op_ASSIGN_57; + + // seq(seq(h_tmp80; Rdd = ((st64) (((ut64) (Rdd & (~(0xffffffff << ...; + RzILOpEffect *seq_60 = SEQN(2, seq_59, seq_8); + + // 
while ((i < 0x2)) { seq(seq(h_tmp80; Rdd = ((st64) (((ut64) (Rdd & (~(0xffffffff << ... }; + RzILOpPure *op_LT_4 = SLT(VARL("i"), SN(32, 2)); + RzILOpEffect *for_61 = REPEAT(op_LT_4, seq_60); + + // seq(i = 0x0; while ((i < 0x2)) { seq(seq(h_tmp80; Rdd = ((st64) ...; + RzILOpEffect *seq_62 = SEQN(2, op_ASSIGN_2, for_61); + + RzILOpEffect *instruction_sequence = seq_62; + return instruction_sequence; +} + +// Rdd = vmaxw(Rtt,Rss) +RzILOpEffect *hex_il_op_a2_vmaxw(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: st32 i; + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_2 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_5 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp81 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_7 = SETL("h_tmp81", VARL("i")); + + // seq(h_tmp81 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_8 = SEQN(2, op_ASSIGN_hybrid_tmp_7, op_INC_5); + + // Rdd = ((Rdd & (~(0xffffffff << i * 0x20))) | ((((((st64) ((st32) ((Rtt >> i * 0x20) & 0xffffffff))) > ((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff)))) ? 
((st64) ((st32) ((Rtt >> i * 0x20) & 0xffffffff))) : ((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff)))) & 0xffffffff) << i * 0x20)); + RzILOpPure *op_MUL_12 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_LSHIFT_13 = SHIFTL0(SN(64, 0xffffffff), op_MUL_12); + RzILOpPure *op_NOT_14 = LOGNOT(op_LSHIFT_13); + RzILOpPure *op_AND_15 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_14); + RzILOpPure *op_MUL_18 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_RSHIFT_19 = SHIFTRA(Rtt, op_MUL_18); + RzILOpPure *op_AND_21 = LOGAND(op_RSHIFT_19, SN(64, 0xffffffff)); + RzILOpPure *op_MUL_26 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_RSHIFT_27 = SHIFTRA(Rss, op_MUL_26); + RzILOpPure *op_AND_29 = LOGAND(op_RSHIFT_27, SN(64, 0xffffffff)); + RzILOpPure *op_GT_32 = SGT(CAST(64, MSB(CAST(32, MSB(op_AND_21), DUP(op_AND_21))), CAST(32, MSB(DUP(op_AND_21)), DUP(op_AND_21))), CAST(64, MSB(CAST(32, MSB(op_AND_29), DUP(op_AND_29))), CAST(32, MSB(DUP(op_AND_29)), DUP(op_AND_29)))); + RzILOpPure *op_MUL_34 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_RSHIFT_35 = SHIFTRA(DUP(Rtt), op_MUL_34); + RzILOpPure *op_AND_37 = LOGAND(op_RSHIFT_35, SN(64, 0xffffffff)); + RzILOpPure *op_MUL_41 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_RSHIFT_42 = SHIFTRA(DUP(Rss), op_MUL_41); + RzILOpPure *op_AND_44 = LOGAND(op_RSHIFT_42, SN(64, 0xffffffff)); + RzILOpPure *cond_47 = ITE(op_GT_32, CAST(64, MSB(CAST(32, MSB(op_AND_37), DUP(op_AND_37))), CAST(32, MSB(DUP(op_AND_37)), DUP(op_AND_37))), CAST(64, MSB(CAST(32, MSB(op_AND_44), DUP(op_AND_44))), CAST(32, MSB(DUP(op_AND_44)), DUP(op_AND_44)))); + RzILOpPure *op_AND_49 = LOGAND(cond_47, SN(64, 0xffffffff)); + RzILOpPure *op_MUL_51 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_LSHIFT_52 = SHIFTL0(op_AND_49, op_MUL_51); + RzILOpPure *op_OR_53 = LOGOR(op_AND_15, op_LSHIFT_52); + RzILOpEffect *op_ASSIGN_54 = WRITE_REG(bundle, Rdd_op, op_OR_53); + + // seq(h_tmp81; Rdd = ((Rdd & (~(0xffffffff << i * 0x20))) | (((((( ...; + RzILOpEffect *seq_56 = 
op_ASSIGN_54; + + // seq(seq(h_tmp81; Rdd = ((Rdd & (~(0xffffffff << i * 0x20))) | (( ...; + RzILOpEffect *seq_57 = SEQN(2, seq_56, seq_8); + + // while ((i < 0x2)) { seq(seq(h_tmp81; Rdd = ((Rdd & (~(0xffffffff << i * 0x20))) | (( ... }; + RzILOpPure *op_LT_4 = SLT(VARL("i"), SN(32, 2)); + RzILOpEffect *for_58 = REPEAT(op_LT_4, seq_57); + + // seq(i = 0x0; while ((i < 0x2)) { seq(seq(h_tmp81; Rdd = ((Rdd & ...; + RzILOpEffect *seq_59 = SEQN(2, op_ASSIGN_2, for_58); + + RzILOpEffect *instruction_sequence = seq_59; + return instruction_sequence; +} + +// Rdd = vminb(Rtt,Rss) +RzILOpEffect *hex_il_op_a2_vminb(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: st32 i; + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_2 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_5 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp82 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_7 = SETL("h_tmp82", VARL("i")); + + // seq(h_tmp82 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_8 = SEQN(2, op_ASSIGN_hybrid_tmp_7, op_INC_5); + + // Rdd = ((st64) (((ut64) (Rdd & (~(0xff << i * 0x8)))) | (((ut64) (((st64) ((st32) ((((st8) ((Rtt >> i * 0x8) & ((st64) 0xff))) < ((st8) ((Rss >> i * 0x8) & ((st64) 0xff)))) ? 
((st8) ((Rtt >> i * 0x8) & ((st64) 0xff))) : ((st8) ((Rss >> i * 0x8) & ((st64) 0xff)))))) & 0xff)) << i * 0x8))); + RzILOpPure *op_MUL_12 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_LSHIFT_13 = SHIFTL0(SN(64, 0xff), op_MUL_12); + RzILOpPure *op_NOT_14 = LOGNOT(op_LSHIFT_13); + RzILOpPure *op_AND_15 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_14); + RzILOpPure *op_MUL_18 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_RSHIFT_19 = SHIFTRA(Rtt, op_MUL_18); + RzILOpPure *op_AND_22 = LOGAND(op_RSHIFT_19, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_MUL_26 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_RSHIFT_27 = SHIFTRA(Rss, op_MUL_26); + RzILOpPure *op_AND_30 = LOGAND(op_RSHIFT_27, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_LT_32 = SLT(CAST(8, MSB(op_AND_22), DUP(op_AND_22)), CAST(8, MSB(op_AND_30), DUP(op_AND_30))); + RzILOpPure *op_MUL_34 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_RSHIFT_35 = SHIFTRA(DUP(Rtt), op_MUL_34); + RzILOpPure *op_AND_38 = LOGAND(op_RSHIFT_35, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_MUL_41 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_RSHIFT_42 = SHIFTRA(DUP(Rss), op_MUL_41); + RzILOpPure *op_AND_45 = LOGAND(op_RSHIFT_42, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *cond_47 = ITE(op_LT_32, CAST(8, MSB(op_AND_38), DUP(op_AND_38)), CAST(8, MSB(op_AND_45), DUP(op_AND_45))); + RzILOpPure *op_AND_51 = LOGAND(CAST(64, MSB(CAST(32, MSB(cond_47), DUP(cond_47))), CAST(32, MSB(DUP(cond_47)), DUP(cond_47))), SN(64, 0xff)); + RzILOpPure *op_MUL_54 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_LSHIFT_55 = SHIFTL0(CAST(64, IL_FALSE, op_AND_51), op_MUL_54); + RzILOpPure *op_OR_57 = LOGOR(CAST(64, IL_FALSE, op_AND_15), op_LSHIFT_55); + RzILOpEffect *op_ASSIGN_59 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_57)); + + // seq(h_tmp82; Rdd = ((st64) (((ut64) (Rdd & (~(0xff << i * 0x8))) ...; + RzILOpEffect *seq_61 = op_ASSIGN_59; + + // seq(seq(h_tmp82; Rdd = ((st64) (((ut64) (Rdd & 
(~(0xff << i * 0x ...; + RzILOpEffect *seq_62 = SEQN(2, seq_61, seq_8); + + // while ((i < 0x8)) { seq(seq(h_tmp82; Rdd = ((st64) (((ut64) (Rdd & (~(0xff << i * 0x ... }; + RzILOpPure *op_LT_4 = SLT(VARL("i"), SN(32, 8)); + RzILOpEffect *for_63 = REPEAT(op_LT_4, seq_62); + + // seq(i = 0x0; while ((i < 0x8)) { seq(seq(h_tmp82; Rdd = ((st64) ...; + RzILOpEffect *seq_64 = SEQN(2, op_ASSIGN_2, for_63); + + RzILOpEffect *instruction_sequence = seq_64; + return instruction_sequence; +} + +// Rdd = vminh(Rtt,Rss) +RzILOpEffect *hex_il_op_a2_vminh(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: st32 i; + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_2 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_5 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp83 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_7 = SETL("h_tmp83", VARL("i")); + + // seq(h_tmp83 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_8 = SEQN(2, op_ASSIGN_hybrid_tmp_7, op_INC_5); + + // Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * 0x10)))) | (((ut64) (((st32) ((((st16) ((Rtt >> i * 0x10) & ((st64) 0xffff))) < ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff)))) ? 
((st16) ((Rtt >> i * 0x10) & ((st64) 0xffff))) : ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff))))) & 0xffff)) << i * 0x10))); + RzILOpPure *op_MUL_12 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_13 = SHIFTL0(SN(64, 0xffff), op_MUL_12); + RzILOpPure *op_NOT_14 = LOGNOT(op_LSHIFT_13); + RzILOpPure *op_AND_15 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_14); + RzILOpPure *op_MUL_18 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_19 = SHIFTRA(Rtt, op_MUL_18); + RzILOpPure *op_AND_22 = LOGAND(op_RSHIFT_19, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_26 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_27 = SHIFTRA(Rss, op_MUL_26); + RzILOpPure *op_AND_30 = LOGAND(op_RSHIFT_27, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_LT_32 = SLT(CAST(16, MSB(op_AND_22), DUP(op_AND_22)), CAST(16, MSB(op_AND_30), DUP(op_AND_30))); + RzILOpPure *op_MUL_34 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_35 = SHIFTRA(DUP(Rtt), op_MUL_34); + RzILOpPure *op_AND_38 = LOGAND(op_RSHIFT_35, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_41 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_42 = SHIFTRA(DUP(Rss), op_MUL_41); + RzILOpPure *op_AND_45 = LOGAND(op_RSHIFT_42, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *cond_47 = ITE(op_LT_32, CAST(16, MSB(op_AND_38), DUP(op_AND_38)), CAST(16, MSB(op_AND_45), DUP(op_AND_45))); + RzILOpPure *op_AND_50 = LOGAND(CAST(32, MSB(cond_47), DUP(cond_47)), SN(32, 0xffff)); + RzILOpPure *op_MUL_53 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_54 = SHIFTL0(CAST(64, IL_FALSE, op_AND_50), op_MUL_53); + RzILOpPure *op_OR_56 = LOGOR(CAST(64, IL_FALSE, op_AND_15), op_LSHIFT_54); + RzILOpEffect *op_ASSIGN_58 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_56)); + + // seq(h_tmp83; Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * 0x10 ...; + RzILOpEffect *seq_60 = op_ASSIGN_58; + + // seq(seq(h_tmp83; Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * 
...; + RzILOpEffect *seq_61 = SEQN(2, seq_60, seq_8); + + // while ((i < 0x4)) { seq(seq(h_tmp83; Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * ... }; + RzILOpPure *op_LT_4 = SLT(VARL("i"), SN(32, 4)); + RzILOpEffect *for_62 = REPEAT(op_LT_4, seq_61); + + // seq(i = 0x0; while ((i < 0x4)) { seq(seq(h_tmp83; Rdd = ((st64) ...; + RzILOpEffect *seq_63 = SEQN(2, op_ASSIGN_2, for_62); + + RzILOpEffect *instruction_sequence = seq_63; + return instruction_sequence; +} + +// Rdd = vminub(Rtt,Rss) +RzILOpEffect *hex_il_op_a2_vminub(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: st32 i; + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_2 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_5 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp84 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_7 = SETL("h_tmp84", VARL("i")); + + // seq(h_tmp84 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_8 = SEQN(2, op_ASSIGN_hybrid_tmp_7, op_INC_5); + + // Rdd = ((st64) (((ut64) (Rdd & (~(0xff << i * 0x8)))) | (((ut64) (((st64) ((st32) ((((ut8) ((Rtt >> i * 0x8) & ((st64) 0xff))) < ((ut8) ((Rss >> i * 0x8) & ((st64) 0xff)))) ? 
((ut8) ((Rtt >> i * 0x8) & ((st64) 0xff))) : ((ut8) ((Rss >> i * 0x8) & ((st64) 0xff)))))) & 0xff)) << i * 0x8))); + RzILOpPure *op_MUL_12 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_LSHIFT_13 = SHIFTL0(SN(64, 0xff), op_MUL_12); + RzILOpPure *op_NOT_14 = LOGNOT(op_LSHIFT_13); + RzILOpPure *op_AND_15 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_14); + RzILOpPure *op_MUL_18 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_RSHIFT_19 = SHIFTRA(Rtt, op_MUL_18); + RzILOpPure *op_AND_22 = LOGAND(op_RSHIFT_19, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_MUL_26 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_RSHIFT_27 = SHIFTRA(Rss, op_MUL_26); + RzILOpPure *op_AND_30 = LOGAND(op_RSHIFT_27, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_LT_32 = ULT(CAST(8, IL_FALSE, op_AND_22), CAST(8, IL_FALSE, op_AND_30)); + RzILOpPure *op_MUL_34 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_RSHIFT_35 = SHIFTRA(DUP(Rtt), op_MUL_34); + RzILOpPure *op_AND_38 = LOGAND(op_RSHIFT_35, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_MUL_41 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_RSHIFT_42 = SHIFTRA(DUP(Rss), op_MUL_41); + RzILOpPure *op_AND_45 = LOGAND(op_RSHIFT_42, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *cond_47 = ITE(op_LT_32, CAST(8, IL_FALSE, op_AND_38), CAST(8, IL_FALSE, op_AND_45)); + RzILOpPure *op_AND_51 = LOGAND(CAST(64, MSB(CAST(32, IL_FALSE, cond_47)), CAST(32, IL_FALSE, DUP(cond_47))), SN(64, 0xff)); + RzILOpPure *op_MUL_54 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_LSHIFT_55 = SHIFTL0(CAST(64, IL_FALSE, op_AND_51), op_MUL_54); + RzILOpPure *op_OR_57 = LOGOR(CAST(64, IL_FALSE, op_AND_15), op_LSHIFT_55); + RzILOpEffect *op_ASSIGN_59 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_57)); + + // seq(h_tmp84; Rdd = ((st64) (((ut64) (Rdd & (~(0xff << i * 0x8))) ...; + RzILOpEffect *seq_61 = op_ASSIGN_59; + + // seq(seq(h_tmp84; Rdd = ((st64) (((ut64) (Rdd & (~(0xff << i * 0x ...; + RzILOpEffect *seq_62 = SEQN(2, 
seq_61, seq_8); + + // while ((i < 0x8)) { seq(seq(h_tmp84; Rdd = ((st64) (((ut64) (Rdd & (~(0xff << i * 0x ... }; + RzILOpPure *op_LT_4 = SLT(VARL("i"), SN(32, 8)); + RzILOpEffect *for_63 = REPEAT(op_LT_4, seq_62); + + // seq(i = 0x0; while ((i < 0x8)) { seq(seq(h_tmp84; Rdd = ((st64) ...; + RzILOpEffect *seq_64 = SEQN(2, op_ASSIGN_2, for_63); + + RzILOpEffect *instruction_sequence = seq_64; + return instruction_sequence; +} + +// Rdd = vminuh(Rtt,Rss) +RzILOpEffect *hex_il_op_a2_vminuh(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: st32 i; + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_2 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_5 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp85 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_7 = SETL("h_tmp85", VARL("i")); + + // seq(h_tmp85 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_8 = SEQN(2, op_ASSIGN_hybrid_tmp_7, op_INC_5); + + // Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * 0x10)))) | (((ut64) (((st32) ((((ut16) ((Rtt >> i * 0x10) & ((st64) 0xffff))) < ((ut16) ((Rss >> i * 0x10) & ((st64) 0xffff)))) ? 
((ut16) ((Rtt >> i * 0x10) & ((st64) 0xffff))) : ((ut16) ((Rss >> i * 0x10) & ((st64) 0xffff))))) & 0xffff)) << i * 0x10))); + RzILOpPure *op_MUL_12 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_13 = SHIFTL0(SN(64, 0xffff), op_MUL_12); + RzILOpPure *op_NOT_14 = LOGNOT(op_LSHIFT_13); + RzILOpPure *op_AND_15 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_14); + RzILOpPure *op_MUL_18 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_19 = SHIFTRA(Rtt, op_MUL_18); + RzILOpPure *op_AND_22 = LOGAND(op_RSHIFT_19, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_26 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_27 = SHIFTRA(Rss, op_MUL_26); + RzILOpPure *op_AND_30 = LOGAND(op_RSHIFT_27, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_LT_32 = ULT(CAST(16, IL_FALSE, op_AND_22), CAST(16, IL_FALSE, op_AND_30)); + RzILOpPure *op_MUL_34 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_35 = SHIFTRA(DUP(Rtt), op_MUL_34); + RzILOpPure *op_AND_38 = LOGAND(op_RSHIFT_35, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_41 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_42 = SHIFTRA(DUP(Rss), op_MUL_41); + RzILOpPure *op_AND_45 = LOGAND(op_RSHIFT_42, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *cond_47 = ITE(op_LT_32, CAST(16, IL_FALSE, op_AND_38), CAST(16, IL_FALSE, op_AND_45)); + RzILOpPure *op_AND_50 = LOGAND(CAST(32, IL_FALSE, cond_47), SN(32, 0xffff)); + RzILOpPure *op_MUL_53 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_54 = SHIFTL0(CAST(64, IL_FALSE, op_AND_50), op_MUL_53); + RzILOpPure *op_OR_56 = LOGOR(CAST(64, IL_FALSE, op_AND_15), op_LSHIFT_54); + RzILOpEffect *op_ASSIGN_58 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_56)); + + // seq(h_tmp85; Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * 0x10 ...; + RzILOpEffect *seq_60 = op_ASSIGN_58; + + // seq(seq(h_tmp85; Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * ...; + RzILOpEffect *seq_61 = SEQN(2, seq_60, seq_8); 
+ + // while ((i < 0x4)) { seq(seq(h_tmp85; Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * ... }; + RzILOpPure *op_LT_4 = SLT(VARL("i"), SN(32, 4)); + RzILOpEffect *for_62 = REPEAT(op_LT_4, seq_61); + + // seq(i = 0x0; while ((i < 0x4)) { seq(seq(h_tmp85; Rdd = ((st64) ...; + RzILOpEffect *seq_63 = SEQN(2, op_ASSIGN_2, for_62); + + RzILOpEffect *instruction_sequence = seq_63; + return instruction_sequence; +} + +// Rdd = vminuw(Rtt,Rss) +RzILOpEffect *hex_il_op_a2_vminuw(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: st32 i; + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_2 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_5 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp86 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_7 = SETL("h_tmp86", VARL("i")); + + // seq(h_tmp86 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_8 = SEQN(2, op_ASSIGN_hybrid_tmp_7, op_INC_5); + + // Rdd = ((st64) (((ut64) (Rdd & (~(0xffffffff << i * 0x20)))) | ((((((ut64) ((ut32) ((Rtt >> i * 0x20) & 0xffffffff))) < ((ut64) ((ut32) ((Rss >> i * 0x20) & 0xffffffff)))) ? 
((ut64) ((ut32) ((Rtt >> i * 0x20) & 0xffffffff))) : ((ut64) ((ut32) ((Rss >> i * 0x20) & 0xffffffff)))) & ((ut64) 0xffffffff)) << i * 0x20))); + RzILOpPure *op_MUL_12 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_LSHIFT_13 = SHIFTL0(SN(64, 0xffffffff), op_MUL_12); + RzILOpPure *op_NOT_14 = LOGNOT(op_LSHIFT_13); + RzILOpPure *op_AND_15 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_14); + RzILOpPure *op_MUL_18 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_RSHIFT_19 = SHIFTRA(Rtt, op_MUL_18); + RzILOpPure *op_AND_21 = LOGAND(op_RSHIFT_19, SN(64, 0xffffffff)); + RzILOpPure *op_MUL_26 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_RSHIFT_27 = SHIFTRA(Rss, op_MUL_26); + RzILOpPure *op_AND_29 = LOGAND(op_RSHIFT_27, SN(64, 0xffffffff)); + RzILOpPure *op_LT_32 = ULT(CAST(64, IL_FALSE, CAST(32, IL_FALSE, op_AND_21)), CAST(64, IL_FALSE, CAST(32, IL_FALSE, op_AND_29))); + RzILOpPure *op_MUL_34 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_RSHIFT_35 = SHIFTRA(DUP(Rtt), op_MUL_34); + RzILOpPure *op_AND_37 = LOGAND(op_RSHIFT_35, SN(64, 0xffffffff)); + RzILOpPure *op_MUL_41 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_RSHIFT_42 = SHIFTRA(DUP(Rss), op_MUL_41); + RzILOpPure *op_AND_44 = LOGAND(op_RSHIFT_42, SN(64, 0xffffffff)); + RzILOpPure *cond_47 = ITE(op_LT_32, CAST(64, IL_FALSE, CAST(32, IL_FALSE, op_AND_37)), CAST(64, IL_FALSE, CAST(32, IL_FALSE, op_AND_44))); + RzILOpPure *op_AND_50 = LOGAND(cond_47, CAST(64, IL_FALSE, SN(64, 0xffffffff))); + RzILOpPure *op_MUL_52 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_LSHIFT_53 = SHIFTL0(op_AND_50, op_MUL_52); + RzILOpPure *op_OR_55 = LOGOR(CAST(64, IL_FALSE, op_AND_15), op_LSHIFT_53); + RzILOpEffect *op_ASSIGN_57 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_55)); + + // seq(h_tmp86; Rdd = ((st64) (((ut64) (Rdd & (~(0xffffffff << i * ...; + RzILOpEffect *seq_59 = op_ASSIGN_57; + + // seq(seq(h_tmp86; Rdd = ((st64) (((ut64) (Rdd & (~(0xffffffff << ...; + RzILOpEffect *seq_60 = SEQN(2, seq_59, seq_8); + + // 
while ((i < 0x2)) { seq(seq(h_tmp86; Rdd = ((st64) (((ut64) (Rdd & (~(0xffffffff << ... }; + RzILOpPure *op_LT_4 = SLT(VARL("i"), SN(32, 2)); + RzILOpEffect *for_61 = REPEAT(op_LT_4, seq_60); + + // seq(i = 0x0; while ((i < 0x2)) { seq(seq(h_tmp86; Rdd = ((st64) ...; + RzILOpEffect *seq_62 = SEQN(2, op_ASSIGN_2, for_61); + + RzILOpEffect *instruction_sequence = seq_62; + return instruction_sequence; +} + +// Rdd = vminw(Rtt,Rss) +RzILOpEffect *hex_il_op_a2_vminw(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: st32 i; + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_2 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_5 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp87 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_7 = SETL("h_tmp87", VARL("i")); + + // seq(h_tmp87 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_8 = SEQN(2, op_ASSIGN_hybrid_tmp_7, op_INC_5); + + // Rdd = ((Rdd & (~(0xffffffff << i * 0x20))) | ((((((st64) ((st32) ((Rtt >> i * 0x20) & 0xffffffff))) < ((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff)))) ? 
((st64) ((st32) ((Rtt >> i * 0x20) & 0xffffffff))) : ((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff)))) & 0xffffffff) << i * 0x20)); + RzILOpPure *op_MUL_12 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_LSHIFT_13 = SHIFTL0(SN(64, 0xffffffff), op_MUL_12); + RzILOpPure *op_NOT_14 = LOGNOT(op_LSHIFT_13); + RzILOpPure *op_AND_15 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_14); + RzILOpPure *op_MUL_18 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_RSHIFT_19 = SHIFTRA(Rtt, op_MUL_18); + RzILOpPure *op_AND_21 = LOGAND(op_RSHIFT_19, SN(64, 0xffffffff)); + RzILOpPure *op_MUL_26 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_RSHIFT_27 = SHIFTRA(Rss, op_MUL_26); + RzILOpPure *op_AND_29 = LOGAND(op_RSHIFT_27, SN(64, 0xffffffff)); + RzILOpPure *op_LT_32 = SLT(CAST(64, MSB(CAST(32, MSB(op_AND_21), DUP(op_AND_21))), CAST(32, MSB(DUP(op_AND_21)), DUP(op_AND_21))), CAST(64, MSB(CAST(32, MSB(op_AND_29), DUP(op_AND_29))), CAST(32, MSB(DUP(op_AND_29)), DUP(op_AND_29)))); + RzILOpPure *op_MUL_34 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_RSHIFT_35 = SHIFTRA(DUP(Rtt), op_MUL_34); + RzILOpPure *op_AND_37 = LOGAND(op_RSHIFT_35, SN(64, 0xffffffff)); + RzILOpPure *op_MUL_41 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_RSHIFT_42 = SHIFTRA(DUP(Rss), op_MUL_41); + RzILOpPure *op_AND_44 = LOGAND(op_RSHIFT_42, SN(64, 0xffffffff)); + RzILOpPure *cond_47 = ITE(op_LT_32, CAST(64, MSB(CAST(32, MSB(op_AND_37), DUP(op_AND_37))), CAST(32, MSB(DUP(op_AND_37)), DUP(op_AND_37))), CAST(64, MSB(CAST(32, MSB(op_AND_44), DUP(op_AND_44))), CAST(32, MSB(DUP(op_AND_44)), DUP(op_AND_44)))); + RzILOpPure *op_AND_49 = LOGAND(cond_47, SN(64, 0xffffffff)); + RzILOpPure *op_MUL_51 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_LSHIFT_52 = SHIFTL0(op_AND_49, op_MUL_51); + RzILOpPure *op_OR_53 = LOGOR(op_AND_15, op_LSHIFT_52); + RzILOpEffect *op_ASSIGN_54 = WRITE_REG(bundle, Rdd_op, op_OR_53); + + // seq(h_tmp87; Rdd = ((Rdd & (~(0xffffffff << i * 0x20))) | (((((( ...; + RzILOpEffect *seq_56 = 
op_ASSIGN_54; + + // seq(seq(h_tmp87; Rdd = ((Rdd & (~(0xffffffff << i * 0x20))) | (( ...; + RzILOpEffect *seq_57 = SEQN(2, seq_56, seq_8); + + // while ((i < 0x2)) { seq(seq(h_tmp87; Rdd = ((Rdd & (~(0xffffffff << i * 0x20))) | (( ... }; + RzILOpPure *op_LT_4 = SLT(VARL("i"), SN(32, 2)); + RzILOpEffect *for_58 = REPEAT(op_LT_4, seq_57); + + // seq(i = 0x0; while ((i < 0x2)) { seq(seq(h_tmp87; Rdd = ((Rdd & ...; + RzILOpEffect *seq_59 = SEQN(2, op_ASSIGN_2, for_58); + + RzILOpEffect *instruction_sequence = seq_59; + return instruction_sequence; +} + +// Rdd = vnavgh(Rtt,Rss) +RzILOpEffect *hex_il_op_a2_vnavgh(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: st32 i; + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_2 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_5 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp88 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_7 = SETL("h_tmp88", VARL("i")); + + // seq(h_tmp88 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_8 = SEQN(2, op_ASSIGN_hybrid_tmp_7, op_INC_5); + + // Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * 0x10)))) | (((ut64) ((((st32) ((st16) ((Rtt >> i * 0x10) & ((st64) 0xffff)))) - ((st32) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff)))) >> 0x1) & 0xffff)) << i * 0x10))); + RzILOpPure *op_MUL_12 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_13 = SHIFTL0(SN(64, 0xffff), op_MUL_12); + RzILOpPure *op_NOT_14 = LOGNOT(op_LSHIFT_13); + RzILOpPure *op_AND_15 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_14); + RzILOpPure *op_MUL_18 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_19 = SHIFTRA(Rtt, op_MUL_18); + RzILOpPure *op_AND_22 = LOGAND(op_RSHIFT_19, CAST(64, MSB(SN(32, 0xffff)), SN(32, 
0xffff))); + RzILOpPure *op_MUL_26 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_27 = SHIFTRA(Rss, op_MUL_26); + RzILOpPure *op_AND_30 = LOGAND(op_RSHIFT_27, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_SUB_34 = SUB(CAST(32, MSB(CAST(16, MSB(op_AND_22), DUP(op_AND_22))), CAST(16, MSB(DUP(op_AND_22)), DUP(op_AND_22))), CAST(32, MSB(CAST(16, MSB(op_AND_30), DUP(op_AND_30))), CAST(16, MSB(DUP(op_AND_30)), DUP(op_AND_30)))); + RzILOpPure *op_RSHIFT_36 = SHIFTRA(op_SUB_34, SN(32, 1)); + RzILOpPure *op_AND_38 = LOGAND(op_RSHIFT_36, SN(32, 0xffff)); + RzILOpPure *op_MUL_41 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_42 = SHIFTL0(CAST(64, IL_FALSE, op_AND_38), op_MUL_41); + RzILOpPure *op_OR_44 = LOGOR(CAST(64, IL_FALSE, op_AND_15), op_LSHIFT_42); + RzILOpEffect *op_ASSIGN_46 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_44)); + + // seq(h_tmp88; Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * 0x10 ...; + RzILOpEffect *seq_48 = op_ASSIGN_46; + + // seq(seq(h_tmp88; Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * ...; + RzILOpEffect *seq_49 = SEQN(2, seq_48, seq_8); + + // while ((i < 0x4)) { seq(seq(h_tmp88; Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * ... 
}; + RzILOpPure *op_LT_4 = SLT(VARL("i"), SN(32, 4)); + RzILOpEffect *for_50 = REPEAT(op_LT_4, seq_49); + + // seq(i = 0x0; while ((i < 0x4)) { seq(seq(h_tmp88; Rdd = ((st64) ...; + RzILOpEffect *seq_51 = SEQN(2, op_ASSIGN_2, for_50); + + RzILOpEffect *instruction_sequence = seq_51; + return instruction_sequence; +} + +// Rdd = vnavgh(Rtt,Rss):crnd:sat +RzILOpEffect *hex_il_op_a2_vnavghcr(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: st32 i; + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_2 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_5 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp89 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_7 = SETL("h_tmp89", VARL("i")); + + // seq(h_tmp89 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_8 = SEQN(2, op_ASSIGN_hybrid_tmp_7, op_INC_5); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_210 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((((((st32) ((st16) ((Rtt >> i * 0x10) & ((st64) 0xffff)))) - ((st32) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff)))) & 0x3) == 0x3) ? ((st32) ((st16) ((Rtt >> i * 0x10) & ((st64) 0xffff)))) - ((st32) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff)))) + 0x1 : ((st32) ((st16) ((Rtt >> i * 0x10) & ((st64) 0xffff)))) - ((st32) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff))))) >> 0x1)), 0x0, 0x10) == ((st64) ((((((st32) ((st16) ((Rtt >> i * 0x10) & ((st64) 0xffff)))) - ((st32) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff)))) & 0x3) == 0x3) ? 
((st32) ((st16) ((Rtt >> i * 0x10) & ((st64) 0xffff)))) - ((st32) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff)))) + 0x1 : ((st32) ((st16) ((Rtt >> i * 0x10) & ((st64) 0xffff)))) - ((st32) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff))))) >> 0x1)))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((((((st32) ((st16) ((Rtt >> i * 0x10) & ((st64) 0xffff)))) - ((st32) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff)))) & 0x3) == 0x3) ? ((st32) ((st16) ((Rtt >> i * 0x10) & ((st64) 0xffff)))) - ((st32) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff)))) + 0x1 : ((st32) ((st16) ((Rtt >> i * 0x10) & ((st64) 0xffff)))) - ((st32) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff))))) >> 0x1) < 0x0) ? (-(0x1 << 0xf)) : (0x1 << 0xf) - ((st64) 0x1))); + RzILOpPure *op_MUL_21 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_22 = SHIFTRA(Rtt, op_MUL_21); + RzILOpPure *op_AND_25 = LOGAND(op_RSHIFT_22, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_29 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_30 = SHIFTRA(Rss, op_MUL_29); + RzILOpPure *op_AND_33 = LOGAND(op_RSHIFT_30, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_SUB_37 = SUB(CAST(32, MSB(CAST(16, MSB(op_AND_25), DUP(op_AND_25))), CAST(16, MSB(DUP(op_AND_25)), DUP(op_AND_25))), CAST(32, MSB(CAST(16, MSB(op_AND_33), DUP(op_AND_33))), CAST(16, MSB(DUP(op_AND_33)), DUP(op_AND_33)))); + RzILOpPure *op_AND_39 = LOGAND(op_SUB_37, SN(32, 3)); + RzILOpPure *op_EQ_41 = EQ(op_AND_39, SN(32, 3)); + RzILOpPure *op_MUL_43 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_44 = SHIFTRA(DUP(Rtt), op_MUL_43); + RzILOpPure *op_AND_47 = LOGAND(op_RSHIFT_44, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_50 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_51 = SHIFTRA(DUP(Rss), op_MUL_50); + RzILOpPure *op_AND_54 = LOGAND(op_RSHIFT_51, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_SUB_58 = SUB(CAST(32, MSB(CAST(16, MSB(op_AND_47), 
DUP(op_AND_47))), CAST(16, MSB(DUP(op_AND_47)), DUP(op_AND_47))), CAST(32, MSB(CAST(16, MSB(op_AND_54), DUP(op_AND_54))), CAST(16, MSB(DUP(op_AND_54)), DUP(op_AND_54)))); + RzILOpPure *op_ADD_60 = ADD(op_SUB_58, SN(32, 1)); + RzILOpPure *op_MUL_62 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_63 = SHIFTRA(DUP(Rtt), op_MUL_62); + RzILOpPure *op_AND_66 = LOGAND(op_RSHIFT_63, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_69 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_70 = SHIFTRA(DUP(Rss), op_MUL_69); + RzILOpPure *op_AND_73 = LOGAND(op_RSHIFT_70, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_SUB_77 = SUB(CAST(32, MSB(CAST(16, MSB(op_AND_66), DUP(op_AND_66))), CAST(16, MSB(DUP(op_AND_66)), DUP(op_AND_66))), CAST(32, MSB(CAST(16, MSB(op_AND_73), DUP(op_AND_73))), CAST(16, MSB(DUP(op_AND_73)), DUP(op_AND_73)))); + RzILOpPure *cond_78 = ITE(op_EQ_41, op_ADD_60, op_SUB_77); + RzILOpPure *op_RSHIFT_80 = SHIFTRA(cond_78, SN(32, 1)); + RzILOpPure *op_MUL_87 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_88 = SHIFTRA(DUP(Rtt), op_MUL_87); + RzILOpPure *op_AND_91 = LOGAND(op_RSHIFT_88, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_94 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_95 = SHIFTRA(DUP(Rss), op_MUL_94); + RzILOpPure *op_AND_98 = LOGAND(op_RSHIFT_95, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_SUB_102 = SUB(CAST(32, MSB(CAST(16, MSB(op_AND_91), DUP(op_AND_91))), CAST(16, MSB(DUP(op_AND_91)), DUP(op_AND_91))), CAST(32, MSB(CAST(16, MSB(op_AND_98), DUP(op_AND_98))), CAST(16, MSB(DUP(op_AND_98)), DUP(op_AND_98)))); + RzILOpPure *op_AND_104 = LOGAND(op_SUB_102, SN(32, 3)); + RzILOpPure *op_EQ_106 = EQ(op_AND_104, SN(32, 3)); + RzILOpPure *op_MUL_108 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_109 = SHIFTRA(DUP(Rtt), op_MUL_108); + RzILOpPure *op_AND_112 = LOGAND(op_RSHIFT_109, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure 
*op_MUL_115 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_116 = SHIFTRA(DUP(Rss), op_MUL_115); + RzILOpPure *op_AND_119 = LOGAND(op_RSHIFT_116, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_SUB_123 = SUB(CAST(32, MSB(CAST(16, MSB(op_AND_112), DUP(op_AND_112))), CAST(16, MSB(DUP(op_AND_112)), DUP(op_AND_112))), CAST(32, MSB(CAST(16, MSB(op_AND_119), DUP(op_AND_119))), CAST(16, MSB(DUP(op_AND_119)), DUP(op_AND_119)))); + RzILOpPure *op_ADD_125 = ADD(op_SUB_123, SN(32, 1)); + RzILOpPure *op_MUL_127 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_128 = SHIFTRA(DUP(Rtt), op_MUL_127); + RzILOpPure *op_AND_131 = LOGAND(op_RSHIFT_128, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_134 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_135 = SHIFTRA(DUP(Rss), op_MUL_134); + RzILOpPure *op_AND_138 = LOGAND(op_RSHIFT_135, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_SUB_142 = SUB(CAST(32, MSB(CAST(16, MSB(op_AND_131), DUP(op_AND_131))), CAST(16, MSB(DUP(op_AND_131)), DUP(op_AND_131))), CAST(32, MSB(CAST(16, MSB(op_AND_138), DUP(op_AND_138))), CAST(16, MSB(DUP(op_AND_138)), DUP(op_AND_138)))); + RzILOpPure *cond_143 = ITE(op_EQ_106, op_ADD_125, op_SUB_142); + RzILOpPure *op_RSHIFT_145 = SHIFTRA(cond_143, SN(32, 1)); + RzILOpPure *op_EQ_147 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_RSHIFT_80), SN(32, 0), SN(32, 16)), CAST(64, MSB(op_RSHIFT_145), DUP(op_RSHIFT_145))); + RzILOpPure *op_MUL_212 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_213 = SHIFTRA(DUP(Rtt), op_MUL_212); + RzILOpPure *op_AND_216 = LOGAND(op_RSHIFT_213, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_219 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_220 = SHIFTRA(DUP(Rss), op_MUL_219); + RzILOpPure *op_AND_223 = LOGAND(op_RSHIFT_220, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_SUB_227 = SUB(CAST(32, MSB(CAST(16, MSB(op_AND_216), DUP(op_AND_216))), CAST(16, MSB(DUP(op_AND_216)), 
DUP(op_AND_216))), CAST(32, MSB(CAST(16, MSB(op_AND_223), DUP(op_AND_223))), CAST(16, MSB(DUP(op_AND_223)), DUP(op_AND_223)))); + RzILOpPure *op_AND_229 = LOGAND(op_SUB_227, SN(32, 3)); + RzILOpPure *op_EQ_231 = EQ(op_AND_229, SN(32, 3)); + RzILOpPure *op_MUL_233 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_234 = SHIFTRA(DUP(Rtt), op_MUL_233); + RzILOpPure *op_AND_237 = LOGAND(op_RSHIFT_234, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_240 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_241 = SHIFTRA(DUP(Rss), op_MUL_240); + RzILOpPure *op_AND_244 = LOGAND(op_RSHIFT_241, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_SUB_248 = SUB(CAST(32, MSB(CAST(16, MSB(op_AND_237), DUP(op_AND_237))), CAST(16, MSB(DUP(op_AND_237)), DUP(op_AND_237))), CAST(32, MSB(CAST(16, MSB(op_AND_244), DUP(op_AND_244))), CAST(16, MSB(DUP(op_AND_244)), DUP(op_AND_244)))); + RzILOpPure *op_ADD_250 = ADD(op_SUB_248, SN(32, 1)); + RzILOpPure *op_MUL_252 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_253 = SHIFTRA(DUP(Rtt), op_MUL_252); + RzILOpPure *op_AND_256 = LOGAND(op_RSHIFT_253, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_259 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_260 = SHIFTRA(DUP(Rss), op_MUL_259); + RzILOpPure *op_AND_263 = LOGAND(op_RSHIFT_260, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_SUB_267 = SUB(CAST(32, MSB(CAST(16, MSB(op_AND_256), DUP(op_AND_256))), CAST(16, MSB(DUP(op_AND_256)), DUP(op_AND_256))), CAST(32, MSB(CAST(16, MSB(op_AND_263), DUP(op_AND_263))), CAST(16, MSB(DUP(op_AND_263)), DUP(op_AND_263)))); + RzILOpPure *cond_268 = ITE(op_EQ_231, op_ADD_250, op_SUB_267); + RzILOpPure *op_RSHIFT_270 = SHIFTRA(cond_268, SN(32, 1)); + RzILOpPure *op_LT_272 = SLT(op_RSHIFT_270, SN(32, 0)); + RzILOpPure *op_LSHIFT_277 = SHIFTL0(SN(64, 1), SN(32, 15)); + RzILOpPure *op_NEG_278 = NEG(op_LSHIFT_277); + RzILOpPure *op_LSHIFT_283 = SHIFTL0(SN(64, 1), SN(32, 15)); + 
RzILOpPure *op_SUB_286 = SUB(op_LSHIFT_283, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_287 = ITE(op_LT_272, op_NEG_278, op_SUB_286); + RzILOpEffect *gcc_expr_288 = BRANCH(op_EQ_147, EMPTY(), set_usr_field_call_210); + + // h_tmp90 = HYB(gcc_expr_if ((sextract64(((ut64) ((((((st32) ((st16) ((Rtt >> i * 0x10) & ((st64) 0xffff)))) - ((st32) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff)))) & 0x3) == 0x3) ? ((st32) ((st16) ((Rtt >> i * 0x10) & ((st64) 0xffff)))) - ((st32) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff)))) + 0x1 : ((st32) ((st16) ((Rtt >> i * 0x10) & ((st64) 0xffff)))) - ((st32) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff))))) >> 0x1)), 0x0, 0x10) == ((st64) ((((((st32) ((st16) ((Rtt >> i * 0x10) & ((st64) 0xffff)))) - ((st32) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff)))) & 0x3) == 0x3) ? ((st32) ((st16) ((Rtt >> i * 0x10) & ((st64) 0xffff)))) - ((st32) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff)))) + 0x1 : ((st32) ((st16) ((Rtt >> i * 0x10) & ((st64) 0xffff)))) - ((st32) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff))))) >> 0x1)))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((((((st32) ((st16) ((Rtt >> i * 0x10) & ((st64) 0xffff)))) - ((st32) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff)))) & 0x3) == 0x3) ? ((st32) ((st16) ((Rtt >> i * 0x10) & ((st64) 0xffff)))) - ((st32) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff)))) + 0x1 : ((st32) ((st16) ((Rtt >> i * 0x10) & ((st64) 0xffff)))) - ((st32) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff))))) >> 0x1) < 0x0) ? 
(-(0x1 << 0xf)) : (0x1 << 0xf) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_290 = SETL("h_tmp90", cond_287); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((((((st32) ((st16) ((R ...; + RzILOpEffect *seq_291 = SEQN(2, gcc_expr_288, op_ASSIGN_hybrid_tmp_290); + + // Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * 0x10)))) | (((ut64) (((sextract64(((ut64) ((((((st32) ((st16) ((Rtt >> i * 0x10) & ((st64) 0xffff)))) - ((st32) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff)))) & 0x3) == 0x3) ? ((st32) ((st16) ((Rtt >> i * 0x10) & ((st64) 0xffff)))) - ((st32) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff)))) + 0x1 : ((st32) ((st16) ((Rtt >> i * 0x10) & ((st64) 0xffff)))) - ((st32) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff))))) >> 0x1)), 0x0, 0x10) == ((st64) ((((((st32) ((st16) ((Rtt >> i * 0x10) & ((st64) 0xffff)))) - ((st32) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff)))) & 0x3) == 0x3) ? ((st32) ((st16) ((Rtt >> i * 0x10) & ((st64) 0xffff)))) - ((st32) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff)))) + 0x1 : ((st32) ((st16) ((Rtt >> i * 0x10) & ((st64) 0xffff)))) - ((st32) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff))))) >> 0x1))) ? ((st64) ((((((st32) ((st16) ((Rtt >> i * 0x10) & ((st64) 0xffff)))) - ((st32) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff)))) & 0x3) == 0x3) ? 
((st32) ((st16) ((Rtt >> i * 0x10) & ((st64) 0xffff)))) - ((st32) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff)))) + 0x1 : ((st32) ((st16) ((Rtt >> i * 0x10) & ((st64) 0xffff)))) - ((st32) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff))))) >> 0x1)) : h_tmp90) & ((st64) 0xffff))) << i * 0x10))); + RzILOpPure *op_MUL_12 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_13 = SHIFTL0(SN(64, 0xffff), op_MUL_12); + RzILOpPure *op_NOT_14 = LOGNOT(op_LSHIFT_13); + RzILOpPure *op_AND_15 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_14); + RzILOpPure *op_MUL_149 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_150 = SHIFTRA(DUP(Rtt), op_MUL_149); + RzILOpPure *op_AND_153 = LOGAND(op_RSHIFT_150, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_156 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_157 = SHIFTRA(DUP(Rss), op_MUL_156); + RzILOpPure *op_AND_160 = LOGAND(op_RSHIFT_157, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_SUB_164 = SUB(CAST(32, MSB(CAST(16, MSB(op_AND_153), DUP(op_AND_153))), CAST(16, MSB(DUP(op_AND_153)), DUP(op_AND_153))), CAST(32, MSB(CAST(16, MSB(op_AND_160), DUP(op_AND_160))), CAST(16, MSB(DUP(op_AND_160)), DUP(op_AND_160)))); + RzILOpPure *op_AND_166 = LOGAND(op_SUB_164, SN(32, 3)); + RzILOpPure *op_EQ_168 = EQ(op_AND_166, SN(32, 3)); + RzILOpPure *op_MUL_170 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_171 = SHIFTRA(DUP(Rtt), op_MUL_170); + RzILOpPure *op_AND_174 = LOGAND(op_RSHIFT_171, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_177 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_178 = SHIFTRA(DUP(Rss), op_MUL_177); + RzILOpPure *op_AND_181 = LOGAND(op_RSHIFT_178, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_SUB_185 = SUB(CAST(32, MSB(CAST(16, MSB(op_AND_174), DUP(op_AND_174))), CAST(16, MSB(DUP(op_AND_174)), DUP(op_AND_174))), CAST(32, MSB(CAST(16, MSB(op_AND_181), DUP(op_AND_181))), CAST(16, MSB(DUP(op_AND_181)), DUP(op_AND_181)))); + 
RzILOpPure *op_ADD_187 = ADD(op_SUB_185, SN(32, 1)); + RzILOpPure *op_MUL_189 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_190 = SHIFTRA(DUP(Rtt), op_MUL_189); + RzILOpPure *op_AND_193 = LOGAND(op_RSHIFT_190, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_196 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_197 = SHIFTRA(DUP(Rss), op_MUL_196); + RzILOpPure *op_AND_200 = LOGAND(op_RSHIFT_197, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_SUB_204 = SUB(CAST(32, MSB(CAST(16, MSB(op_AND_193), DUP(op_AND_193))), CAST(16, MSB(DUP(op_AND_193)), DUP(op_AND_193))), CAST(32, MSB(CAST(16, MSB(op_AND_200), DUP(op_AND_200))), CAST(16, MSB(DUP(op_AND_200)), DUP(op_AND_200)))); + RzILOpPure *cond_205 = ITE(op_EQ_168, op_ADD_187, op_SUB_204); + RzILOpPure *op_RSHIFT_207 = SHIFTRA(cond_205, SN(32, 1)); + RzILOpPure *cond_293 = ITE(DUP(op_EQ_147), CAST(64, MSB(op_RSHIFT_207), DUP(op_RSHIFT_207)), VARL("h_tmp90")); + RzILOpPure *op_AND_296 = LOGAND(cond_293, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_299 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_300 = SHIFTL0(CAST(64, IL_FALSE, op_AND_296), op_MUL_299); + RzILOpPure *op_OR_302 = LOGOR(CAST(64, IL_FALSE, op_AND_15), op_LSHIFT_300); + RzILOpEffect *op_ASSIGN_304 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_302)); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((((((st32) ((st16) ...; + RzILOpEffect *seq_305 = SEQN(2, seq_291, op_ASSIGN_304); + + // seq(h_tmp89; seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) (((((( ...; + RzILOpEffect *seq_307 = seq_305; + + // seq(seq(h_tmp89; seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) (( ...; + RzILOpEffect *seq_308 = SEQN(2, seq_307, seq_8); + + // while ((i < 0x4)) { seq(seq(h_tmp89; seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) (( ... 
}; + RzILOpPure *op_LT_4 = SLT(VARL("i"), SN(32, 4)); + RzILOpEffect *for_309 = REPEAT(op_LT_4, seq_308); + + // seq(i = 0x0; while ((i < 0x4)) { seq(seq(h_tmp89; seq(seq(HYB(gc ...; + RzILOpEffect *seq_310 = SEQN(2, op_ASSIGN_2, for_309); + + RzILOpEffect *instruction_sequence = seq_310; + return instruction_sequence; +} + +// Rdd = vnavgh(Rtt,Rss):rnd:sat +RzILOpEffect *hex_il_op_a2_vnavghr(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: st32 i; + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_2 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_5 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp91 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_7 = SETL("h_tmp91", VARL("i")); + + // seq(h_tmp91 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_8 = SEQN(2, op_ASSIGN_hybrid_tmp_7, op_INC_5); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_93 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) (((st32) ((st16) ((Rtt >> i * 0x10) & ((st64) 0xffff)))) - ((st32) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff)))) + 0x1 >> 0x1)), 0x0, 0x10) == ((st64) (((st32) ((st16) ((Rtt >> i * 0x10) & ((st64) 0xffff)))) - ((st32) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff)))) + 0x1 >> 0x1)))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st32) ((st16) ((Rtt >> i * 0x10) & ((st64) 0xffff)))) - ((st32) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff)))) + 0x1 >> 0x1) < 0x0) ? 
(-(0x1 << 0xf)) : (0x1 << 0xf) - ((st64) 0x1))); + RzILOpPure *op_MUL_21 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_22 = SHIFTRA(Rtt, op_MUL_21); + RzILOpPure *op_AND_25 = LOGAND(op_RSHIFT_22, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_29 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_30 = SHIFTRA(Rss, op_MUL_29); + RzILOpPure *op_AND_33 = LOGAND(op_RSHIFT_30, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_SUB_37 = SUB(CAST(32, MSB(CAST(16, MSB(op_AND_25), DUP(op_AND_25))), CAST(16, MSB(DUP(op_AND_25)), DUP(op_AND_25))), CAST(32, MSB(CAST(16, MSB(op_AND_33), DUP(op_AND_33))), CAST(16, MSB(DUP(op_AND_33)), DUP(op_AND_33)))); + RzILOpPure *op_ADD_39 = ADD(op_SUB_37, SN(32, 1)); + RzILOpPure *op_RSHIFT_41 = SHIFTRA(op_ADD_39, SN(32, 1)); + RzILOpPure *op_MUL_48 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_49 = SHIFTRA(DUP(Rtt), op_MUL_48); + RzILOpPure *op_AND_52 = LOGAND(op_RSHIFT_49, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_55 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_56 = SHIFTRA(DUP(Rss), op_MUL_55); + RzILOpPure *op_AND_59 = LOGAND(op_RSHIFT_56, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_SUB_63 = SUB(CAST(32, MSB(CAST(16, MSB(op_AND_52), DUP(op_AND_52))), CAST(16, MSB(DUP(op_AND_52)), DUP(op_AND_52))), CAST(32, MSB(CAST(16, MSB(op_AND_59), DUP(op_AND_59))), CAST(16, MSB(DUP(op_AND_59)), DUP(op_AND_59)))); + RzILOpPure *op_ADD_65 = ADD(op_SUB_63, SN(32, 1)); + RzILOpPure *op_RSHIFT_67 = SHIFTRA(op_ADD_65, SN(32, 1)); + RzILOpPure *op_EQ_69 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_RSHIFT_41), SN(32, 0), SN(32, 16)), CAST(64, MSB(op_RSHIFT_67), DUP(op_RSHIFT_67))); + RzILOpPure *op_MUL_95 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_96 = SHIFTRA(DUP(Rtt), op_MUL_95); + RzILOpPure *op_AND_99 = LOGAND(op_RSHIFT_96, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_102 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure 
*op_RSHIFT_103 = SHIFTRA(DUP(Rss), op_MUL_102); + RzILOpPure *op_AND_106 = LOGAND(op_RSHIFT_103, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_SUB_110 = SUB(CAST(32, MSB(CAST(16, MSB(op_AND_99), DUP(op_AND_99))), CAST(16, MSB(DUP(op_AND_99)), DUP(op_AND_99))), CAST(32, MSB(CAST(16, MSB(op_AND_106), DUP(op_AND_106))), CAST(16, MSB(DUP(op_AND_106)), DUP(op_AND_106)))); + RzILOpPure *op_ADD_112 = ADD(op_SUB_110, SN(32, 1)); + RzILOpPure *op_RSHIFT_114 = SHIFTRA(op_ADD_112, SN(32, 1)); + RzILOpPure *op_LT_116 = SLT(op_RSHIFT_114, SN(32, 0)); + RzILOpPure *op_LSHIFT_121 = SHIFTL0(SN(64, 1), SN(32, 15)); + RzILOpPure *op_NEG_122 = NEG(op_LSHIFT_121); + RzILOpPure *op_LSHIFT_127 = SHIFTL0(SN(64, 1), SN(32, 15)); + RzILOpPure *op_SUB_130 = SUB(op_LSHIFT_127, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_131 = ITE(op_LT_116, op_NEG_122, op_SUB_130); + RzILOpEffect *gcc_expr_132 = BRANCH(op_EQ_69, EMPTY(), set_usr_field_call_93); + + // h_tmp92 = HYB(gcc_expr_if ((sextract64(((ut64) (((st32) ((st16) ((Rtt >> i * 0x10) & ((st64) 0xffff)))) - ((st32) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff)))) + 0x1 >> 0x1)), 0x0, 0x10) == ((st64) (((st32) ((st16) ((Rtt >> i * 0x10) & ((st64) 0xffff)))) - ((st32) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff)))) + 0x1 >> 0x1)))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st32) ((st16) ((Rtt >> i * 0x10) & ((st64) 0xffff)))) - ((st32) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff)))) + 0x1 >> 0x1) < 0x0) ? 
(-(0x1 << 0xf)) : (0x1 << 0xf) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_134 = SETL("h_tmp92", cond_131); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st32) ((st16) ((Rtt ...; + RzILOpEffect *seq_135 = SEQN(2, gcc_expr_132, op_ASSIGN_hybrid_tmp_134); + + // Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * 0x10)))) | (((ut64) (((sextract64(((ut64) (((st32) ((st16) ((Rtt >> i * 0x10) & ((st64) 0xffff)))) - ((st32) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff)))) + 0x1 >> 0x1)), 0x0, 0x10) == ((st64) (((st32) ((st16) ((Rtt >> i * 0x10) & ((st64) 0xffff)))) - ((st32) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff)))) + 0x1 >> 0x1))) ? ((st64) (((st32) ((st16) ((Rtt >> i * 0x10) & ((st64) 0xffff)))) - ((st32) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff)))) + 0x1 >> 0x1)) : h_tmp92) & ((st64) 0xffff))) << i * 0x10))); + RzILOpPure *op_MUL_12 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_13 = SHIFTL0(SN(64, 0xffff), op_MUL_12); + RzILOpPure *op_NOT_14 = LOGNOT(op_LSHIFT_13); + RzILOpPure *op_AND_15 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_14); + RzILOpPure *op_MUL_71 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_72 = SHIFTRA(DUP(Rtt), op_MUL_71); + RzILOpPure *op_AND_75 = LOGAND(op_RSHIFT_72, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_78 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_79 = SHIFTRA(DUP(Rss), op_MUL_78); + RzILOpPure *op_AND_82 = LOGAND(op_RSHIFT_79, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_SUB_86 = SUB(CAST(32, MSB(CAST(16, MSB(op_AND_75), DUP(op_AND_75))), CAST(16, MSB(DUP(op_AND_75)), DUP(op_AND_75))), CAST(32, MSB(CAST(16, MSB(op_AND_82), DUP(op_AND_82))), CAST(16, MSB(DUP(op_AND_82)), DUP(op_AND_82)))); + RzILOpPure *op_ADD_88 = ADD(op_SUB_86, SN(32, 1)); + RzILOpPure *op_RSHIFT_90 = SHIFTRA(op_ADD_88, SN(32, 1)); + RzILOpPure *cond_137 = ITE(DUP(op_EQ_69), CAST(64, MSB(op_RSHIFT_90), DUP(op_RSHIFT_90)), VARL("h_tmp92")); + RzILOpPure *op_AND_140 = LOGAND(cond_137, 
CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_143 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_144 = SHIFTL0(CAST(64, IL_FALSE, op_AND_140), op_MUL_143); + RzILOpPure *op_OR_146 = LOGOR(CAST(64, IL_FALSE, op_AND_15), op_LSHIFT_144); + RzILOpEffect *op_ASSIGN_148 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_146)); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st32) ((st16) (( ...; + RzILOpEffect *seq_149 = SEQN(2, seq_135, op_ASSIGN_148); + + // seq(h_tmp91; seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st3 ...; + RzILOpEffect *seq_151 = seq_149; + + // seq(seq(h_tmp91; seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) (( ...; + RzILOpEffect *seq_152 = SEQN(2, seq_151, seq_8); + + // while ((i < 0x4)) { seq(seq(h_tmp91; seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) (( ... }; + RzILOpPure *op_LT_4 = SLT(VARL("i"), SN(32, 4)); + RzILOpEffect *for_153 = REPEAT(op_LT_4, seq_152); + + // seq(i = 0x0; while ((i < 0x4)) { seq(seq(h_tmp91; seq(seq(HYB(gc ...; + RzILOpEffect *seq_154 = SEQN(2, op_ASSIGN_2, for_153); + + RzILOpEffect *instruction_sequence = seq_154; + return instruction_sequence; +} + +// Rdd = vnavgw(Rtt,Rss) +RzILOpEffect *hex_il_op_a2_vnavgw(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: st32 i; + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_2 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_5 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp93 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_7 = SETL("h_tmp93", VARL("i")); + + // seq(h_tmp93 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_8 = SEQN(2, op_ASSIGN_hybrid_tmp_7, op_INC_5); + + // Rdd = ((Rdd & (~(0xffffffff << i * 0x20))) | 
(((sextract64(((ut64) ((st64) ((st32) ((Rtt >> i * 0x20) & 0xffffffff)))), 0x0, 0x20) - sextract64(((ut64) ((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff)))), 0x0, 0x20) >> 0x1) & 0xffffffff) << i * 0x20)); + RzILOpPure *op_MUL_12 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_LSHIFT_13 = SHIFTL0(SN(64, 0xffffffff), op_MUL_12); + RzILOpPure *op_NOT_14 = LOGNOT(op_LSHIFT_13); + RzILOpPure *op_AND_15 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_14); + RzILOpPure *op_MUL_21 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_RSHIFT_22 = SHIFTRA(Rtt, op_MUL_21); + RzILOpPure *op_AND_24 = LOGAND(op_RSHIFT_22, SN(64, 0xffffffff)); + RzILOpPure *op_MUL_37 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_RSHIFT_38 = SHIFTRA(Rss, op_MUL_37); + RzILOpPure *op_AND_40 = LOGAND(op_RSHIFT_38, SN(64, 0xffffffff)); + RzILOpPure *op_SUB_48 = SUB(SEXTRACT64(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(op_AND_24), DUP(op_AND_24))), CAST(32, MSB(DUP(op_AND_24)), DUP(op_AND_24)))), SN(32, 0), SN(32, 0x20)), SEXTRACT64(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(op_AND_40), DUP(op_AND_40))), CAST(32, MSB(DUP(op_AND_40)), DUP(op_AND_40)))), SN(32, 0), SN(32, 0x20))); + RzILOpPure *op_RSHIFT_50 = SHIFTRA(op_SUB_48, SN(32, 1)); + RzILOpPure *op_AND_52 = LOGAND(op_RSHIFT_50, SN(64, 0xffffffff)); + RzILOpPure *op_MUL_54 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_LSHIFT_55 = SHIFTL0(op_AND_52, op_MUL_54); + RzILOpPure *op_OR_56 = LOGOR(op_AND_15, op_LSHIFT_55); + RzILOpEffect *op_ASSIGN_57 = WRITE_REG(bundle, Rdd_op, op_OR_56); + + // seq(h_tmp93; Rdd = ((Rdd & (~(0xffffffff << i * 0x20))) | (((sex ...; + RzILOpEffect *seq_59 = op_ASSIGN_57; + + // seq(seq(h_tmp93; Rdd = ((Rdd & (~(0xffffffff << i * 0x20))) | (( ...; + RzILOpEffect *seq_60 = SEQN(2, seq_59, seq_8); + + // while ((i < 0x2)) { seq(seq(h_tmp93; Rdd = ((Rdd & (~(0xffffffff << i * 0x20))) | (( ... 
}; + RzILOpPure *op_LT_4 = SLT(VARL("i"), SN(32, 2)); + RzILOpEffect *for_61 = REPEAT(op_LT_4, seq_60); + + // seq(i = 0x0; while ((i < 0x2)) { seq(seq(h_tmp93; Rdd = ((Rdd & ...; + RzILOpEffect *seq_62 = SEQN(2, op_ASSIGN_2, for_61); + + RzILOpEffect *instruction_sequence = seq_62; + return instruction_sequence; +} + +// Rdd = vnavgw(Rtt,Rss):crnd:sat +RzILOpEffect *hex_il_op_a2_vnavgwcr(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: st32 i; + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_2 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_5 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp94 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_7 = SETL("h_tmp94", VARL("i")); + + // seq(h_tmp94 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_8 = SEQN(2, op_ASSIGN_hybrid_tmp_7, op_INC_5); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_344 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((((sextract64(((ut64) ((st64) ((st32) ((Rtt >> i * 0x20) & 0xffffffff)))), 0x0, 0x20) - sextract64(((ut64) ((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff)))), 0x0, 0x20) & ((st64) 0x3)) == ((st64) 0x3)) ? 
sextract64(((ut64) ((st64) ((st32) ((Rtt >> i * 0x20) & 0xffffffff)))), 0x0, 0x20) - sextract64(((ut64) ((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff)))), 0x0, 0x20) + ((st64) 0x1) : sextract64(((ut64) ((st64) ((st32) ((Rtt >> i * 0x20) & 0xffffffff)))), 0x0, 0x20) - sextract64(((ut64) ((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff)))), 0x0, 0x20)) >> 0x1)), 0x0, 0x20) == ((((sextract64(((ut64) ((st64) ((st32) ((Rtt >> i * 0x20) & 0xffffffff)))), 0x0, 0x20) - sextract64(((ut64) ((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff)))), 0x0, 0x20) & ((st64) 0x3)) == ((st64) 0x3)) ? sextract64(((ut64) ((st64) ((st32) ((Rtt >> i * 0x20) & 0xffffffff)))), 0x0, 0x20) - sextract64(((ut64) ((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff)))), 0x0, 0x20) + ((st64) 0x1) : sextract64(((ut64) ((st64) ((st32) ((Rtt >> i * 0x20) & 0xffffffff)))), 0x0, 0x20) - sextract64(((ut64) ((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff)))), 0x0, 0x20)) >> 0x1))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((((sextract64(((ut64) ((st64) ((st32) ((Rtt >> i * 0x20) & 0xffffffff)))), 0x0, 0x20) - sextract64(((ut64) ((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff)))), 0x0, 0x20) & ((st64) 0x3)) == ((st64) 0x3)) ? sextract64(((ut64) ((st64) ((st32) ((Rtt >> i * 0x20) & 0xffffffff)))), 0x0, 0x20) - sextract64(((ut64) ((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff)))), 0x0, 0x20) + ((st64) 0x1) : sextract64(((ut64) ((st64) ((st32) ((Rtt >> i * 0x20) & 0xffffffff)))), 0x0, 0x20) - sextract64(((ut64) ((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff)))), 0x0, 0x20)) >> 0x1) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_MUL_24 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_RSHIFT_25 = SHIFTRA(Rtt, op_MUL_24); + RzILOpPure *op_AND_27 = LOGAND(op_RSHIFT_25, SN(64, 0xffffffff)); + RzILOpPure *op_MUL_40 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_RSHIFT_41 = SHIFTRA(Rss, op_MUL_40); + RzILOpPure *op_AND_43 = LOGAND(op_RSHIFT_41, SN(64, 0xffffffff)); + RzILOpPure *op_SUB_51 = SUB(SEXTRACT64(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(op_AND_27), DUP(op_AND_27))), CAST(32, MSB(DUP(op_AND_27)), DUP(op_AND_27)))), SN(32, 0), SN(32, 0x20)), SEXTRACT64(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(op_AND_43), DUP(op_AND_43))), CAST(32, MSB(DUP(op_AND_43)), DUP(op_AND_43)))), SN(32, 0), SN(32, 0x20))); + RzILOpPure *op_AND_54 = LOGAND(op_SUB_51, CAST(64, MSB(SN(32, 3)), SN(32, 3))); + RzILOpPure *op_EQ_57 = EQ(op_AND_54, CAST(64, MSB(SN(32, 3)), SN(32, 3))); + RzILOpPure *op_MUL_62 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_RSHIFT_63 = SHIFTRA(DUP(Rtt), op_MUL_62); + RzILOpPure *op_AND_65 = LOGAND(op_RSHIFT_63, SN(64, 0xffffffff)); + RzILOpPure *op_MUL_77 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_RSHIFT_78 = SHIFTRA(DUP(Rss), op_MUL_77); + RzILOpPure *op_AND_80 = LOGAND(op_RSHIFT_78, SN(64, 0xffffffff)); + RzILOpPure *op_SUB_88 = SUB(SEXTRACT64(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(op_AND_65), DUP(op_AND_65))), CAST(32, MSB(DUP(op_AND_65)), DUP(op_AND_65)))), SN(32, 0), SN(32, 0x20)), SEXTRACT64(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(op_AND_80), DUP(op_AND_80))), CAST(32, MSB(DUP(op_AND_80)), DUP(op_AND_80)))), SN(32, 0), SN(32, 0x20))); + RzILOpPure *op_ADD_91 = ADD(op_SUB_88, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *op_MUL_96 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_RSHIFT_97 = SHIFTRA(DUP(Rtt), op_MUL_96); + RzILOpPure *op_AND_99 = LOGAND(op_RSHIFT_97, SN(64, 0xffffffff)); + RzILOpPure *op_MUL_111 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_RSHIFT_112 = 
SHIFTRA(DUP(Rss), op_MUL_111); + RzILOpPure *op_AND_114 = LOGAND(op_RSHIFT_112, SN(64, 0xffffffff)); + RzILOpPure *op_SUB_122 = SUB(SEXTRACT64(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(op_AND_99), DUP(op_AND_99))), CAST(32, MSB(DUP(op_AND_99)), DUP(op_AND_99)))), SN(32, 0), SN(32, 0x20)), SEXTRACT64(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(op_AND_114), DUP(op_AND_114))), CAST(32, MSB(DUP(op_AND_114)), DUP(op_AND_114)))), SN(32, 0), SN(32, 0x20))); + RzILOpPure *cond_123 = ITE(op_EQ_57, op_ADD_91, op_SUB_122); + RzILOpPure *op_RSHIFT_125 = SHIFTRA(cond_123, SN(32, 1)); + RzILOpPure *op_MUL_135 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_RSHIFT_136 = SHIFTRA(DUP(Rtt), op_MUL_135); + RzILOpPure *op_AND_138 = LOGAND(op_RSHIFT_136, SN(64, 0xffffffff)); + RzILOpPure *op_MUL_150 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_RSHIFT_151 = SHIFTRA(DUP(Rss), op_MUL_150); + RzILOpPure *op_AND_153 = LOGAND(op_RSHIFT_151, SN(64, 0xffffffff)); + RzILOpPure *op_SUB_161 = SUB(SEXTRACT64(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(op_AND_138), DUP(op_AND_138))), CAST(32, MSB(DUP(op_AND_138)), DUP(op_AND_138)))), SN(32, 0), SN(32, 0x20)), SEXTRACT64(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(op_AND_153), DUP(op_AND_153))), CAST(32, MSB(DUP(op_AND_153)), DUP(op_AND_153)))), SN(32, 0), SN(32, 0x20))); + RzILOpPure *op_AND_164 = LOGAND(op_SUB_161, CAST(64, MSB(SN(32, 3)), SN(32, 3))); + RzILOpPure *op_EQ_167 = EQ(op_AND_164, CAST(64, MSB(SN(32, 3)), SN(32, 3))); + RzILOpPure *op_MUL_172 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_RSHIFT_173 = SHIFTRA(DUP(Rtt), op_MUL_172); + RzILOpPure *op_AND_175 = LOGAND(op_RSHIFT_173, SN(64, 0xffffffff)); + RzILOpPure *op_MUL_187 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_RSHIFT_188 = SHIFTRA(DUP(Rss), op_MUL_187); + RzILOpPure *op_AND_190 = LOGAND(op_RSHIFT_188, SN(64, 0xffffffff)); + RzILOpPure *op_SUB_198 = SUB(SEXTRACT64(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(op_AND_175), DUP(op_AND_175))), CAST(32, 
MSB(DUP(op_AND_175)), DUP(op_AND_175)))), SN(32, 0), SN(32, 0x20)), SEXTRACT64(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(op_AND_190), DUP(op_AND_190))), CAST(32, MSB(DUP(op_AND_190)), DUP(op_AND_190)))), SN(32, 0), SN(32, 0x20))); + RzILOpPure *op_ADD_201 = ADD(op_SUB_198, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *op_MUL_206 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_RSHIFT_207 = SHIFTRA(DUP(Rtt), op_MUL_206); + RzILOpPure *op_AND_209 = LOGAND(op_RSHIFT_207, SN(64, 0xffffffff)); + RzILOpPure *op_MUL_221 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_RSHIFT_222 = SHIFTRA(DUP(Rss), op_MUL_221); + RzILOpPure *op_AND_224 = LOGAND(op_RSHIFT_222, SN(64, 0xffffffff)); + RzILOpPure *op_SUB_232 = SUB(SEXTRACT64(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(op_AND_209), DUP(op_AND_209))), CAST(32, MSB(DUP(op_AND_209)), DUP(op_AND_209)))), SN(32, 0), SN(32, 0x20)), SEXTRACT64(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(op_AND_224), DUP(op_AND_224))), CAST(32, MSB(DUP(op_AND_224)), DUP(op_AND_224)))), SN(32, 0), SN(32, 0x20))); + RzILOpPure *cond_233 = ITE(op_EQ_167, op_ADD_201, op_SUB_232); + RzILOpPure *op_RSHIFT_235 = SHIFTRA(cond_233, SN(32, 1)); + RzILOpPure *op_EQ_236 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_RSHIFT_125), SN(32, 0), SN(32, 0x20)), op_RSHIFT_235); + RzILOpPure *op_MUL_349 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_RSHIFT_350 = SHIFTRA(DUP(Rtt), op_MUL_349); + RzILOpPure *op_AND_352 = LOGAND(op_RSHIFT_350, SN(64, 0xffffffff)); + RzILOpPure *op_MUL_364 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_RSHIFT_365 = SHIFTRA(DUP(Rss), op_MUL_364); + RzILOpPure *op_AND_367 = LOGAND(op_RSHIFT_365, SN(64, 0xffffffff)); + RzILOpPure *op_SUB_375 = SUB(SEXTRACT64(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(op_AND_352), DUP(op_AND_352))), CAST(32, MSB(DUP(op_AND_352)), DUP(op_AND_352)))), SN(32, 0), SN(32, 0x20)), SEXTRACT64(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(op_AND_367), DUP(op_AND_367))), CAST(32, MSB(DUP(op_AND_367)), 
DUP(op_AND_367)))), SN(32, 0), SN(32, 0x20))); + RzILOpPure *op_AND_378 = LOGAND(op_SUB_375, CAST(64, MSB(SN(32, 3)), SN(32, 3))); + RzILOpPure *op_EQ_381 = EQ(op_AND_378, CAST(64, MSB(SN(32, 3)), SN(32, 3))); + RzILOpPure *op_MUL_386 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_RSHIFT_387 = SHIFTRA(DUP(Rtt), op_MUL_386); + RzILOpPure *op_AND_389 = LOGAND(op_RSHIFT_387, SN(64, 0xffffffff)); + RzILOpPure *op_MUL_401 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_RSHIFT_402 = SHIFTRA(DUP(Rss), op_MUL_401); + RzILOpPure *op_AND_404 = LOGAND(op_RSHIFT_402, SN(64, 0xffffffff)); + RzILOpPure *op_SUB_412 = SUB(SEXTRACT64(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(op_AND_389), DUP(op_AND_389))), CAST(32, MSB(DUP(op_AND_389)), DUP(op_AND_389)))), SN(32, 0), SN(32, 0x20)), SEXTRACT64(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(op_AND_404), DUP(op_AND_404))), CAST(32, MSB(DUP(op_AND_404)), DUP(op_AND_404)))), SN(32, 0), SN(32, 0x20))); + RzILOpPure *op_ADD_415 = ADD(op_SUB_412, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *op_MUL_420 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_RSHIFT_421 = SHIFTRA(DUP(Rtt), op_MUL_420); + RzILOpPure *op_AND_423 = LOGAND(op_RSHIFT_421, SN(64, 0xffffffff)); + RzILOpPure *op_MUL_435 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_RSHIFT_436 = SHIFTRA(DUP(Rss), op_MUL_435); + RzILOpPure *op_AND_438 = LOGAND(op_RSHIFT_436, SN(64, 0xffffffff)); + RzILOpPure *op_SUB_446 = SUB(SEXTRACT64(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(op_AND_423), DUP(op_AND_423))), CAST(32, MSB(DUP(op_AND_423)), DUP(op_AND_423)))), SN(32, 0), SN(32, 0x20)), SEXTRACT64(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(op_AND_438), DUP(op_AND_438))), CAST(32, MSB(DUP(op_AND_438)), DUP(op_AND_438)))), SN(32, 0), SN(32, 0x20))); + RzILOpPure *cond_447 = ITE(op_EQ_381, op_ADD_415, op_SUB_446); + RzILOpPure *op_RSHIFT_449 = SHIFTRA(cond_447, SN(32, 1)); + RzILOpPure *op_LT_452 = SLT(op_RSHIFT_449, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure 
*op_LSHIFT_457 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_458 = NEG(op_LSHIFT_457); + RzILOpPure *op_LSHIFT_463 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_466 = SUB(op_LSHIFT_463, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_467 = ITE(op_LT_452, op_NEG_458, op_SUB_466); + RzILOpEffect *gcc_expr_468 = BRANCH(op_EQ_236, EMPTY(), set_usr_field_call_344); + + // h_tmp95 = HYB(gcc_expr_if ((sextract64(((ut64) ((((sextract64(((ut64) ((st64) ((st32) ((Rtt >> i * 0x20) & 0xffffffff)))), 0x0, 0x20) - sextract64(((ut64) ((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff)))), 0x0, 0x20) & ((st64) 0x3)) == ((st64) 0x3)) ? sextract64(((ut64) ((st64) ((st32) ((Rtt >> i * 0x20) & 0xffffffff)))), 0x0, 0x20) - sextract64(((ut64) ((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff)))), 0x0, 0x20) + ((st64) 0x1) : sextract64(((ut64) ((st64) ((st32) ((Rtt >> i * 0x20) & 0xffffffff)))), 0x0, 0x20) - sextract64(((ut64) ((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff)))), 0x0, 0x20)) >> 0x1)), 0x0, 0x20) == ((((sextract64(((ut64) ((st64) ((st32) ((Rtt >> i * 0x20) & 0xffffffff)))), 0x0, 0x20) - sextract64(((ut64) ((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff)))), 0x0, 0x20) & ((st64) 0x3)) == ((st64) 0x3)) ? sextract64(((ut64) ((st64) ((st32) ((Rtt >> i * 0x20) & 0xffffffff)))), 0x0, 0x20) - sextract64(((ut64) ((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff)))), 0x0, 0x20) + ((st64) 0x1) : sextract64(((ut64) ((st64) ((st32) ((Rtt >> i * 0x20) & 0xffffffff)))), 0x0, 0x20) - sextract64(((ut64) ((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff)))), 0x0, 0x20)) >> 0x1))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((((sextract64(((ut64) ((st64) ((st32) ((Rtt >> i * 0x20) & 0xffffffff)))), 0x0, 0x20) - sextract64(((ut64) ((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff)))), 0x0, 0x20) & ((st64) 0x3)) == ((st64) 0x3)) ? 
sextract64(((ut64) ((st64) ((st32) ((Rtt >> i * 0x20) & 0xffffffff)))), 0x0, 0x20) - sextract64(((ut64) ((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff)))), 0x0, 0x20) + ((st64) 0x1) : sextract64(((ut64) ((st64) ((st32) ((Rtt >> i * 0x20) & 0xffffffff)))), 0x0, 0x20) - sextract64(((ut64) ((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff)))), 0x0, 0x20)) >> 0x1) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_470 = SETL("h_tmp95", cond_467); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((((sextract64(((ut64) ...; + RzILOpEffect *seq_471 = SEQN(2, gcc_expr_468, op_ASSIGN_hybrid_tmp_470); + + // Rdd = ((Rdd & (~(0xffffffff << i * 0x20))) | ((((sextract64(((ut64) ((((sextract64(((ut64) ((st64) ((st32) ((Rtt >> i * 0x20) & 0xffffffff)))), 0x0, 0x20) - sextract64(((ut64) ((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff)))), 0x0, 0x20) & ((st64) 0x3)) == ((st64) 0x3)) ? sextract64(((ut64) ((st64) ((st32) ((Rtt >> i * 0x20) & 0xffffffff)))), 0x0, 0x20) - sextract64(((ut64) ((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff)))), 0x0, 0x20) + ((st64) 0x1) : sextract64(((ut64) ((st64) ((st32) ((Rtt >> i * 0x20) & 0xffffffff)))), 0x0, 0x20) - sextract64(((ut64) ((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff)))), 0x0, 0x20)) >> 0x1)), 0x0, 0x20) == ((((sextract64(((ut64) ((st64) ((st32) ((Rtt >> i * 0x20) & 0xffffffff)))), 0x0, 0x20) - sextract64(((ut64) ((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff)))), 0x0, 0x20) & ((st64) 0x3)) == ((st64) 0x3)) ? sextract64(((ut64) ((st64) ((st32) ((Rtt >> i * 0x20) & 0xffffffff)))), 0x0, 0x20) - sextract64(((ut64) ((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff)))), 0x0, 0x20) + ((st64) 0x1) : sextract64(((ut64) ((st64) ((st32) ((Rtt >> i * 0x20) & 0xffffffff)))), 0x0, 0x20) - sextract64(((ut64) ((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff)))), 0x0, 0x20)) >> 0x1)) ? 
((((sextract64(((ut64) ((st64) ((st32) ((Rtt >> i * 0x20) & 0xffffffff)))), 0x0, 0x20) - sextract64(((ut64) ((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff)))), 0x0, 0x20) & ((st64) 0x3)) == ((st64) 0x3)) ? sextract64(((ut64) ((st64) ((st32) ((Rtt >> i * 0x20) & 0xffffffff)))), 0x0, 0x20) - sextract64(((ut64) ((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff)))), 0x0, 0x20) + ((st64) 0x1) : sextract64(((ut64) ((st64) ((st32) ((Rtt >> i * 0x20) & 0xffffffff)))), 0x0, 0x20) - sextract64(((ut64) ((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff)))), 0x0, 0x20)) >> 0x1) : h_tmp95) & 0xffffffff) << i * 0x20)); + RzILOpPure *op_MUL_12 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_LSHIFT_13 = SHIFTL0(SN(64, 0xffffffff), op_MUL_12); + RzILOpPure *op_NOT_14 = LOGNOT(op_LSHIFT_13); + RzILOpPure *op_AND_15 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_14); + RzILOpPure *op_MUL_241 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_RSHIFT_242 = SHIFTRA(DUP(Rtt), op_MUL_241); + RzILOpPure *op_AND_244 = LOGAND(op_RSHIFT_242, SN(64, 0xffffffff)); + RzILOpPure *op_MUL_256 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_RSHIFT_257 = SHIFTRA(DUP(Rss), op_MUL_256); + RzILOpPure *op_AND_259 = LOGAND(op_RSHIFT_257, SN(64, 0xffffffff)); + RzILOpPure *op_SUB_267 = SUB(SEXTRACT64(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(op_AND_244), DUP(op_AND_244))), CAST(32, MSB(DUP(op_AND_244)), DUP(op_AND_244)))), SN(32, 0), SN(32, 0x20)), SEXTRACT64(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(op_AND_259), DUP(op_AND_259))), CAST(32, MSB(DUP(op_AND_259)), DUP(op_AND_259)))), SN(32, 0), SN(32, 0x20))); + RzILOpPure *op_AND_270 = LOGAND(op_SUB_267, CAST(64, MSB(SN(32, 3)), SN(32, 3))); + RzILOpPure *op_EQ_273 = EQ(op_AND_270, CAST(64, MSB(SN(32, 3)), SN(32, 3))); + RzILOpPure *op_MUL_278 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_RSHIFT_279 = SHIFTRA(DUP(Rtt), op_MUL_278); + RzILOpPure *op_AND_281 = LOGAND(op_RSHIFT_279, SN(64, 0xffffffff)); + RzILOpPure *op_MUL_293 = MUL(VARL("i"), SN(32, 
0x20)); + RzILOpPure *op_RSHIFT_294 = SHIFTRA(DUP(Rss), op_MUL_293); + RzILOpPure *op_AND_296 = LOGAND(op_RSHIFT_294, SN(64, 0xffffffff)); + RzILOpPure *op_SUB_304 = SUB(SEXTRACT64(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(op_AND_281), DUP(op_AND_281))), CAST(32, MSB(DUP(op_AND_281)), DUP(op_AND_281)))), SN(32, 0), SN(32, 0x20)), SEXTRACT64(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(op_AND_296), DUP(op_AND_296))), CAST(32, MSB(DUP(op_AND_296)), DUP(op_AND_296)))), SN(32, 0), SN(32, 0x20))); + RzILOpPure *op_ADD_307 = ADD(op_SUB_304, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *op_MUL_312 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_RSHIFT_313 = SHIFTRA(DUP(Rtt), op_MUL_312); + RzILOpPure *op_AND_315 = LOGAND(op_RSHIFT_313, SN(64, 0xffffffff)); + RzILOpPure *op_MUL_327 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_RSHIFT_328 = SHIFTRA(DUP(Rss), op_MUL_327); + RzILOpPure *op_AND_330 = LOGAND(op_RSHIFT_328, SN(64, 0xffffffff)); + RzILOpPure *op_SUB_338 = SUB(SEXTRACT64(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(op_AND_315), DUP(op_AND_315))), CAST(32, MSB(DUP(op_AND_315)), DUP(op_AND_315)))), SN(32, 0), SN(32, 0x20)), SEXTRACT64(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(op_AND_330), DUP(op_AND_330))), CAST(32, MSB(DUP(op_AND_330)), DUP(op_AND_330)))), SN(32, 0), SN(32, 0x20))); + RzILOpPure *cond_339 = ITE(op_EQ_273, op_ADD_307, op_SUB_338); + RzILOpPure *op_RSHIFT_341 = SHIFTRA(cond_339, SN(32, 1)); + RzILOpPure *cond_472 = ITE(DUP(op_EQ_236), op_RSHIFT_341, VARL("h_tmp95")); + RzILOpPure *op_AND_474 = LOGAND(cond_472, SN(64, 0xffffffff)); + RzILOpPure *op_MUL_476 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_LSHIFT_477 = SHIFTL0(op_AND_474, op_MUL_476); + RzILOpPure *op_OR_478 = LOGOR(op_AND_15, op_LSHIFT_477); + RzILOpEffect *op_ASSIGN_479 = WRITE_REG(bundle, Rdd_op, op_OR_478); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((((sextract64(((ut ...; + RzILOpEffect *seq_480 = SEQN(2, seq_471, op_ASSIGN_479); + + // seq(h_tmp94; 
seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((((se ...; + RzILOpEffect *seq_482 = seq_480; + + // seq(seq(h_tmp94; seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) (( ...; + RzILOpEffect *seq_483 = SEQN(2, seq_482, seq_8); + + // while ((i < 0x2)) { seq(seq(h_tmp94; seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) (( ... }; + RzILOpPure *op_LT_4 = SLT(VARL("i"), SN(32, 2)); + RzILOpEffect *for_484 = REPEAT(op_LT_4, seq_483); + + // seq(i = 0x0; while ((i < 0x2)) { seq(seq(h_tmp94; seq(seq(HYB(gc ...; + RzILOpEffect *seq_485 = SEQN(2, op_ASSIGN_2, for_484); + + RzILOpEffect *instruction_sequence = seq_485; + return instruction_sequence; +} + +// Rdd = vnavgw(Rtt,Rss):rnd:sat +RzILOpEffect *hex_il_op_a2_vnavgwr(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: st32 i; + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_2 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_5 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp96 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_7 = SETL("h_tmp96", VARL("i")); + + // seq(h_tmp96 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_8 = SEQN(2, op_ASSIGN_hybrid_tmp_7, op_INC_5); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_137 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) (sextract64(((ut64) ((st64) ((st32) ((Rtt >> i * 0x20) & 0xffffffff)))), 0x0, 0x20) - sextract64(((ut64) ((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff)))), 0x0, 0x20) + ((st64) 0x1) >> 0x1)), 0x0, 0x20) == (sextract64(((ut64) ((st64) ((st32) ((Rtt >> i * 0x20) & 0xffffffff)))), 0x0, 0x20) - sextract64(((ut64) ((st64) ((st32) ((Rss 
>> i * 0x20) & 0xffffffff)))), 0x0, 0x20) + ((st64) 0x1) >> 0x1))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((sextract64(((ut64) ((st64) ((st32) ((Rtt >> i * 0x20) & 0xffffffff)))), 0x0, 0x20) - sextract64(((ut64) ((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff)))), 0x0, 0x20) + ((st64) 0x1) >> 0x1) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_MUL_24 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_RSHIFT_25 = SHIFTRA(Rtt, op_MUL_24); + RzILOpPure *op_AND_27 = LOGAND(op_RSHIFT_25, SN(64, 0xffffffff)); + RzILOpPure *op_MUL_40 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_RSHIFT_41 = SHIFTRA(Rss, op_MUL_40); + RzILOpPure *op_AND_43 = LOGAND(op_RSHIFT_41, SN(64, 0xffffffff)); + RzILOpPure *op_SUB_51 = SUB(SEXTRACT64(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(op_AND_27), DUP(op_AND_27))), CAST(32, MSB(DUP(op_AND_27)), DUP(op_AND_27)))), SN(32, 0), SN(32, 0x20)), SEXTRACT64(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(op_AND_43), DUP(op_AND_43))), CAST(32, MSB(DUP(op_AND_43)), DUP(op_AND_43)))), SN(32, 0), SN(32, 0x20))); + RzILOpPure *op_ADD_54 = ADD(op_SUB_51, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *op_RSHIFT_56 = SHIFTRA(op_ADD_54, SN(32, 1)); + RzILOpPure *op_MUL_66 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_RSHIFT_67 = SHIFTRA(DUP(Rtt), op_MUL_66); + RzILOpPure *op_AND_69 = LOGAND(op_RSHIFT_67, SN(64, 0xffffffff)); + RzILOpPure *op_MUL_81 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_RSHIFT_82 = SHIFTRA(DUP(Rss), op_MUL_81); + RzILOpPure *op_AND_84 = LOGAND(op_RSHIFT_82, SN(64, 0xffffffff)); + RzILOpPure *op_SUB_92 = SUB(SEXTRACT64(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(op_AND_69), DUP(op_AND_69))), CAST(32, MSB(DUP(op_AND_69)), DUP(op_AND_69)))), SN(32, 0), SN(32, 0x20)), SEXTRACT64(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(op_AND_84), DUP(op_AND_84))), CAST(32, MSB(DUP(op_AND_84)), DUP(op_AND_84)))), SN(32, 0), SN(32, 0x20))); + RzILOpPure *op_ADD_95 = 
ADD(op_SUB_92, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *op_RSHIFT_97 = SHIFTRA(op_ADD_95, SN(32, 1)); + RzILOpPure *op_EQ_98 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_RSHIFT_56), SN(32, 0), SN(32, 0x20)), op_RSHIFT_97); + RzILOpPure *op_MUL_142 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_RSHIFT_143 = SHIFTRA(DUP(Rtt), op_MUL_142); + RzILOpPure *op_AND_145 = LOGAND(op_RSHIFT_143, SN(64, 0xffffffff)); + RzILOpPure *op_MUL_157 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_RSHIFT_158 = SHIFTRA(DUP(Rss), op_MUL_157); + RzILOpPure *op_AND_160 = LOGAND(op_RSHIFT_158, SN(64, 0xffffffff)); + RzILOpPure *op_SUB_168 = SUB(SEXTRACT64(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(op_AND_145), DUP(op_AND_145))), CAST(32, MSB(DUP(op_AND_145)), DUP(op_AND_145)))), SN(32, 0), SN(32, 0x20)), SEXTRACT64(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(op_AND_160), DUP(op_AND_160))), CAST(32, MSB(DUP(op_AND_160)), DUP(op_AND_160)))), SN(32, 0), SN(32, 0x20))); + RzILOpPure *op_ADD_171 = ADD(op_SUB_168, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *op_RSHIFT_173 = SHIFTRA(op_ADD_171, SN(32, 1)); + RzILOpPure *op_LT_176 = SLT(op_RSHIFT_173, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_181 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_182 = NEG(op_LSHIFT_181); + RzILOpPure *op_LSHIFT_187 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_190 = SUB(op_LSHIFT_187, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_191 = ITE(op_LT_176, op_NEG_182, op_SUB_190); + RzILOpEffect *gcc_expr_192 = BRANCH(op_EQ_98, EMPTY(), set_usr_field_call_137); + + // h_tmp97 = HYB(gcc_expr_if ((sextract64(((ut64) (sextract64(((ut64) ((st64) ((st32) ((Rtt >> i * 0x20) & 0xffffffff)))), 0x0, 0x20) - sextract64(((ut64) ((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff)))), 0x0, 0x20) + ((st64) 0x1) >> 0x1)), 0x0, 0x20) == (sextract64(((ut64) ((st64) ((st32) ((Rtt >> i * 0x20) & 0xffffffff)))), 0x0, 0x20) - sextract64(((ut64) ((st64) ((st32) ((Rss >> i * 0x20) & 
0xffffffff)))), 0x0, 0x20) + ((st64) 0x1) >> 0x1))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((sextract64(((ut64) ((st64) ((st32) ((Rtt >> i * 0x20) & 0xffffffff)))), 0x0, 0x20) - sextract64(((ut64) ((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff)))), 0x0, 0x20) + ((st64) 0x1) >> 0x1) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_194 = SETL("h_tmp97", cond_191); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) (sextract64(((ut64) ((s ...; + RzILOpEffect *seq_195 = SEQN(2, gcc_expr_192, op_ASSIGN_hybrid_tmp_194); + + // Rdd = ((Rdd & (~(0xffffffff << i * 0x20))) | ((((sextract64(((ut64) (sextract64(((ut64) ((st64) ((st32) ((Rtt >> i * 0x20) & 0xffffffff)))), 0x0, 0x20) - sextract64(((ut64) ((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff)))), 0x0, 0x20) + ((st64) 0x1) >> 0x1)), 0x0, 0x20) == (sextract64(((ut64) ((st64) ((st32) ((Rtt >> i * 0x20) & 0xffffffff)))), 0x0, 0x20) - sextract64(((ut64) ((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff)))), 0x0, 0x20) + ((st64) 0x1) >> 0x1)) ? 
(sextract64(((ut64) ((st64) ((st32) ((Rtt >> i * 0x20) & 0xffffffff)))), 0x0, 0x20) - sextract64(((ut64) ((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff)))), 0x0, 0x20) + ((st64) 0x1) >> 0x1) : h_tmp97) & 0xffffffff) << i * 0x20)); + RzILOpPure *op_MUL_12 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_LSHIFT_13 = SHIFTL0(SN(64, 0xffffffff), op_MUL_12); + RzILOpPure *op_NOT_14 = LOGNOT(op_LSHIFT_13); + RzILOpPure *op_AND_15 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_14); + RzILOpPure *op_MUL_103 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_RSHIFT_104 = SHIFTRA(DUP(Rtt), op_MUL_103); + RzILOpPure *op_AND_106 = LOGAND(op_RSHIFT_104, SN(64, 0xffffffff)); + RzILOpPure *op_MUL_118 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_RSHIFT_119 = SHIFTRA(DUP(Rss), op_MUL_118); + RzILOpPure *op_AND_121 = LOGAND(op_RSHIFT_119, SN(64, 0xffffffff)); + RzILOpPure *op_SUB_129 = SUB(SEXTRACT64(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(op_AND_106), DUP(op_AND_106))), CAST(32, MSB(DUP(op_AND_106)), DUP(op_AND_106)))), SN(32, 0), SN(32, 0x20)), SEXTRACT64(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(op_AND_121), DUP(op_AND_121))), CAST(32, MSB(DUP(op_AND_121)), DUP(op_AND_121)))), SN(32, 0), SN(32, 0x20))); + RzILOpPure *op_ADD_132 = ADD(op_SUB_129, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *op_RSHIFT_134 = SHIFTRA(op_ADD_132, SN(32, 1)); + RzILOpPure *cond_196 = ITE(DUP(op_EQ_98), op_RSHIFT_134, VARL("h_tmp97")); + RzILOpPure *op_AND_198 = LOGAND(cond_196, SN(64, 0xffffffff)); + RzILOpPure *op_MUL_200 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_LSHIFT_201 = SHIFTL0(op_AND_198, op_MUL_200); + RzILOpPure *op_OR_202 = LOGOR(op_AND_15, op_LSHIFT_201); + RzILOpEffect *op_ASSIGN_203 = WRITE_REG(bundle, Rdd_op, op_OR_202); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) (sextract64(((ut64) ...; + RzILOpEffect *seq_204 = SEQN(2, seq_195, op_ASSIGN_203); + + // seq(h_tmp96; seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) (sextr ...; + RzILOpEffect *seq_206 = seq_204; 
+ + // seq(seq(h_tmp96; seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) (s ...; + RzILOpEffect *seq_207 = SEQN(2, seq_206, seq_8); + + // while ((i < 0x2)) { seq(seq(h_tmp96; seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) (s ... }; + RzILOpPure *op_LT_4 = SLT(VARL("i"), SN(32, 2)); + RzILOpEffect *for_208 = REPEAT(op_LT_4, seq_207); + + // seq(i = 0x0; while ((i < 0x2)) { seq(seq(h_tmp96; seq(seq(HYB(gc ...; + RzILOpEffect *seq_209 = SEQN(2, op_ASSIGN_2, for_208); + + RzILOpEffect *instruction_sequence = seq_209; + return instruction_sequence; +} + +// Rdd = vraddub(Rss,Rtt) +RzILOpEffect *hex_il_op_a2_vraddub(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: st32 i; + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // Rdd = ((st64) 0x0); + RzILOpEffect *op_ASSIGN_4 = WRITE_REG(bundle, Rdd_op, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_6 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_9 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp98 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_11 = SETL("h_tmp98", VARL("i")); + + // seq(h_tmp98 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_12 = SEQN(2, op_ASSIGN_hybrid_tmp_11, op_INC_9); + + // Rdd = ((Rdd & (~(0xffffffff << 0x0))) | ((((st64) ((st32) ((Rdd >> 0x0) & 0xffffffff))) + ((st64) ((st32) ((ut8) ((Rss >> i * 0x8) & ((st64) 0xff)))) + ((st32) ((ut8) ((Rtt >> i * 0x8) & ((st64) 0xff))))) & 0xffffffff) << 0x0)); + RzILOpPure *op_LSHIFT_17 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0)); + RzILOpPure *op_NOT_18 = LOGNOT(op_LSHIFT_17); + RzILOpPure *op_AND_19 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_18); + RzILOpPure *op_RSHIFT_23 = SHIFTRA(READ_REG(pkt, Rdd_op, true), SN(32, 0)); + RzILOpPure *op_AND_25 = 
LOGAND(op_RSHIFT_23, SN(64, 0xffffffff)); + RzILOpPure *op_MUL_30 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_RSHIFT_31 = SHIFTRA(Rss, op_MUL_30); + RzILOpPure *op_AND_34 = LOGAND(op_RSHIFT_31, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_MUL_38 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_RSHIFT_39 = SHIFTRA(Rtt, op_MUL_38); + RzILOpPure *op_AND_42 = LOGAND(op_RSHIFT_39, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_ADD_46 = ADD(CAST(32, IL_FALSE, CAST(8, IL_FALSE, op_AND_34)), CAST(32, IL_FALSE, CAST(8, IL_FALSE, op_AND_42))); + RzILOpPure *op_ADD_48 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_25), DUP(op_AND_25))), CAST(32, MSB(DUP(op_AND_25)), DUP(op_AND_25))), CAST(64, MSB(op_ADD_46), DUP(op_ADD_46))); + RzILOpPure *op_AND_50 = LOGAND(op_ADD_48, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_54 = SHIFTL0(op_AND_50, SN(32, 0)); + RzILOpPure *op_OR_55 = LOGOR(op_AND_19, op_LSHIFT_54); + RzILOpEffect *op_ASSIGN_56 = WRITE_REG(bundle, Rdd_op, op_OR_55); + + // seq(h_tmp98; Rdd = ((Rdd & (~(0xffffffff << 0x0))) | ((((st64) ( ...; + RzILOpEffect *seq_58 = op_ASSIGN_56; + + // seq(seq(h_tmp98; Rdd = ((Rdd & (~(0xffffffff << 0x0))) | ((((st6 ...; + RzILOpEffect *seq_59 = SEQN(2, seq_58, seq_12); + + // while ((i < 0x4)) { seq(seq(h_tmp98; Rdd = ((Rdd & (~(0xffffffff << 0x0))) | ((((st6 ... 
}; + RzILOpPure *op_LT_8 = SLT(VARL("i"), SN(32, 4)); + RzILOpEffect *for_60 = REPEAT(op_LT_8, seq_59); + + // seq(i = 0x0; while ((i < 0x4)) { seq(seq(h_tmp98; Rdd = ((Rdd & ...; + RzILOpEffect *seq_61 = SEQN(2, op_ASSIGN_6, for_60); + + // i = 0x4; + RzILOpEffect *op_ASSIGN_63 = SETL("i", SN(32, 4)); + + // HYB(++i); + RzILOpEffect *op_INC_66 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp99 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_68 = SETL("h_tmp99", VARL("i")); + + // seq(h_tmp99 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_69 = SEQN(2, op_ASSIGN_hybrid_tmp_68, op_INC_66); + + // Rdd = ((Rdd & (~(0xffffffff << 0x20))) | ((((st64) ((st32) ((Rdd >> 0x20) & 0xffffffff))) + ((st64) ((st32) ((ut8) ((Rss >> i * 0x8) & ((st64) 0xff)))) + ((st32) ((ut8) ((Rtt >> i * 0x8) & ((st64) 0xff))))) & 0xffffffff) << 0x20)); + RzILOpPure *op_LSHIFT_74 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0x20)); + RzILOpPure *op_NOT_75 = LOGNOT(op_LSHIFT_74); + RzILOpPure *op_AND_76 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_75); + RzILOpPure *op_RSHIFT_80 = SHIFTRA(READ_REG(pkt, Rdd_op, true), SN(32, 0x20)); + RzILOpPure *op_AND_82 = LOGAND(op_RSHIFT_80, SN(64, 0xffffffff)); + RzILOpPure *op_MUL_86 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_RSHIFT_87 = SHIFTRA(DUP(Rss), op_MUL_86); + RzILOpPure *op_AND_90 = LOGAND(op_RSHIFT_87, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_MUL_93 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_RSHIFT_94 = SHIFTRA(DUP(Rtt), op_MUL_93); + RzILOpPure *op_AND_97 = LOGAND(op_RSHIFT_94, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_ADD_101 = ADD(CAST(32, IL_FALSE, CAST(8, IL_FALSE, op_AND_90)), CAST(32, IL_FALSE, CAST(8, IL_FALSE, op_AND_97))); + RzILOpPure *op_ADD_103 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_82), DUP(op_AND_82))), CAST(32, MSB(DUP(op_AND_82)), DUP(op_AND_82))), CAST(64, MSB(op_ADD_101), DUP(op_ADD_101))); + RzILOpPure *op_AND_105 = LOGAND(op_ADD_103, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_109 = 
SHIFTL0(op_AND_105, SN(32, 0x20)); + RzILOpPure *op_OR_110 = LOGOR(op_AND_76, op_LSHIFT_109); + RzILOpEffect *op_ASSIGN_111 = WRITE_REG(bundle, Rdd_op, op_OR_110); + + // seq(h_tmp99; Rdd = ((Rdd & (~(0xffffffff << 0x20))) | ((((st64) ...; + RzILOpEffect *seq_113 = op_ASSIGN_111; + + // seq(seq(h_tmp99; Rdd = ((Rdd & (~(0xffffffff << 0x20))) | ((((st ...; + RzILOpEffect *seq_114 = SEQN(2, seq_113, seq_69); + + // while ((i < 0x8)) { seq(seq(h_tmp99; Rdd = ((Rdd & (~(0xffffffff << 0x20))) | ((((st ... }; + RzILOpPure *op_LT_65 = SLT(VARL("i"), SN(32, 8)); + RzILOpEffect *for_115 = REPEAT(op_LT_65, seq_114); + + // seq(i = 0x4; while ((i < 0x8)) { seq(seq(h_tmp99; Rdd = ((Rdd & ...; + RzILOpEffect *seq_116 = SEQN(2, op_ASSIGN_63, for_115); + + RzILOpEffect *instruction_sequence = SEQN(3, op_ASSIGN_4, seq_61, seq_116); + return instruction_sequence; +} + +// Rxx += vraddub(Rss,Rtt) +RzILOpEffect *hex_il_op_a2_vraddub_acc(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: st32 i; + const HexOp *Rxx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_2 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_5 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp100 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_7 = SETL("h_tmp100", VARL("i")); + + // seq(h_tmp100 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_8 = SEQN(2, op_ASSIGN_hybrid_tmp_7, op_INC_5); + + // Rxx = ((Rxx & (~(0xffffffff << 0x0))) | ((((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((st64) ((st32) ((ut8) ((Rss >> i * 0x8) & ((st64) 0xff)))) + ((st32) ((ut8) ((Rtt >> i * 0x8) & ((st64) 0xff))))) & 0xffffffff) << 0x0)); + RzILOpPure *op_LSHIFT_14 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0)); + RzILOpPure *op_NOT_15 = 
LOGNOT(op_LSHIFT_14); + RzILOpPure *op_AND_16 = LOGAND(READ_REG(pkt, Rxx_op, false), op_NOT_15); + RzILOpPure *op_RSHIFT_20 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_22 = LOGAND(op_RSHIFT_20, SN(64, 0xffffffff)); + RzILOpPure *op_MUL_27 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_RSHIFT_28 = SHIFTRA(Rss, op_MUL_27); + RzILOpPure *op_AND_31 = LOGAND(op_RSHIFT_28, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_MUL_35 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_RSHIFT_36 = SHIFTRA(Rtt, op_MUL_35); + RzILOpPure *op_AND_39 = LOGAND(op_RSHIFT_36, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_ADD_43 = ADD(CAST(32, IL_FALSE, CAST(8, IL_FALSE, op_AND_31)), CAST(32, IL_FALSE, CAST(8, IL_FALSE, op_AND_39))); + RzILOpPure *op_ADD_45 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_22), DUP(op_AND_22))), CAST(32, MSB(DUP(op_AND_22)), DUP(op_AND_22))), CAST(64, MSB(op_ADD_43), DUP(op_ADD_43))); + RzILOpPure *op_AND_47 = LOGAND(op_ADD_45, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_51 = SHIFTL0(op_AND_47, SN(32, 0)); + RzILOpPure *op_OR_52 = LOGOR(op_AND_16, op_LSHIFT_51); + RzILOpEffect *op_ASSIGN_53 = WRITE_REG(bundle, Rxx_op, op_OR_52); + + // seq(h_tmp100; Rxx = ((Rxx & (~(0xffffffff << 0x0))) | ((((st64) ...; + RzILOpEffect *seq_55 = op_ASSIGN_53; + + // seq(seq(h_tmp100; Rxx = ((Rxx & (~(0xffffffff << 0x0))) | ((((st ...; + RzILOpEffect *seq_56 = SEQN(2, seq_55, seq_8); + + // while ((i < 0x4)) { seq(seq(h_tmp100; Rxx = ((Rxx & (~(0xffffffff << 0x0))) | ((((st ... 
}; + RzILOpPure *op_LT_4 = SLT(VARL("i"), SN(32, 4)); + RzILOpEffect *for_57 = REPEAT(op_LT_4, seq_56); + + // seq(i = 0x0; while ((i < 0x4)) { seq(seq(h_tmp100; Rxx = ((Rxx & ...; + RzILOpEffect *seq_58 = SEQN(2, op_ASSIGN_2, for_57); + + // i = 0x4; + RzILOpEffect *op_ASSIGN_60 = SETL("i", SN(32, 4)); + + // HYB(++i); + RzILOpEffect *op_INC_63 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp101 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_65 = SETL("h_tmp101", VARL("i")); + + // seq(h_tmp101 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_66 = SEQN(2, op_ASSIGN_hybrid_tmp_65, op_INC_63); + + // Rxx = ((Rxx & (~(0xffffffff << 0x20))) | ((((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + ((st64) ((st32) ((ut8) ((Rss >> i * 0x8) & ((st64) 0xff)))) + ((st32) ((ut8) ((Rtt >> i * 0x8) & ((st64) 0xff))))) & 0xffffffff) << 0x20)); + RzILOpPure *op_LSHIFT_71 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0x20)); + RzILOpPure *op_NOT_72 = LOGNOT(op_LSHIFT_71); + RzILOpPure *op_AND_73 = LOGAND(READ_REG(pkt, Rxx_op, false), op_NOT_72); + RzILOpPure *op_RSHIFT_77 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_79 = LOGAND(op_RSHIFT_77, SN(64, 0xffffffff)); + RzILOpPure *op_MUL_83 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_RSHIFT_84 = SHIFTRA(DUP(Rss), op_MUL_83); + RzILOpPure *op_AND_87 = LOGAND(op_RSHIFT_84, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_MUL_90 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_RSHIFT_91 = SHIFTRA(DUP(Rtt), op_MUL_90); + RzILOpPure *op_AND_94 = LOGAND(op_RSHIFT_91, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_ADD_98 = ADD(CAST(32, IL_FALSE, CAST(8, IL_FALSE, op_AND_87)), CAST(32, IL_FALSE, CAST(8, IL_FALSE, op_AND_94))); + RzILOpPure *op_ADD_100 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_79), DUP(op_AND_79))), CAST(32, MSB(DUP(op_AND_79)), DUP(op_AND_79))), CAST(64, MSB(op_ADD_98), DUP(op_ADD_98))); + RzILOpPure *op_AND_102 = LOGAND(op_ADD_100, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_106 = 
SHIFTL0(op_AND_102, SN(32, 0x20)); + RzILOpPure *op_OR_107 = LOGOR(op_AND_73, op_LSHIFT_106); + RzILOpEffect *op_ASSIGN_108 = WRITE_REG(bundle, Rxx_op, op_OR_107); + + // seq(h_tmp101; Rxx = ((Rxx & (~(0xffffffff << 0x20))) | ((((st64) ...; + RzILOpEffect *seq_110 = op_ASSIGN_108; + + // seq(seq(h_tmp101; Rxx = ((Rxx & (~(0xffffffff << 0x20))) | ((((s ...; + RzILOpEffect *seq_111 = SEQN(2, seq_110, seq_66); + + // while ((i < 0x8)) { seq(seq(h_tmp101; Rxx = ((Rxx & (~(0xffffffff << 0x20))) | ((((s ... }; + RzILOpPure *op_LT_62 = SLT(VARL("i"), SN(32, 8)); + RzILOpEffect *for_112 = REPEAT(op_LT_62, seq_111); + + // seq(i = 0x4; while ((i < 0x8)) { seq(seq(h_tmp101; Rxx = ((Rxx & ...; + RzILOpEffect *seq_113 = SEQN(2, op_ASSIGN_60, for_112); + + RzILOpEffect *instruction_sequence = SEQN(2, seq_58, seq_113); + return instruction_sequence; +} + +// Rdd = vrsadub(Rss,Rtt) +RzILOpEffect *hex_il_op_a2_vrsadub(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: st32 i; + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // Rdd = ((st64) 0x0); + RzILOpEffect *op_ASSIGN_4 = WRITE_REG(bundle, Rdd_op, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_6 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_9 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp102 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_11 = SETL("h_tmp102", VARL("i")); + + // seq(h_tmp102 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_12 = SEQN(2, op_ASSIGN_hybrid_tmp_11, op_INC_9); + + // Rdd = ((Rdd & (~(0xffffffff << 0x0))) | ((((st64) ((st32) ((Rdd >> 0x0) & 0xffffffff))) + ((st64) ((((st32) ((ut8) ((Rss >> i * 0x8) & ((st64) 0xff)))) - ((st32) ((ut8) ((Rtt >> i * 0x8) & ((st64) 0xff)))) < 0x0) ? 
(-((st32) ((ut8) ((Rss >> i * 0x8) & ((st64) 0xff)))) - ((st32) ((ut8) ((Rtt >> i * 0x8) & ((st64) 0xff))))) : ((st32) ((ut8) ((Rss >> i * 0x8) & ((st64) 0xff)))) - ((st32) ((ut8) ((Rtt >> i * 0x8) & ((st64) 0xff)))))) & 0xffffffff) << 0x0)); + RzILOpPure *op_LSHIFT_17 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0)); + RzILOpPure *op_NOT_18 = LOGNOT(op_LSHIFT_17); + RzILOpPure *op_AND_19 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_18); + RzILOpPure *op_RSHIFT_23 = SHIFTRA(READ_REG(pkt, Rdd_op, true), SN(32, 0)); + RzILOpPure *op_AND_25 = LOGAND(op_RSHIFT_23, SN(64, 0xffffffff)); + RzILOpPure *op_MUL_30 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_RSHIFT_31 = SHIFTRA(Rss, op_MUL_30); + RzILOpPure *op_AND_34 = LOGAND(op_RSHIFT_31, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_MUL_38 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_RSHIFT_39 = SHIFTRA(Rtt, op_MUL_38); + RzILOpPure *op_AND_42 = LOGAND(op_RSHIFT_39, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_SUB_46 = SUB(CAST(32, IL_FALSE, CAST(8, IL_FALSE, op_AND_34)), CAST(32, IL_FALSE, CAST(8, IL_FALSE, op_AND_42))); + RzILOpPure *op_LT_48 = SLT(op_SUB_46, SN(32, 0)); + RzILOpPure *op_MUL_50 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_RSHIFT_51 = SHIFTRA(DUP(Rss), op_MUL_50); + RzILOpPure *op_AND_54 = LOGAND(op_RSHIFT_51, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_MUL_57 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_RSHIFT_58 = SHIFTRA(DUP(Rtt), op_MUL_57); + RzILOpPure *op_AND_61 = LOGAND(op_RSHIFT_58, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_SUB_65 = SUB(CAST(32, IL_FALSE, CAST(8, IL_FALSE, op_AND_54)), CAST(32, IL_FALSE, CAST(8, IL_FALSE, op_AND_61))); + RzILOpPure *op_NEG_66 = NEG(op_SUB_65); + RzILOpPure *op_MUL_68 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_RSHIFT_69 = SHIFTRA(DUP(Rss), op_MUL_68); + RzILOpPure *op_AND_72 = LOGAND(op_RSHIFT_69, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_MUL_75 = MUL(VARL("i"), SN(32, 8)); + 
RzILOpPure *op_RSHIFT_76 = SHIFTRA(DUP(Rtt), op_MUL_75); + RzILOpPure *op_AND_79 = LOGAND(op_RSHIFT_76, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_SUB_83 = SUB(CAST(32, IL_FALSE, CAST(8, IL_FALSE, op_AND_72)), CAST(32, IL_FALSE, CAST(8, IL_FALSE, op_AND_79))); + RzILOpPure *cond_84 = ITE(op_LT_48, op_NEG_66, op_SUB_83); + RzILOpPure *op_ADD_86 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_25), DUP(op_AND_25))), CAST(32, MSB(DUP(op_AND_25)), DUP(op_AND_25))), CAST(64, MSB(cond_84), DUP(cond_84))); + RzILOpPure *op_AND_88 = LOGAND(op_ADD_86, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_92 = SHIFTL0(op_AND_88, SN(32, 0)); + RzILOpPure *op_OR_93 = LOGOR(op_AND_19, op_LSHIFT_92); + RzILOpEffect *op_ASSIGN_94 = WRITE_REG(bundle, Rdd_op, op_OR_93); + + // seq(h_tmp102; Rdd = ((Rdd & (~(0xffffffff << 0x0))) | ((((st64) ...; + RzILOpEffect *seq_96 = op_ASSIGN_94; + + // seq(seq(h_tmp102; Rdd = ((Rdd & (~(0xffffffff << 0x0))) | ((((st ...; + RzILOpEffect *seq_97 = SEQN(2, seq_96, seq_12); + + // while ((i < 0x4)) { seq(seq(h_tmp102; Rdd = ((Rdd & (~(0xffffffff << 0x0))) | ((((st ... }; + RzILOpPure *op_LT_8 = SLT(VARL("i"), SN(32, 4)); + RzILOpEffect *for_98 = REPEAT(op_LT_8, seq_97); + + // seq(i = 0x0; while ((i < 0x4)) { seq(seq(h_tmp102; Rdd = ((Rdd & ...; + RzILOpEffect *seq_99 = SEQN(2, op_ASSIGN_6, for_98); + + // i = 0x4; + RzILOpEffect *op_ASSIGN_101 = SETL("i", SN(32, 4)); + + // HYB(++i); + RzILOpEffect *op_INC_104 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp103 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_106 = SETL("h_tmp103", VARL("i")); + + // seq(h_tmp103 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_107 = SEQN(2, op_ASSIGN_hybrid_tmp_106, op_INC_104); + + // Rdd = ((Rdd & (~(0xffffffff << 0x20))) | ((((st64) ((st32) ((Rdd >> 0x20) & 0xffffffff))) + ((st64) ((((st32) ((ut8) ((Rss >> i * 0x8) & ((st64) 0xff)))) - ((st32) ((ut8) ((Rtt >> i * 0x8) & ((st64) 0xff)))) < 0x0) ? 
(-((st32) ((ut8) ((Rss >> i * 0x8) & ((st64) 0xff)))) - ((st32) ((ut8) ((Rtt >> i * 0x8) & ((st64) 0xff))))) : ((st32) ((ut8) ((Rss >> i * 0x8) & ((st64) 0xff)))) - ((st32) ((ut8) ((Rtt >> i * 0x8) & ((st64) 0xff)))))) & 0xffffffff) << 0x20)); + RzILOpPure *op_LSHIFT_112 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0x20)); + RzILOpPure *op_NOT_113 = LOGNOT(op_LSHIFT_112); + RzILOpPure *op_AND_114 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_113); + RzILOpPure *op_RSHIFT_118 = SHIFTRA(READ_REG(pkt, Rdd_op, true), SN(32, 0x20)); + RzILOpPure *op_AND_120 = LOGAND(op_RSHIFT_118, SN(64, 0xffffffff)); + RzILOpPure *op_MUL_124 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_RSHIFT_125 = SHIFTRA(DUP(Rss), op_MUL_124); + RzILOpPure *op_AND_128 = LOGAND(op_RSHIFT_125, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_MUL_131 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_RSHIFT_132 = SHIFTRA(DUP(Rtt), op_MUL_131); + RzILOpPure *op_AND_135 = LOGAND(op_RSHIFT_132, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_SUB_139 = SUB(CAST(32, IL_FALSE, CAST(8, IL_FALSE, op_AND_128)), CAST(32, IL_FALSE, CAST(8, IL_FALSE, op_AND_135))); + RzILOpPure *op_LT_141 = SLT(op_SUB_139, SN(32, 0)); + RzILOpPure *op_MUL_143 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_RSHIFT_144 = SHIFTRA(DUP(Rss), op_MUL_143); + RzILOpPure *op_AND_147 = LOGAND(op_RSHIFT_144, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_MUL_150 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_RSHIFT_151 = SHIFTRA(DUP(Rtt), op_MUL_150); + RzILOpPure *op_AND_154 = LOGAND(op_RSHIFT_151, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_SUB_158 = SUB(CAST(32, IL_FALSE, CAST(8, IL_FALSE, op_AND_147)), CAST(32, IL_FALSE, CAST(8, IL_FALSE, op_AND_154))); + RzILOpPure *op_NEG_159 = NEG(op_SUB_158); + RzILOpPure *op_MUL_161 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_RSHIFT_162 = SHIFTRA(DUP(Rss), op_MUL_161); + RzILOpPure *op_AND_165 = LOGAND(op_RSHIFT_162, CAST(64, MSB(SN(32, 0xff)), SN(32, 
0xff))); + RzILOpPure *op_MUL_168 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_RSHIFT_169 = SHIFTRA(DUP(Rtt), op_MUL_168); + RzILOpPure *op_AND_172 = LOGAND(op_RSHIFT_169, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_SUB_176 = SUB(CAST(32, IL_FALSE, CAST(8, IL_FALSE, op_AND_165)), CAST(32, IL_FALSE, CAST(8, IL_FALSE, op_AND_172))); + RzILOpPure *cond_177 = ITE(op_LT_141, op_NEG_159, op_SUB_176); + RzILOpPure *op_ADD_179 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_120), DUP(op_AND_120))), CAST(32, MSB(DUP(op_AND_120)), DUP(op_AND_120))), CAST(64, MSB(cond_177), DUP(cond_177))); + RzILOpPure *op_AND_181 = LOGAND(op_ADD_179, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_185 = SHIFTL0(op_AND_181, SN(32, 0x20)); + RzILOpPure *op_OR_186 = LOGOR(op_AND_114, op_LSHIFT_185); + RzILOpEffect *op_ASSIGN_187 = WRITE_REG(bundle, Rdd_op, op_OR_186); + + // seq(h_tmp103; Rdd = ((Rdd & (~(0xffffffff << 0x20))) | ((((st64) ...; + RzILOpEffect *seq_189 = op_ASSIGN_187; + + // seq(seq(h_tmp103; Rdd = ((Rdd & (~(0xffffffff << 0x20))) | ((((s ...; + RzILOpEffect *seq_190 = SEQN(2, seq_189, seq_107); + + // while ((i < 0x8)) { seq(seq(h_tmp103; Rdd = ((Rdd & (~(0xffffffff << 0x20))) | ((((s ... 
}; + RzILOpPure *op_LT_103 = SLT(VARL("i"), SN(32, 8)); + RzILOpEffect *for_191 = REPEAT(op_LT_103, seq_190); + + // seq(i = 0x4; while ((i < 0x8)) { seq(seq(h_tmp103; Rdd = ((Rdd & ...; + RzILOpEffect *seq_192 = SEQN(2, op_ASSIGN_101, for_191); + + RzILOpEffect *instruction_sequence = SEQN(3, op_ASSIGN_4, seq_99, seq_192); + return instruction_sequence; +} + +// Rxx += vrsadub(Rss,Rtt) +RzILOpEffect *hex_il_op_a2_vrsadub_acc(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: st32 i; + const HexOp *Rxx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_2 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_5 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp104 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_7 = SETL("h_tmp104", VARL("i")); + + // seq(h_tmp104 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_8 = SEQN(2, op_ASSIGN_hybrid_tmp_7, op_INC_5); + + // Rxx = ((Rxx & (~(0xffffffff << 0x0))) | ((((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((st64) ((((st32) ((ut8) ((Rss >> i * 0x8) & ((st64) 0xff)))) - ((st32) ((ut8) ((Rtt >> i * 0x8) & ((st64) 0xff)))) < 0x0) ? 
(-((st32) ((ut8) ((Rss >> i * 0x8) & ((st64) 0xff)))) - ((st32) ((ut8) ((Rtt >> i * 0x8) & ((st64) 0xff))))) : ((st32) ((ut8) ((Rss >> i * 0x8) & ((st64) 0xff)))) - ((st32) ((ut8) ((Rtt >> i * 0x8) & ((st64) 0xff)))))) & 0xffffffff) << 0x0)); + RzILOpPure *op_LSHIFT_14 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0)); + RzILOpPure *op_NOT_15 = LOGNOT(op_LSHIFT_14); + RzILOpPure *op_AND_16 = LOGAND(READ_REG(pkt, Rxx_op, false), op_NOT_15); + RzILOpPure *op_RSHIFT_20 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_22 = LOGAND(op_RSHIFT_20, SN(64, 0xffffffff)); + RzILOpPure *op_MUL_27 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_RSHIFT_28 = SHIFTRA(Rss, op_MUL_27); + RzILOpPure *op_AND_31 = LOGAND(op_RSHIFT_28, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_MUL_35 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_RSHIFT_36 = SHIFTRA(Rtt, op_MUL_35); + RzILOpPure *op_AND_39 = LOGAND(op_RSHIFT_36, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_SUB_43 = SUB(CAST(32, IL_FALSE, CAST(8, IL_FALSE, op_AND_31)), CAST(32, IL_FALSE, CAST(8, IL_FALSE, op_AND_39))); + RzILOpPure *op_LT_45 = SLT(op_SUB_43, SN(32, 0)); + RzILOpPure *op_MUL_47 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_RSHIFT_48 = SHIFTRA(DUP(Rss), op_MUL_47); + RzILOpPure *op_AND_51 = LOGAND(op_RSHIFT_48, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_MUL_54 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_RSHIFT_55 = SHIFTRA(DUP(Rtt), op_MUL_54); + RzILOpPure *op_AND_58 = LOGAND(op_RSHIFT_55, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_SUB_62 = SUB(CAST(32, IL_FALSE, CAST(8, IL_FALSE, op_AND_51)), CAST(32, IL_FALSE, CAST(8, IL_FALSE, op_AND_58))); + RzILOpPure *op_NEG_63 = NEG(op_SUB_62); + RzILOpPure *op_MUL_65 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_RSHIFT_66 = SHIFTRA(DUP(Rss), op_MUL_65); + RzILOpPure *op_AND_69 = LOGAND(op_RSHIFT_66, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_MUL_72 = MUL(VARL("i"), SN(32, 8)); + 
RzILOpPure *op_RSHIFT_73 = SHIFTRA(DUP(Rtt), op_MUL_72); + RzILOpPure *op_AND_76 = LOGAND(op_RSHIFT_73, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_SUB_80 = SUB(CAST(32, IL_FALSE, CAST(8, IL_FALSE, op_AND_69)), CAST(32, IL_FALSE, CAST(8, IL_FALSE, op_AND_76))); + RzILOpPure *cond_81 = ITE(op_LT_45, op_NEG_63, op_SUB_80); + RzILOpPure *op_ADD_83 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_22), DUP(op_AND_22))), CAST(32, MSB(DUP(op_AND_22)), DUP(op_AND_22))), CAST(64, MSB(cond_81), DUP(cond_81))); + RzILOpPure *op_AND_85 = LOGAND(op_ADD_83, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_89 = SHIFTL0(op_AND_85, SN(32, 0)); + RzILOpPure *op_OR_90 = LOGOR(op_AND_16, op_LSHIFT_89); + RzILOpEffect *op_ASSIGN_91 = WRITE_REG(bundle, Rxx_op, op_OR_90); + + // seq(h_tmp104; Rxx = ((Rxx & (~(0xffffffff << 0x0))) | ((((st64) ...; + RzILOpEffect *seq_93 = op_ASSIGN_91; + + // seq(seq(h_tmp104; Rxx = ((Rxx & (~(0xffffffff << 0x0))) | ((((st ...; + RzILOpEffect *seq_94 = SEQN(2, seq_93, seq_8); + + // while ((i < 0x4)) { seq(seq(h_tmp104; Rxx = ((Rxx & (~(0xffffffff << 0x0))) | ((((st ... }; + RzILOpPure *op_LT_4 = SLT(VARL("i"), SN(32, 4)); + RzILOpEffect *for_95 = REPEAT(op_LT_4, seq_94); + + // seq(i = 0x0; while ((i < 0x4)) { seq(seq(h_tmp104; Rxx = ((Rxx & ...; + RzILOpEffect *seq_96 = SEQN(2, op_ASSIGN_2, for_95); + + // i = 0x4; + RzILOpEffect *op_ASSIGN_98 = SETL("i", SN(32, 4)); + + // HYB(++i); + RzILOpEffect *op_INC_101 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp105 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_103 = SETL("h_tmp105", VARL("i")); + + // seq(h_tmp105 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_104 = SEQN(2, op_ASSIGN_hybrid_tmp_103, op_INC_101); + + // Rxx = ((Rxx & (~(0xffffffff << 0x20))) | ((((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + ((st64) ((((st32) ((ut8) ((Rss >> i * 0x8) & ((st64) 0xff)))) - ((st32) ((ut8) ((Rtt >> i * 0x8) & ((st64) 0xff)))) < 0x0) ? 
(-((st32) ((ut8) ((Rss >> i * 0x8) & ((st64) 0xff)))) - ((st32) ((ut8) ((Rtt >> i * 0x8) & ((st64) 0xff))))) : ((st32) ((ut8) ((Rss >> i * 0x8) & ((st64) 0xff)))) - ((st32) ((ut8) ((Rtt >> i * 0x8) & ((st64) 0xff)))))) & 0xffffffff) << 0x20)); + RzILOpPure *op_LSHIFT_109 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0x20)); + RzILOpPure *op_NOT_110 = LOGNOT(op_LSHIFT_109); + RzILOpPure *op_AND_111 = LOGAND(READ_REG(pkt, Rxx_op, false), op_NOT_110); + RzILOpPure *op_RSHIFT_115 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_117 = LOGAND(op_RSHIFT_115, SN(64, 0xffffffff)); + RzILOpPure *op_MUL_121 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_RSHIFT_122 = SHIFTRA(DUP(Rss), op_MUL_121); + RzILOpPure *op_AND_125 = LOGAND(op_RSHIFT_122, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_MUL_128 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_RSHIFT_129 = SHIFTRA(DUP(Rtt), op_MUL_128); + RzILOpPure *op_AND_132 = LOGAND(op_RSHIFT_129, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_SUB_136 = SUB(CAST(32, IL_FALSE, CAST(8, IL_FALSE, op_AND_125)), CAST(32, IL_FALSE, CAST(8, IL_FALSE, op_AND_132))); + RzILOpPure *op_LT_138 = SLT(op_SUB_136, SN(32, 0)); + RzILOpPure *op_MUL_140 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_RSHIFT_141 = SHIFTRA(DUP(Rss), op_MUL_140); + RzILOpPure *op_AND_144 = LOGAND(op_RSHIFT_141, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_MUL_147 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_RSHIFT_148 = SHIFTRA(DUP(Rtt), op_MUL_147); + RzILOpPure *op_AND_151 = LOGAND(op_RSHIFT_148, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_SUB_155 = SUB(CAST(32, IL_FALSE, CAST(8, IL_FALSE, op_AND_144)), CAST(32, IL_FALSE, CAST(8, IL_FALSE, op_AND_151))); + RzILOpPure *op_NEG_156 = NEG(op_SUB_155); + RzILOpPure *op_MUL_158 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_RSHIFT_159 = SHIFTRA(DUP(Rss), op_MUL_158); + RzILOpPure *op_AND_162 = LOGAND(op_RSHIFT_159, CAST(64, MSB(SN(32, 0xff)), SN(32, 
0xff))); + RzILOpPure *op_MUL_165 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_RSHIFT_166 = SHIFTRA(DUP(Rtt), op_MUL_165); + RzILOpPure *op_AND_169 = LOGAND(op_RSHIFT_166, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_SUB_173 = SUB(CAST(32, IL_FALSE, CAST(8, IL_FALSE, op_AND_162)), CAST(32, IL_FALSE, CAST(8, IL_FALSE, op_AND_169))); + RzILOpPure *cond_174 = ITE(op_LT_138, op_NEG_156, op_SUB_173); + RzILOpPure *op_ADD_176 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_117), DUP(op_AND_117))), CAST(32, MSB(DUP(op_AND_117)), DUP(op_AND_117))), CAST(64, MSB(cond_174), DUP(cond_174))); + RzILOpPure *op_AND_178 = LOGAND(op_ADD_176, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_182 = SHIFTL0(op_AND_178, SN(32, 0x20)); + RzILOpPure *op_OR_183 = LOGOR(op_AND_111, op_LSHIFT_182); + RzILOpEffect *op_ASSIGN_184 = WRITE_REG(bundle, Rxx_op, op_OR_183); + + // seq(h_tmp105; Rxx = ((Rxx & (~(0xffffffff << 0x20))) | ((((st64) ...; + RzILOpEffect *seq_186 = op_ASSIGN_184; + + // seq(seq(h_tmp105; Rxx = ((Rxx & (~(0xffffffff << 0x20))) | ((((s ...; + RzILOpEffect *seq_187 = SEQN(2, seq_186, seq_104); + + // while ((i < 0x8)) { seq(seq(h_tmp105; Rxx = ((Rxx & (~(0xffffffff << 0x20))) | ((((s ... 
}; + RzILOpPure *op_LT_100 = SLT(VARL("i"), SN(32, 8)); + RzILOpEffect *for_188 = REPEAT(op_LT_100, seq_187); + + // seq(i = 0x4; while ((i < 0x8)) { seq(seq(h_tmp105; Rxx = ((Rxx & ...; + RzILOpEffect *seq_189 = SEQN(2, op_ASSIGN_98, for_188); + + RzILOpEffect *instruction_sequence = SEQN(2, seq_96, seq_189); + return instruction_sequence; +} + +// Rdd = vsubh(Rtt,Rss) +RzILOpEffect *hex_il_op_a2_vsubh(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: st32 i; + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_2 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_5 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp106 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_7 = SETL("h_tmp106", VARL("i")); + + // seq(h_tmp106 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_8 = SEQN(2, op_ASSIGN_hybrid_tmp_7, op_INC_5); + + // Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * 0x10)))) | (((ut64) (((st32) ((st16) ((Rtt >> i * 0x10) & ((st64) 0xffff)))) - ((st32) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff)))) & 0xffff)) << i * 0x10))); + RzILOpPure *op_MUL_12 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_13 = SHIFTL0(SN(64, 0xffff), op_MUL_12); + RzILOpPure *op_NOT_14 = LOGNOT(op_LSHIFT_13); + RzILOpPure *op_AND_15 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_14); + RzILOpPure *op_MUL_18 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_19 = SHIFTRA(Rtt, op_MUL_18); + RzILOpPure *op_AND_22 = LOGAND(op_RSHIFT_19, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_26 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_27 = SHIFTRA(Rss, op_MUL_26); + RzILOpPure *op_AND_30 = LOGAND(op_RSHIFT_27, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + 
RzILOpPure *op_SUB_34 = SUB(CAST(32, MSB(CAST(16, MSB(op_AND_22), DUP(op_AND_22))), CAST(16, MSB(DUP(op_AND_22)), DUP(op_AND_22))), CAST(32, MSB(CAST(16, MSB(op_AND_30), DUP(op_AND_30))), CAST(16, MSB(DUP(op_AND_30)), DUP(op_AND_30)))); + RzILOpPure *op_AND_36 = LOGAND(op_SUB_34, SN(32, 0xffff)); + RzILOpPure *op_MUL_39 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_40 = SHIFTL0(CAST(64, IL_FALSE, op_AND_36), op_MUL_39); + RzILOpPure *op_OR_42 = LOGOR(CAST(64, IL_FALSE, op_AND_15), op_LSHIFT_40); + RzILOpEffect *op_ASSIGN_44 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_42)); + + // seq(h_tmp106; Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * 0x1 ...; + RzILOpEffect *seq_46 = op_ASSIGN_44; + + // seq(seq(h_tmp106; Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * ...; + RzILOpEffect *seq_47 = SEQN(2, seq_46, seq_8); + + // while ((i < 0x4)) { seq(seq(h_tmp106; Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * ... }; + RzILOpPure *op_LT_4 = SLT(VARL("i"), SN(32, 4)); + RzILOpEffect *for_48 = REPEAT(op_LT_4, seq_47); + + // seq(i = 0x0; while ((i < 0x4)) { seq(seq(h_tmp106; Rdd = ((st64) ...; + RzILOpEffect *seq_49 = SEQN(2, op_ASSIGN_2, for_48); + + RzILOpEffect *instruction_sequence = seq_49; + return instruction_sequence; +} + +// Rdd = vsubh(Rtt,Rss):sat +RzILOpEffect *hex_il_op_a2_vsubhs(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: st32 i; + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_2 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_5 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp107 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_7 = SETL("h_tmp107", VARL("i")); + + // seq(h_tmp107 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_8 = 
SEQN(2, op_ASSIGN_hybrid_tmp_7, op_INC_5); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_81 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((st32) ((st16) ((Rtt >> i * 0x10) & ((st64) 0xffff)))) - ((st32) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff))))), 0x0, 0x10) == ((st64) ((st32) ((st16) ((Rtt >> i * 0x10) & ((st64) 0xffff)))) - ((st32) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff))))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st32) ((st16) ((Rtt >> i * 0x10) & ((st64) 0xffff)))) - ((st32) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff)))) < 0x0) ? (-(0x1 << 0xf)) : (0x1 << 0xf) - ((st64) 0x1))); + RzILOpPure *op_MUL_21 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_22 = SHIFTRA(Rtt, op_MUL_21); + RzILOpPure *op_AND_25 = LOGAND(op_RSHIFT_22, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_29 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_30 = SHIFTRA(Rss, op_MUL_29); + RzILOpPure *op_AND_33 = LOGAND(op_RSHIFT_30, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_SUB_37 = SUB(CAST(32, MSB(CAST(16, MSB(op_AND_25), DUP(op_AND_25))), CAST(16, MSB(DUP(op_AND_25)), DUP(op_AND_25))), CAST(32, MSB(CAST(16, MSB(op_AND_33), DUP(op_AND_33))), CAST(16, MSB(DUP(op_AND_33)), DUP(op_AND_33)))); + RzILOpPure *op_MUL_44 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_45 = SHIFTRA(DUP(Rtt), op_MUL_44); + RzILOpPure *op_AND_48 = LOGAND(op_RSHIFT_45, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_51 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_52 = SHIFTRA(DUP(Rss), op_MUL_51); + RzILOpPure *op_AND_55 = LOGAND(op_RSHIFT_52, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_SUB_59 = SUB(CAST(32, MSB(CAST(16, MSB(op_AND_48), DUP(op_AND_48))), CAST(16, MSB(DUP(op_AND_48)), DUP(op_AND_48))), CAST(32, MSB(CAST(16, 
MSB(op_AND_55), DUP(op_AND_55))), CAST(16, MSB(DUP(op_AND_55)), DUP(op_AND_55)))); + RzILOpPure *op_EQ_61 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_SUB_37), SN(32, 0), SN(32, 16)), CAST(64, MSB(op_SUB_59), DUP(op_SUB_59))); + RzILOpPure *op_MUL_83 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_84 = SHIFTRA(DUP(Rtt), op_MUL_83); + RzILOpPure *op_AND_87 = LOGAND(op_RSHIFT_84, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_90 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_91 = SHIFTRA(DUP(Rss), op_MUL_90); + RzILOpPure *op_AND_94 = LOGAND(op_RSHIFT_91, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_SUB_98 = SUB(CAST(32, MSB(CAST(16, MSB(op_AND_87), DUP(op_AND_87))), CAST(16, MSB(DUP(op_AND_87)), DUP(op_AND_87))), CAST(32, MSB(CAST(16, MSB(op_AND_94), DUP(op_AND_94))), CAST(16, MSB(DUP(op_AND_94)), DUP(op_AND_94)))); + RzILOpPure *op_LT_100 = SLT(op_SUB_98, SN(32, 0)); + RzILOpPure *op_LSHIFT_105 = SHIFTL0(SN(64, 1), SN(32, 15)); + RzILOpPure *op_NEG_106 = NEG(op_LSHIFT_105); + RzILOpPure *op_LSHIFT_111 = SHIFTL0(SN(64, 1), SN(32, 15)); + RzILOpPure *op_SUB_114 = SUB(op_LSHIFT_111, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_115 = ITE(op_LT_100, op_NEG_106, op_SUB_114); + RzILOpEffect *gcc_expr_116 = BRANCH(op_EQ_61, EMPTY(), set_usr_field_call_81); + + // h_tmp108 = HYB(gcc_expr_if ((sextract64(((ut64) ((st32) ((st16) ((Rtt >> i * 0x10) & ((st64) 0xffff)))) - ((st32) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff))))), 0x0, 0x10) == ((st64) ((st32) ((st16) ((Rtt >> i * 0x10) & ((st64) 0xffff)))) - ((st32) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff))))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st32) ((st16) ((Rtt >> i * 0x10) & ((st64) 0xffff)))) - ((st32) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff)))) < 0x0) ? 
(-(0x1 << 0xf)) : (0x1 << 0xf) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_118 = SETL("h_tmp108", cond_115); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st32) ((st16) ((Rtt > ...; + RzILOpEffect *seq_119 = SEQN(2, gcc_expr_116, op_ASSIGN_hybrid_tmp_118); + + // Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * 0x10)))) | (((ut64) (((sextract64(((ut64) ((st32) ((st16) ((Rtt >> i * 0x10) & ((st64) 0xffff)))) - ((st32) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff))))), 0x0, 0x10) == ((st64) ((st32) ((st16) ((Rtt >> i * 0x10) & ((st64) 0xffff)))) - ((st32) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff)))))) ? ((st64) ((st32) ((st16) ((Rtt >> i * 0x10) & ((st64) 0xffff)))) - ((st32) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff))))) : h_tmp108) & ((st64) 0xffff))) << i * 0x10))); + RzILOpPure *op_MUL_12 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_13 = SHIFTL0(SN(64, 0xffff), op_MUL_12); + RzILOpPure *op_NOT_14 = LOGNOT(op_LSHIFT_13); + RzILOpPure *op_AND_15 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_14); + RzILOpPure *op_MUL_63 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_64 = SHIFTRA(DUP(Rtt), op_MUL_63); + RzILOpPure *op_AND_67 = LOGAND(op_RSHIFT_64, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_70 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_71 = SHIFTRA(DUP(Rss), op_MUL_70); + RzILOpPure *op_AND_74 = LOGAND(op_RSHIFT_71, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_SUB_78 = SUB(CAST(32, MSB(CAST(16, MSB(op_AND_67), DUP(op_AND_67))), CAST(16, MSB(DUP(op_AND_67)), DUP(op_AND_67))), CAST(32, MSB(CAST(16, MSB(op_AND_74), DUP(op_AND_74))), CAST(16, MSB(DUP(op_AND_74)), DUP(op_AND_74)))); + RzILOpPure *cond_121 = ITE(DUP(op_EQ_61), CAST(64, MSB(op_SUB_78), DUP(op_SUB_78)), VARL("h_tmp108")); + RzILOpPure *op_AND_124 = LOGAND(cond_121, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_127 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_128 = SHIFTL0(CAST(64, IL_FALSE, 
op_AND_124), op_MUL_127); + RzILOpPure *op_OR_130 = LOGOR(CAST(64, IL_FALSE, op_AND_15), op_LSHIFT_128); + RzILOpEffect *op_ASSIGN_132 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_130)); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st32) ((st16) ((R ...; + RzILOpEffect *seq_133 = SEQN(2, seq_119, op_ASSIGN_132); + + // seq(h_tmp107; seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st3 ...; + RzILOpEffect *seq_135 = seq_133; + + // seq(seq(h_tmp107; seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ( ...; + RzILOpEffect *seq_136 = SEQN(2, seq_135, seq_8); + + // while ((i < 0x4)) { seq(seq(h_tmp107; seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ( ... }; + RzILOpPure *op_LT_4 = SLT(VARL("i"), SN(32, 4)); + RzILOpEffect *for_137 = REPEAT(op_LT_4, seq_136); + + // seq(i = 0x0; while ((i < 0x4)) { seq(seq(h_tmp107; seq(seq(HYB(g ...; + RzILOpEffect *seq_138 = SEQN(2, op_ASSIGN_2, for_137); + + RzILOpEffect *instruction_sequence = seq_138; + return instruction_sequence; +} + +// Rdd = vsubub(Rtt,Rss) +RzILOpEffect *hex_il_op_a2_vsubub(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: st32 i; + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_2 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_5 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp109 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_7 = SETL("h_tmp109", VARL("i")); + + // seq(h_tmp109 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_8 = SEQN(2, op_ASSIGN_hybrid_tmp_7, op_INC_5); + + // Rdd = ((st64) (((ut64) (Rdd & (~(0xff << i * 0x8)))) | (((ut64) (((st64) ((st32) ((ut8) ((Rtt >> i * 0x8) & ((st64) 0xff)))) - ((st32) ((ut8) ((Rss >> i * 0x8) & ((st64) 0xff))))) & 0xff)) << i * 0x8))); + 
RzILOpPure *op_MUL_12 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_LSHIFT_13 = SHIFTL0(SN(64, 0xff), op_MUL_12); + RzILOpPure *op_NOT_14 = LOGNOT(op_LSHIFT_13); + RzILOpPure *op_AND_15 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_14); + RzILOpPure *op_MUL_18 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_RSHIFT_19 = SHIFTRA(Rtt, op_MUL_18); + RzILOpPure *op_AND_22 = LOGAND(op_RSHIFT_19, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_MUL_26 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_RSHIFT_27 = SHIFTRA(Rss, op_MUL_26); + RzILOpPure *op_AND_30 = LOGAND(op_RSHIFT_27, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_SUB_34 = SUB(CAST(32, IL_FALSE, CAST(8, IL_FALSE, op_AND_22)), CAST(32, IL_FALSE, CAST(8, IL_FALSE, op_AND_30))); + RzILOpPure *op_AND_37 = LOGAND(CAST(64, MSB(op_SUB_34), DUP(op_SUB_34)), SN(64, 0xff)); + RzILOpPure *op_MUL_40 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_LSHIFT_41 = SHIFTL0(CAST(64, IL_FALSE, op_AND_37), op_MUL_40); + RzILOpPure *op_OR_43 = LOGOR(CAST(64, IL_FALSE, op_AND_15), op_LSHIFT_41); + RzILOpEffect *op_ASSIGN_45 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_43)); + + // seq(h_tmp109; Rdd = ((st64) (((ut64) (Rdd & (~(0xff << i * 0x8)) ...; + RzILOpEffect *seq_47 = op_ASSIGN_45; + + // seq(seq(h_tmp109; Rdd = ((st64) (((ut64) (Rdd & (~(0xff << i * 0 ...; + RzILOpEffect *seq_48 = SEQN(2, seq_47, seq_8); + + // while ((i < 0x8)) { seq(seq(h_tmp109; Rdd = ((st64) (((ut64) (Rdd & (~(0xff << i * 0 ... 
};
+	RzILOpPure *op_LT_4 = SLT(VARL("i"), SN(32, 8));
+	RzILOpEffect *for_49 = REPEAT(op_LT_4, seq_48);
+
+	// seq(i = 0x0; while ((i < 0x8)) { seq(seq(h_tmp109; Rdd = ((st64) ...;
+	RzILOpEffect *seq_50 = SEQN(2, op_ASSIGN_2, for_49);
+
+	RzILOpEffect *instruction_sequence = seq_50;
+	return instruction_sequence;
+}
+
+// NOTE(review): generated RzIL lifter (rz-hexagon) — do not hand-edit; regenerate instead.
+// Lifts per-byte (8 lanes) unsigned subtract Rtt-Rss, saturating each lane to [0x0, 0xff];
+// on lane overflow (extract64 of the 8-bit lane differs from the full difference) sets USR.OVF
+// via hex_set_usr_field.
+// Rdd = vsubub(Rtt,Rss):sat
+RzILOpEffect *hex_il_op_a2_vsububs(HexInsnPktBundle *bundle) {
+	const HexInsn *hi = bundle->insn;
+	HexPkt *pkt = bundle->pkt;
+	// READ
+	// Declare: st32 i;
+	const HexOp *Rdd_op = ISA2REG(hi, 'd', false);
+	const HexOp *Rtt_op = ISA2REG(hi, 't', false);
+	RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false);
+	const HexOp *Rss_op = ISA2REG(hi, 's', false);
+	RzILOpPure *Rss = READ_REG(pkt, Rss_op, false);
+
+	// i = 0x0;
+	RzILOpEffect *op_ASSIGN_2 = SETL("i", SN(32, 0));
+
+	// HYB(++i);
+	RzILOpEffect *op_INC_5 = SETL("i", INC(VARL("i"), 32));
+
+	// h_tmp110 = HYB(++i);
+	RzILOpEffect *op_ASSIGN_hybrid_tmp_7 = SETL("h_tmp110", VARL("i"));
+
+	// seq(h_tmp110 = HYB(++i); HYB(++i));
+	RzILOpEffect *seq_8 = SEQN(2, op_ASSIGN_hybrid_tmp_7, op_INC_5);
+
+	// set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1));
+	RzILOpEffect *set_usr_field_call_81 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1)));
+
+	// HYB(gcc_expr_if ((extract64(((ut64) ((st32) ((ut8) ((Rtt >> i * 0x8) & ((st64) 0xff)))) - ((st32) ((ut8) ((Rss >> i * 0x8) & ((st64) 0xff))))), 0x0, 0x8) == ((ut64) ((st32) ((ut8) ((Rtt >> i * 0x8) & ((st64) 0xff)))) - ((st32) ((ut8) ((Rss >> i * 0x8) & ((st64) 0xff))))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st32) ((ut8) ((Rtt >> i * 0x8) & ((st64) 0xff)))) - ((st32) ((ut8) ((Rss >> i * 0x8) & ((st64) 0xff)))) < 0x0) ? ((st64) 0x0) : (0x1 << 0x8) - ((st64) 0x1)));
+	RzILOpPure *op_MUL_21 = MUL(VARL("i"), SN(32, 8));
+	RzILOpPure *op_RSHIFT_22 = SHIFTRA(Rtt, op_MUL_21);
+	RzILOpPure *op_AND_25 = LOGAND(op_RSHIFT_22, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff)));
+	RzILOpPure *op_MUL_29 = MUL(VARL("i"), SN(32, 8));
+	RzILOpPure *op_RSHIFT_30 = SHIFTRA(Rss, op_MUL_29);
+	RzILOpPure *op_AND_33 = LOGAND(op_RSHIFT_30, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff)));
+	RzILOpPure *op_SUB_37 = SUB(CAST(32, IL_FALSE, CAST(8, IL_FALSE, op_AND_25)), CAST(32, IL_FALSE, CAST(8, IL_FALSE, op_AND_33)));
+	RzILOpPure *op_MUL_44 = MUL(VARL("i"), SN(32, 8));
+	RzILOpPure *op_RSHIFT_45 = SHIFTRA(DUP(Rtt), op_MUL_44);
+	RzILOpPure *op_AND_48 = LOGAND(op_RSHIFT_45, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff)));
+	RzILOpPure *op_MUL_51 = MUL(VARL("i"), SN(32, 8));
+	RzILOpPure *op_RSHIFT_52 = SHIFTRA(DUP(Rss), op_MUL_51);
+	RzILOpPure *op_AND_55 = LOGAND(op_RSHIFT_52, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff)));
+	RzILOpPure *op_SUB_59 = SUB(CAST(32, IL_FALSE, CAST(8, IL_FALSE, op_AND_48)), CAST(32, IL_FALSE, CAST(8, IL_FALSE, op_AND_55)));
+	RzILOpPure *op_EQ_61 = EQ(EXTRACT64(CAST(64, IL_FALSE, op_SUB_37), SN(32, 0), SN(32, 8)), CAST(64, IL_FALSE, op_SUB_59));
+	RzILOpPure *op_MUL_83 = MUL(VARL("i"), SN(32, 8));
+	RzILOpPure *op_RSHIFT_84 = SHIFTRA(DUP(Rtt), op_MUL_83);
+	RzILOpPure *op_AND_87 = LOGAND(op_RSHIFT_84, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff)));
+	RzILOpPure *op_MUL_90 = MUL(VARL("i"), SN(32, 8));
+	RzILOpPure *op_RSHIFT_91 = SHIFTRA(DUP(Rss), op_MUL_90);
+	RzILOpPure *op_AND_94 = LOGAND(op_RSHIFT_91, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff)));
+	RzILOpPure *op_SUB_98 = SUB(CAST(32, IL_FALSE, CAST(8, IL_FALSE, op_AND_87)), CAST(32, IL_FALSE, CAST(8, IL_FALSE, op_AND_94)));
+	RzILOpPure *op_LT_100 = SLT(op_SUB_98, SN(32, 0));
+	RzILOpPure *op_LSHIFT_104 = SHIFTL0(SN(64, 1), SN(32, 8));
+	RzILOpPure *op_SUB_107 = SUB(op_LSHIFT_104, CAST(64, MSB(SN(32, 1)), SN(32, 1)));
+	RzILOpPure *cond_109 = ITE(op_LT_100, CAST(64, MSB(SN(32, 0)), SN(32, 0)), op_SUB_107);
+	RzILOpEffect *gcc_expr_110 = BRANCH(op_EQ_61, EMPTY(), set_usr_field_call_81);
+
+	// h_tmp111 = HYB(gcc_expr_if ((extract64(((ut64) ((st32) ((ut8) ((Rtt >> i * 0x8) & ((st64) 0xff)))) - ((st32) ((ut8) ((Rss >> i * 0x8) & ((st64) 0xff))))), 0x0, 0x8) == ((ut64) ((st32) ((ut8) ((Rtt >> i * 0x8) & ((st64) 0xff)))) - ((st32) ((ut8) ((Rss >> i * 0x8) & ((st64) 0xff))))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st32) ((ut8) ((Rtt >> i * 0x8) & ((st64) 0xff)))) - ((st32) ((ut8) ((Rss >> i * 0x8) & ((st64) 0xff)))) < 0x0) ? ((st64) 0x0) : (0x1 << 0x8) - ((st64) 0x1)));
+	RzILOpEffect *op_ASSIGN_hybrid_tmp_112 = SETL("h_tmp111", cond_109);
+
+	// seq(HYB(gcc_expr_if ((extract64(((ut64) ((st32) ((ut8) ((Rtt >> ...;
+	RzILOpEffect *seq_113 = SEQN(2, gcc_expr_110, op_ASSIGN_hybrid_tmp_112);
+
+	// Rdd = ((st64) (((ut64) (Rdd & (~(0xff << i * 0x8)))) | (((ut64) (((extract64(((ut64) ((st32) ((ut8) ((Rtt >> i * 0x8) & ((st64) 0xff)))) - ((st32) ((ut8) ((Rss >> i * 0x8) & ((st64) 0xff))))), 0x0, 0x8) == ((ut64) ((st32) ((ut8) ((Rtt >> i * 0x8) & ((st64) 0xff)))) - ((st32) ((ut8) ((Rss >> i * 0x8) & ((st64) 0xff)))))) ? ((st64) ((st32) ((ut8) ((Rtt >> i * 0x8) & ((st64) 0xff)))) - ((st32) ((ut8) ((Rss >> i * 0x8) & ((st64) 0xff))))) : h_tmp111) & 0xff)) << i * 0x8)));
+	RzILOpPure *op_MUL_12 = MUL(VARL("i"), SN(32, 8));
+	RzILOpPure *op_LSHIFT_13 = SHIFTL0(SN(64, 0xff), op_MUL_12);
+	RzILOpPure *op_NOT_14 = LOGNOT(op_LSHIFT_13);
+	RzILOpPure *op_AND_15 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_14);
+	RzILOpPure *op_MUL_63 = MUL(VARL("i"), SN(32, 8));
+	RzILOpPure *op_RSHIFT_64 = SHIFTRA(DUP(Rtt), op_MUL_63);
+	RzILOpPure *op_AND_67 = LOGAND(op_RSHIFT_64, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff)));
+	RzILOpPure *op_MUL_70 = MUL(VARL("i"), SN(32, 8));
+	RzILOpPure *op_RSHIFT_71 = SHIFTRA(DUP(Rss), op_MUL_70);
+	RzILOpPure *op_AND_74 = LOGAND(op_RSHIFT_71, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff)));
+	RzILOpPure *op_SUB_78 = SUB(CAST(32, IL_FALSE, CAST(8, IL_FALSE, op_AND_67)), CAST(32, IL_FALSE, CAST(8, IL_FALSE, op_AND_74)));
+	RzILOpPure *cond_115 = ITE(DUP(op_EQ_61), CAST(64, MSB(op_SUB_78), DUP(op_SUB_78)), VARL("h_tmp111"));
+	RzILOpPure *op_AND_117 = LOGAND(cond_115, SN(64, 0xff));
+	RzILOpPure *op_MUL_120 = MUL(VARL("i"), SN(32, 8));
+	RzILOpPure *op_LSHIFT_121 = SHIFTL0(CAST(64, IL_FALSE, op_AND_117), op_MUL_120);
+	RzILOpPure *op_OR_123 = LOGOR(CAST(64, IL_FALSE, op_AND_15), op_LSHIFT_121);
+	RzILOpEffect *op_ASSIGN_125 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_123));
+
+	// seq(seq(HYB(gcc_expr_if ((extract64(((ut64) ((st32) ((ut8) ((Rtt ...;
+	RzILOpEffect *seq_126 = SEQN(2, seq_113, op_ASSIGN_125);
+
+	// seq(h_tmp110; seq(seq(HYB(gcc_expr_if ((extract64(((ut64) ((st32 ...;
+	RzILOpEffect *seq_128 = seq_126;
+
+	// seq(seq(h_tmp110; seq(seq(HYB(gcc_expr_if ((extract64(((ut64) (( ...;
+	RzILOpEffect *seq_129 = SEQN(2, seq_128, seq_8);
+
+	// while ((i < 0x8)) { seq(seq(h_tmp110; seq(seq(HYB(gcc_expr_if ((extract64(((ut64) (( ... };
+	RzILOpPure *op_LT_4 = SLT(VARL("i"), SN(32, 8));
+	RzILOpEffect *for_130 = REPEAT(op_LT_4, seq_129);
+
+	// seq(i = 0x0; while ((i < 0x8)) { seq(seq(h_tmp110; seq(seq(HYB(g ...;
+	RzILOpEffect *seq_131 = SEQN(2, op_ASSIGN_2, for_130);
+
+	RzILOpEffect *instruction_sequence = seq_131;
+	return instruction_sequence;
+}
+
+// NOTE(review): generated lifter — per-halfword (4 lanes, 16-bit) unsigned subtract with
+// saturation to [0x0, 0xffff]; lane overflow sets USR.OVF. Do not hand-edit.
+// Rdd = vsubuh(Rtt,Rss):sat
+RzILOpEffect *hex_il_op_a2_vsubuhs(HexInsnPktBundle *bundle) {
+	const HexInsn *hi = bundle->insn;
+	HexPkt *pkt = bundle->pkt;
+	// READ
+	// Declare: st32 i;
+	const HexOp *Rdd_op = ISA2REG(hi, 'd', false);
+	const HexOp *Rtt_op = ISA2REG(hi, 't', false);
+	RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false);
+	const HexOp *Rss_op = ISA2REG(hi, 's', false);
+	RzILOpPure *Rss = READ_REG(pkt, Rss_op, false);
+
+	// i = 0x0;
+	RzILOpEffect *op_ASSIGN_2 = SETL("i", SN(32, 0));
+
+	// HYB(++i);
+	RzILOpEffect *op_INC_5 = SETL("i", INC(VARL("i"), 32));
+
+	// h_tmp112 = HYB(++i);
+	RzILOpEffect *op_ASSIGN_hybrid_tmp_7 = SETL("h_tmp112", VARL("i"));
+
+	// seq(h_tmp112 = HYB(++i); HYB(++i));
+	RzILOpEffect *seq_8 = SEQN(2, op_ASSIGN_hybrid_tmp_7, op_INC_5);
+
+	// set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1));
+	RzILOpEffect *set_usr_field_call_81 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1)));
+
+	// HYB(gcc_expr_if ((extract64(((ut64) ((st32) ((ut16) ((Rtt >> i * 0x10) & ((st64) 0xffff)))) - ((st32) ((ut16) ((Rss >> i * 0x10) & ((st64) 0xffff))))), 0x0, 0x10) == ((ut64) ((st32) ((ut16) ((Rtt >> i * 0x10) & ((st64) 0xffff)))) - ((st32) ((ut16) ((Rss >> i * 0x10) & ((st64) 0xffff))))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st32) ((ut16) ((Rtt >> i * 0x10) & ((st64) 0xffff)))) - ((st32) ((ut16) ((Rss >> i * 0x10) & ((st64) 0xffff)))) < 0x0) ? 
((st64) 0x0) : (0x1 << 0x10) - ((st64) 0x1)));
+	RzILOpPure *op_MUL_21 = MUL(VARL("i"), SN(32, 16));
+	RzILOpPure *op_RSHIFT_22 = SHIFTRA(Rtt, op_MUL_21);
+	RzILOpPure *op_AND_25 = LOGAND(op_RSHIFT_22, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff)));
+	RzILOpPure *op_MUL_29 = MUL(VARL("i"), SN(32, 16));
+	RzILOpPure *op_RSHIFT_30 = SHIFTRA(Rss, op_MUL_29);
+	RzILOpPure *op_AND_33 = LOGAND(op_RSHIFT_30, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff)));
+	RzILOpPure *op_SUB_37 = SUB(CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_25)), CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_33)));
+	RzILOpPure *op_MUL_44 = MUL(VARL("i"), SN(32, 16));
+	RzILOpPure *op_RSHIFT_45 = SHIFTRA(DUP(Rtt), op_MUL_44);
+	RzILOpPure *op_AND_48 = LOGAND(op_RSHIFT_45, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff)));
+	RzILOpPure *op_MUL_51 = MUL(VARL("i"), SN(32, 16));
+	RzILOpPure *op_RSHIFT_52 = SHIFTRA(DUP(Rss), op_MUL_51);
+	RzILOpPure *op_AND_55 = LOGAND(op_RSHIFT_52, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff)));
+	RzILOpPure *op_SUB_59 = SUB(CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_48)), CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_55)));
+	RzILOpPure *op_EQ_61 = EQ(EXTRACT64(CAST(64, IL_FALSE, op_SUB_37), SN(32, 0), SN(32, 16)), CAST(64, IL_FALSE, op_SUB_59));
+	RzILOpPure *op_MUL_83 = MUL(VARL("i"), SN(32, 16));
+	RzILOpPure *op_RSHIFT_84 = SHIFTRA(DUP(Rtt), op_MUL_83);
+	RzILOpPure *op_AND_87 = LOGAND(op_RSHIFT_84, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff)));
+	RzILOpPure *op_MUL_90 = MUL(VARL("i"), SN(32, 16));
+	RzILOpPure *op_RSHIFT_91 = SHIFTRA(DUP(Rss), op_MUL_90);
+	RzILOpPure *op_AND_94 = LOGAND(op_RSHIFT_91, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff)));
+	RzILOpPure *op_SUB_98 = SUB(CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_87)), CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_94)));
+	RzILOpPure *op_LT_100 = SLT(op_SUB_98, SN(32, 0));
+	RzILOpPure *op_LSHIFT_104 = SHIFTL0(SN(64, 1), SN(32, 16));
+	RzILOpPure *op_SUB_107 = SUB(op_LSHIFT_104, CAST(64, MSB(SN(32, 1)), SN(32, 1)));
+	RzILOpPure *cond_109 = ITE(op_LT_100, CAST(64, MSB(SN(32, 0)), SN(32, 0)), op_SUB_107);
+	RzILOpEffect *gcc_expr_110 = BRANCH(op_EQ_61, EMPTY(), set_usr_field_call_81);
+
+	// h_tmp113 = HYB(gcc_expr_if ((extract64(((ut64) ((st32) ((ut16) ((Rtt >> i * 0x10) & ((st64) 0xffff)))) - ((st32) ((ut16) ((Rss >> i * 0x10) & ((st64) 0xffff))))), 0x0, 0x10) == ((ut64) ((st32) ((ut16) ((Rtt >> i * 0x10) & ((st64) 0xffff)))) - ((st32) ((ut16) ((Rss >> i * 0x10) & ((st64) 0xffff))))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st32) ((ut16) ((Rtt >> i * 0x10) & ((st64) 0xffff)))) - ((st32) ((ut16) ((Rss >> i * 0x10) & ((st64) 0xffff)))) < 0x0) ? ((st64) 0x0) : (0x1 << 0x10) - ((st64) 0x1)));
+	RzILOpEffect *op_ASSIGN_hybrid_tmp_112 = SETL("h_tmp113", cond_109);
+
+	// seq(HYB(gcc_expr_if ((extract64(((ut64) ((st32) ((ut16) ((Rtt >> ...;
+	RzILOpEffect *seq_113 = SEQN(2, gcc_expr_110, op_ASSIGN_hybrid_tmp_112);
+
+	// Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * 0x10)))) | (((ut64) (((extract64(((ut64) ((st32) ((ut16) ((Rtt >> i * 0x10) & ((st64) 0xffff)))) - ((st32) ((ut16) ((Rss >> i * 0x10) & ((st64) 0xffff))))), 0x0, 0x10) == ((ut64) ((st32) ((ut16) ((Rtt >> i * 0x10) & ((st64) 0xffff)))) - ((st32) ((ut16) ((Rss >> i * 0x10) & ((st64) 0xffff)))))) ? ((st64) ((st32) ((ut16) ((Rtt >> i * 0x10) & ((st64) 0xffff)))) - ((st32) ((ut16) ((Rss >> i * 0x10) & ((st64) 0xffff))))) : h_tmp113) & ((st64) 0xffff))) << i * 0x10)));
+	RzILOpPure *op_MUL_12 = MUL(VARL("i"), SN(32, 16));
+	RzILOpPure *op_LSHIFT_13 = SHIFTL0(SN(64, 0xffff), op_MUL_12);
+	RzILOpPure *op_NOT_14 = LOGNOT(op_LSHIFT_13);
+	RzILOpPure *op_AND_15 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_14);
+	RzILOpPure *op_MUL_63 = MUL(VARL("i"), SN(32, 16));
+	RzILOpPure *op_RSHIFT_64 = SHIFTRA(DUP(Rtt), op_MUL_63);
+	RzILOpPure *op_AND_67 = LOGAND(op_RSHIFT_64, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff)));
+	RzILOpPure *op_MUL_70 = MUL(VARL("i"), SN(32, 16));
+	RzILOpPure *op_RSHIFT_71 = SHIFTRA(DUP(Rss), op_MUL_70);
+	RzILOpPure *op_AND_74 = LOGAND(op_RSHIFT_71, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff)));
+	RzILOpPure *op_SUB_78 = SUB(CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_67)), CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_74)));
+	RzILOpPure *cond_115 = ITE(DUP(op_EQ_61), CAST(64, MSB(op_SUB_78), DUP(op_SUB_78)), VARL("h_tmp113"));
+	RzILOpPure *op_AND_118 = LOGAND(cond_115, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff)));
+	RzILOpPure *op_MUL_121 = MUL(VARL("i"), SN(32, 16));
+	RzILOpPure *op_LSHIFT_122 = SHIFTL0(CAST(64, IL_FALSE, op_AND_118), op_MUL_121);
+	RzILOpPure *op_OR_124 = LOGOR(CAST(64, IL_FALSE, op_AND_15), op_LSHIFT_122);
+	RzILOpEffect *op_ASSIGN_126 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_124));
+
+	// seq(seq(HYB(gcc_expr_if ((extract64(((ut64) ((st32) ((ut16) ((Rt ...;
+	RzILOpEffect *seq_127 = SEQN(2, seq_113, op_ASSIGN_126);
+
+	// seq(h_tmp112; seq(seq(HYB(gcc_expr_if ((extract64(((ut64) ((st32 ...;
+	RzILOpEffect *seq_129 = seq_127;
+
+	// seq(seq(h_tmp112; seq(seq(HYB(gcc_expr_if ((extract64(((ut64) (( ...;
+	RzILOpEffect *seq_130 = SEQN(2, seq_129, seq_8);
+
+	// while ((i < 0x4)) { seq(seq(h_tmp112; seq(seq(HYB(gcc_expr_if ((extract64(((ut64) (( ... };
+	RzILOpPure *op_LT_4 = SLT(VARL("i"), SN(32, 4));
+	RzILOpEffect *for_131 = REPEAT(op_LT_4, seq_130);
+
+	// seq(i = 0x0; while ((i < 0x4)) { seq(seq(h_tmp112; seq(seq(HYB(g ...;
+	RzILOpEffect *seq_132 = SEQN(2, op_ASSIGN_2, for_131);
+
+	RzILOpEffect *instruction_sequence = seq_132;
+	return instruction_sequence;
+}
+
+// NOTE(review): generated lifter — per-word (2 lanes, 32-bit) subtract Rtt-Rss, no saturation.
+// Do not hand-edit.
+// Rdd = vsubw(Rtt,Rss)
+RzILOpEffect *hex_il_op_a2_vsubw(HexInsnPktBundle *bundle) {
+	const HexInsn *hi = bundle->insn;
+	HexPkt *pkt = bundle->pkt;
+	// READ
+	// Declare: st32 i;
+	const HexOp *Rdd_op = ISA2REG(hi, 'd', false);
+	const HexOp *Rtt_op = ISA2REG(hi, 't', false);
+	RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false);
+	const HexOp *Rss_op = ISA2REG(hi, 's', false);
+	RzILOpPure *Rss = READ_REG(pkt, Rss_op, false);
+
+	// i = 0x0;
+	RzILOpEffect *op_ASSIGN_2 = SETL("i", SN(32, 0));
+
+	// HYB(++i);
+	RzILOpEffect *op_INC_5 = SETL("i", INC(VARL("i"), 32));
+
+	// h_tmp114 = HYB(++i);
+	RzILOpEffect *op_ASSIGN_hybrid_tmp_7 = SETL("h_tmp114", VARL("i"));
+
+	// seq(h_tmp114 = HYB(++i); HYB(++i));
+	RzILOpEffect *seq_8 = SEQN(2, op_ASSIGN_hybrid_tmp_7, op_INC_5);
+
+	// Rdd = ((Rdd & (~(0xffffffff << i * 0x20))) | ((((st64) ((st32) ((Rtt >> i * 0x20) & 0xffffffff))) - ((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff))) & 0xffffffff) << i * 0x20));
+	RzILOpPure *op_MUL_12 = MUL(VARL("i"), SN(32, 0x20));
+	RzILOpPure *op_LSHIFT_13 = SHIFTL0(SN(64, 0xffffffff), op_MUL_12);
+	RzILOpPure *op_NOT_14 = LOGNOT(op_LSHIFT_13);
+	RzILOpPure *op_AND_15 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_14);
+	RzILOpPure *op_MUL_18 = MUL(VARL("i"), SN(32, 0x20));
+	RzILOpPure *op_RSHIFT_19 = SHIFTRA(Rtt, op_MUL_18);
+	RzILOpPure *op_AND_21 = LOGAND(op_RSHIFT_19, SN(64, 0xffffffff));
+	RzILOpPure *op_MUL_26 = MUL(VARL("i"), SN(32, 0x20));
+	RzILOpPure *op_RSHIFT_27 = SHIFTRA(Rss, op_MUL_26);
+	RzILOpPure *op_AND_29 = LOGAND(op_RSHIFT_27, SN(64, 0xffffffff));
+	RzILOpPure *op_SUB_32 = SUB(CAST(64, MSB(CAST(32, MSB(op_AND_21), DUP(op_AND_21))), CAST(32, 
MSB(DUP(op_AND_21)), DUP(op_AND_21))), CAST(64, MSB(CAST(32, MSB(op_AND_29), DUP(op_AND_29))), CAST(32, MSB(DUP(op_AND_29)), DUP(op_AND_29))));
+	RzILOpPure *op_AND_34 = LOGAND(op_SUB_32, SN(64, 0xffffffff));
+	RzILOpPure *op_MUL_36 = MUL(VARL("i"), SN(32, 0x20));
+	RzILOpPure *op_LSHIFT_37 = SHIFTL0(op_AND_34, op_MUL_36);
+	RzILOpPure *op_OR_38 = LOGOR(op_AND_15, op_LSHIFT_37);
+	RzILOpEffect *op_ASSIGN_39 = WRITE_REG(bundle, Rdd_op, op_OR_38);
+
+	// seq(h_tmp114; Rdd = ((Rdd & (~(0xffffffff << i * 0x20))) | ((((s ...;
+	RzILOpEffect *seq_41 = op_ASSIGN_39;
+
+	// seq(seq(h_tmp114; Rdd = ((Rdd & (~(0xffffffff << i * 0x20))) | ( ...;
+	RzILOpEffect *seq_42 = SEQN(2, seq_41, seq_8);
+
+	// while ((i < 0x2)) { seq(seq(h_tmp114; Rdd = ((Rdd & (~(0xffffffff << i * 0x20))) | ( ... };
+	RzILOpPure *op_LT_4 = SLT(VARL("i"), SN(32, 2));
+	RzILOpEffect *for_43 = REPEAT(op_LT_4, seq_42);
+
+	// seq(i = 0x0; while ((i < 0x2)) { seq(seq(h_tmp114; Rdd = ((Rdd & ...;
+	RzILOpEffect *seq_44 = SEQN(2, op_ASSIGN_2, for_43);
+
+	RzILOpEffect *instruction_sequence = seq_44;
+	return instruction_sequence;
+}
+
+// NOTE(review): generated lifter — per-word (2 lanes, 32-bit) signed subtract with saturation
+// to [-(1 << 0x1f), (1 << 0x1f) - 1]; lane overflow sets USR.OVF. Do not hand-edit.
+// Rdd = vsubw(Rtt,Rss):sat
+RzILOpEffect *hex_il_op_a2_vsubws(HexInsnPktBundle *bundle) {
+	const HexInsn *hi = bundle->insn;
+	HexPkt *pkt = bundle->pkt;
+	// READ
+	// Declare: st32 i;
+	const HexOp *Rdd_op = ISA2REG(hi, 'd', false);
+	const HexOp *Rtt_op = ISA2REG(hi, 't', false);
+	RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false);
+	const HexOp *Rss_op = ISA2REG(hi, 's', false);
+	RzILOpPure *Rss = READ_REG(pkt, Rss_op, false);
+
+	// i = 0x0;
+	RzILOpEffect *op_ASSIGN_2 = SETL("i", SN(32, 0));
+
+	// HYB(++i);
+	RzILOpEffect *op_INC_5 = SETL("i", INC(VARL("i"), 32));
+
+	// h_tmp115 = HYB(++i);
+	RzILOpEffect *op_ASSIGN_hybrid_tmp_7 = SETL("h_tmp115", VARL("i"));
+
+	// seq(h_tmp115 = HYB(++i); HYB(++i));
+	RzILOpEffect *seq_8 = SEQN(2, op_ASSIGN_hybrid_tmp_7, op_INC_5);
+
+	// set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1));
+	RzILOpEffect *set_usr_field_call_74 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1)));
+
+	// HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rtt >> i * 0x20) & 0xffffffff))) - ((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff)))), 0x0, 0x20) == ((st64) ((st32) ((Rtt >> i * 0x20) & 0xffffffff))) - ((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rtt >> i * 0x20) & 0xffffffff))) - ((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff))) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1)));
+	RzILOpPure *op_MUL_21 = MUL(VARL("i"), SN(32, 0x20));
+	RzILOpPure *op_RSHIFT_22 = SHIFTRA(Rtt, op_MUL_21);
+	RzILOpPure *op_AND_24 = LOGAND(op_RSHIFT_22, SN(64, 0xffffffff));
+	RzILOpPure *op_MUL_29 = MUL(VARL("i"), SN(32, 0x20));
+	RzILOpPure *op_RSHIFT_30 = SHIFTRA(Rss, op_MUL_29);
+	RzILOpPure *op_AND_32 = LOGAND(op_RSHIFT_30, SN(64, 0xffffffff));
+	RzILOpPure *op_SUB_35 = SUB(CAST(64, MSB(CAST(32, MSB(op_AND_24), DUP(op_AND_24))), CAST(32, MSB(DUP(op_AND_24)), DUP(op_AND_24))), CAST(64, MSB(CAST(32, MSB(op_AND_32), DUP(op_AND_32))), CAST(32, MSB(DUP(op_AND_32)), DUP(op_AND_32))));
+	RzILOpPure *op_MUL_42 = MUL(VARL("i"), SN(32, 0x20));
+	RzILOpPure *op_RSHIFT_43 = SHIFTRA(DUP(Rtt), op_MUL_42);
+	RzILOpPure *op_AND_45 = LOGAND(op_RSHIFT_43, SN(64, 0xffffffff));
+	RzILOpPure *op_MUL_49 = MUL(VARL("i"), SN(32, 0x20));
+	RzILOpPure *op_RSHIFT_50 = SHIFTRA(DUP(Rss), op_MUL_49);
+	RzILOpPure *op_AND_52 = LOGAND(op_RSHIFT_50, SN(64, 0xffffffff));
+	RzILOpPure *op_SUB_55 = SUB(CAST(64, MSB(CAST(32, MSB(op_AND_45), DUP(op_AND_45))), CAST(32, MSB(DUP(op_AND_45)), DUP(op_AND_45))), CAST(64, MSB(CAST(32, MSB(op_AND_52), DUP(op_AND_52))), CAST(32, MSB(DUP(op_AND_52)), DUP(op_AND_52))));
+	RzILOpPure *op_EQ_56 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_SUB_35), SN(32, 0), SN(32, 0x20)), op_SUB_55);
+	RzILOpPure *op_MUL_76 = MUL(VARL("i"), SN(32, 0x20));
+	RzILOpPure *op_RSHIFT_77 = SHIFTRA(DUP(Rtt), op_MUL_76);
+	RzILOpPure *op_AND_79 = LOGAND(op_RSHIFT_77, SN(64, 0xffffffff));
+	RzILOpPure *op_MUL_83 = MUL(VARL("i"), SN(32, 0x20));
+	RzILOpPure *op_RSHIFT_84 = SHIFTRA(DUP(Rss), op_MUL_83);
+	RzILOpPure *op_AND_86 = LOGAND(op_RSHIFT_84, SN(64, 0xffffffff));
+	RzILOpPure *op_SUB_89 = SUB(CAST(64, MSB(CAST(32, MSB(op_AND_79), DUP(op_AND_79))), CAST(32, MSB(DUP(op_AND_79)), DUP(op_AND_79))), CAST(64, MSB(CAST(32, MSB(op_AND_86), DUP(op_AND_86))), CAST(32, MSB(DUP(op_AND_86)), DUP(op_AND_86))));
+	RzILOpPure *op_LT_92 = SLT(op_SUB_89, CAST(64, MSB(SN(32, 0)), SN(32, 0)));
+	RzILOpPure *op_LSHIFT_97 = SHIFTL0(SN(64, 1), SN(32, 31));
+	RzILOpPure *op_NEG_98 = NEG(op_LSHIFT_97);
+	RzILOpPure *op_LSHIFT_103 = SHIFTL0(SN(64, 1), SN(32, 31));
+	RzILOpPure *op_SUB_106 = SUB(op_LSHIFT_103, CAST(64, MSB(SN(32, 1)), SN(32, 1)));
+	RzILOpPure *cond_107 = ITE(op_LT_92, op_NEG_98, op_SUB_106);
+	RzILOpEffect *gcc_expr_108 = BRANCH(op_EQ_56, EMPTY(), set_usr_field_call_74);
+
+	// h_tmp116 = HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rtt >> i * 0x20) & 0xffffffff))) - ((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff)))), 0x0, 0x20) == ((st64) ((st32) ((Rtt >> i * 0x20) & 0xffffffff))) - ((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rtt >> i * 0x20) & 0xffffffff))) - ((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff))) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1)));
+	RzILOpEffect *op_ASSIGN_hybrid_tmp_110 = SETL("h_tmp116", cond_107);
+
+	// seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rtt > ...;
+	RzILOpEffect *seq_111 = SEQN(2, gcc_expr_108, op_ASSIGN_hybrid_tmp_110);
+
+	// Rdd = ((Rdd & (~(0xffffffff << i * 0x20))) | ((((sextract64(((ut64) ((st64) ((st32) ((Rtt >> i * 0x20) & 0xffffffff))) - ((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff)))), 0x0, 0x20) == ((st64) ((st32) ((Rtt >> i * 0x20) & 0xffffffff))) - ((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff)))) ? ((st64) ((st32) ((Rtt >> i * 0x20) & 0xffffffff))) - ((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff))) : h_tmp116) & 0xffffffff) << i * 0x20));
+	RzILOpPure *op_MUL_12 = MUL(VARL("i"), SN(32, 0x20));
+	RzILOpPure *op_LSHIFT_13 = SHIFTL0(SN(64, 0xffffffff), op_MUL_12);
+	RzILOpPure *op_NOT_14 = LOGNOT(op_LSHIFT_13);
+	RzILOpPure *op_AND_15 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_14);
+	RzILOpPure *op_MUL_58 = MUL(VARL("i"), SN(32, 0x20));
+	RzILOpPure *op_RSHIFT_59 = SHIFTRA(DUP(Rtt), op_MUL_58);
+	RzILOpPure *op_AND_61 = LOGAND(op_RSHIFT_59, SN(64, 0xffffffff));
+	RzILOpPure *op_MUL_65 = MUL(VARL("i"), SN(32, 0x20));
+	RzILOpPure *op_RSHIFT_66 = SHIFTRA(DUP(Rss), op_MUL_65);
+	RzILOpPure *op_AND_68 = LOGAND(op_RSHIFT_66, SN(64, 0xffffffff));
+	RzILOpPure *op_SUB_71 = SUB(CAST(64, MSB(CAST(32, MSB(op_AND_61), DUP(op_AND_61))), CAST(32, MSB(DUP(op_AND_61)), DUP(op_AND_61))), CAST(64, MSB(CAST(32, MSB(op_AND_68), DUP(op_AND_68))), CAST(32, MSB(DUP(op_AND_68)), DUP(op_AND_68))));
+	RzILOpPure *cond_112 = ITE(DUP(op_EQ_56), op_SUB_71, VARL("h_tmp116"));
+	RzILOpPure *op_AND_114 = LOGAND(cond_112, SN(64, 0xffffffff));
+	RzILOpPure *op_MUL_116 = MUL(VARL("i"), SN(32, 0x20));
+	RzILOpPure *op_LSHIFT_117 = SHIFTL0(op_AND_114, op_MUL_116);
+	RzILOpPure *op_OR_118 = LOGOR(op_AND_15, op_LSHIFT_117);
+	RzILOpEffect *op_ASSIGN_119 = WRITE_REG(bundle, Rdd_op, op_OR_118);
+
+	// seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((R ...;
+	RzILOpEffect *seq_120 = SEQN(2, seq_111, op_ASSIGN_119);
+
+	// seq(h_tmp115; seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st6 ...;
+	RzILOpEffect *seq_122 = seq_120;
+
+	// seq(seq(h_tmp115; seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ( ...;
+	RzILOpEffect *seq_123 = SEQN(2, seq_122, seq_8);
+
+	// while ((i < 0x2)) { seq(seq(h_tmp115; seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ( ... };
+	RzILOpPure *op_LT_4 = SLT(VARL("i"), SN(32, 2));
+	RzILOpEffect *for_124 = REPEAT(op_LT_4, seq_123);
+
+	// seq(i = 0x0; while ((i < 0x2)) { seq(seq(h_tmp115; seq(seq(HYB(g ...;
+	RzILOpEffect *seq_125 = SEQN(2, op_ASSIGN_2, for_124);
+
+	RzILOpEffect *instruction_sequence = seq_125;
+	return instruction_sequence;
+}
+
+// NOTE(review): generated lifter — 32-bit bitwise XOR. Do not hand-edit.
+// Rd = xor(Rs,Rt)
+RzILOpEffect *hex_il_op_a2_xor(HexInsnPktBundle *bundle) {
+	const HexInsn *hi = bundle->insn;
+	HexPkt *pkt = bundle->pkt;
+	// READ
+	const HexOp *Rd_op = ISA2REG(hi, 'd', false);
+	const HexOp *Rs_op = ISA2REG(hi, 's', false);
+	RzILOpPure *Rs = READ_REG(pkt, Rs_op, false);
+	const HexOp *Rt_op = ISA2REG(hi, 't', false);
+	RzILOpPure *Rt = READ_REG(pkt, Rt_op, false);
+
+	// Rd = (Rs ^ Rt);
+	RzILOpPure *op_XOR_3 = LOGXOR(Rs, Rt);
+	RzILOpEffect *op_ASSIGN_4 = WRITE_REG(bundle, Rd_op, op_XOR_3);
+
+	RzILOpEffect *instruction_sequence = op_ASSIGN_4;
+	return instruction_sequence;
+}
+
+// NOTE(review): generated lifter — 64-bit (register-pair) bitwise XOR. Do not hand-edit.
+// Rdd = xor(Rss,Rtt)
+RzILOpEffect *hex_il_op_a2_xorp(HexInsnPktBundle *bundle) {
+	const HexInsn *hi = bundle->insn;
+	HexPkt *pkt = bundle->pkt;
+	// READ
+	const HexOp *Rdd_op = ISA2REG(hi, 'd', false);
+	const HexOp *Rss_op = ISA2REG(hi, 's', false);
+	RzILOpPure *Rss = READ_REG(pkt, Rss_op, false);
+	const HexOp *Rtt_op = ISA2REG(hi, 't', false);
+	RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false);
+
+	// Rdd = (Rss ^ Rtt);
+	RzILOpPure *op_XOR_3 = LOGXOR(Rss, Rtt);
+	RzILOpEffect *op_ASSIGN_4 = WRITE_REG(bundle, Rdd_op, op_XOR_3);
+
+	RzILOpEffect *instruction_sequence = op_ASSIGN_4;
+	return 
instruction_sequence; 
+}
+
+// NOTE(review): generated lifter — zero-extend the low 16 bits of Rs into Rd. Do not hand-edit.
+// Rd = zxth(Rs)
+RzILOpEffect *hex_il_op_a2_zxth(HexInsnPktBundle *bundle) {
+	const HexInsn *hi = bundle->insn;
+	HexPkt *pkt = bundle->pkt;
+	// READ
+	const HexOp *Rd_op = ISA2REG(hi, 'd', false);
+	const HexOp *Rs_op = ISA2REG(hi, 's', false);
+	RzILOpPure *Rs = READ_REG(pkt, Rs_op, false);
+
+	// Rd = ((st32) extract64(((ut64) Rs), 0x0, 0x10));
+	RzILOpEffect *op_ASSIGN_11 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, EXTRACT64(CAST(64, IL_FALSE, Rs), SN(32, 0), SN(32, 16))));
+
+	RzILOpEffect *instruction_sequence = op_ASSIGN_11;
+	return instruction_sequence;
+}
+
+#include 
\ No newline at end of file
diff --git a/librz/arch/isa/hexagon/il_ops/hexagon_il_A4_ops.c b/librz/arch/isa/hexagon/il_ops/hexagon_il_A4_ops.c
new file mode 100644
index 00000000000..eafba8be28f
--- /dev/null
+++ b/librz/arch/isa/hexagon/il_ops/hexagon_il_A4_ops.c
@@ -0,0 +1,3446 @@
+// SPDX-FileCopyrightText: 2021 Rot127 
+// SPDX-License-Identifier: LGPL-3.0-only
+
+// LLVM commit: b6f51787f6c8e77143f0aef6b58ddc7c55741d5c
+// LLVM commit date: 2023-11-15 07:10:59 -0800 (ISO 8601 format)
+// Date of code generation: 2024-03-16 06:22:39-05:00
+//========================================
+// The following code is generated.
+// Do not edit. Repository of code generator:
+// https://github.com/rizinorg/rz-hexagon
+
+#include 
+#include "../hexagon_il.h"
+#include 
+#include 
+
+// NOTE(review): not lifted yet — expands to the NOT_IMPLEMENTED stub. Do not hand-edit.
+// Rdd = add(Rss,Rtt,Px):carry
+RzILOpEffect *hex_il_op_a4_addp_c(HexInsnPktBundle *bundle) {
+	NOT_IMPLEMENTED;
+}
+
+// NOTE(review): generated lifter — Rd = Rt & ~Rs (32-bit). Do not hand-edit.
+// Rd = and(Rt,~Rs)
+RzILOpEffect *hex_il_op_a4_andn(HexInsnPktBundle *bundle) {
+	const HexInsn *hi = bundle->insn;
+	HexPkt *pkt = bundle->pkt;
+	// READ
+	const HexOp *Rd_op = ISA2REG(hi, 'd', false);
+	const HexOp *Rt_op = ISA2REG(hi, 't', false);
+	RzILOpPure *Rt = READ_REG(pkt, Rt_op, false);
+	const HexOp *Rs_op = ISA2REG(hi, 's', false);
+	RzILOpPure *Rs = READ_REG(pkt, Rs_op, false);
+
+	// Rd = (Rt & (~Rs));
+	RzILOpPure *op_NOT_3 = LOGNOT(Rs);
+	RzILOpPure *op_AND_4 = LOGAND(Rt, op_NOT_3);
+	RzILOpEffect *op_ASSIGN_5 = WRITE_REG(bundle, Rd_op, op_AND_4);
+
+	RzILOpEffect *instruction_sequence = op_ASSIGN_5;
+	return instruction_sequence;
+}
+
+// NOTE(review): generated lifter — Rdd = Rtt & ~Rss (64-bit register pair). Do not hand-edit.
+// Rdd = and(Rtt,~Rss)
+RzILOpEffect *hex_il_op_a4_andnp(HexInsnPktBundle *bundle) {
+	const HexInsn *hi = bundle->insn;
+	HexPkt *pkt = bundle->pkt;
+	// READ
+	const HexOp *Rdd_op = ISA2REG(hi, 'd', false);
+	const HexOp *Rtt_op = ISA2REG(hi, 't', false);
+	RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false);
+	const HexOp *Rss_op = ISA2REG(hi, 's', false);
+	RzILOpPure *Rss = READ_REG(pkt, Rss_op, false);
+
+	// Rdd = (Rtt & (~Rss));
+	RzILOpPure *op_NOT_3 = LOGNOT(Rss);
+	RzILOpPure *op_AND_4 = LOGAND(Rtt, op_NOT_3);
+	RzILOpEffect *op_ASSIGN_5 = WRITE_REG(bundle, Rdd_op, op_AND_4);
+
+	RzILOpEffect *instruction_sequence = op_ASSIGN_5;
+	return instruction_sequence;
+}
+
+// NOTE(review): generated lifter — shamt = Rt[4:0]; Rdd.w1 = (ut32)Rs >> shamt (logical),
+// Rdd.w0 = low shamt bits of Rs (0 when shamt == 0). Do not hand-edit.
+// Rdd = bitsplit(Rs,Rt)
+RzILOpEffect *hex_il_op_a4_bitsplit(HexInsnPktBundle *bundle) {
+	const HexInsn *hi = bundle->insn;
+	HexPkt *pkt = bundle->pkt;
+	// READ
+	const HexOp *Rt_op = ISA2REG(hi, 't', false);
+	RzILOpPure *Rt = READ_REG(pkt, Rt_op, false);
+	// Declare: ut32 shamt;
+	const HexOp *Rdd_op = ISA2REG(hi, 'd', false);
+	const HexOp *Rs_op = ISA2REG(hi, 's', false);
+	RzILOpPure *Rs = READ_REG(pkt, Rs_op, false);
+
+	// shamt = ((ut32) extract64(((ut64) Rt), 0x0, 0x5));
+	RzILOpEffect *op_ASSIGN_10 = SETL("shamt", CAST(32, IL_FALSE, EXTRACT64(CAST(64, IL_FALSE, Rt), SN(32, 0), SN(32, 5))));
+
+	// Rdd = ((Rdd & (~(0xffffffff << 0x20))) | ((((st64) (((ut32) Rs) >> shamt)) & 0xffffffff) << 0x20));
+	RzILOpPure *op_LSHIFT_17 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0x20));
+	RzILOpPure *op_NOT_18 = LOGNOT(op_LSHIFT_17);
+	RzILOpPure *op_AND_19 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_18);
+	RzILOpPure *op_RSHIFT_22 = SHIFTR0(CAST(32, IL_FALSE, Rs), VARL("shamt"));
+	RzILOpPure *op_AND_25 = LOGAND(CAST(64, IL_FALSE, op_RSHIFT_22), SN(64, 0xffffffff));
+	RzILOpPure *op_LSHIFT_29 = SHIFTL0(op_AND_25, SN(32, 0x20));
+	RzILOpPure *op_OR_30 = LOGOR(op_AND_19, op_LSHIFT_29);
+	RzILOpEffect *op_ASSIGN_31 = WRITE_REG(bundle, Rdd_op, op_OR_30);
+
+	// Rdd = ((st64) (((ut64) (Rdd & (~(0xffffffff << 0x0)))) | ((((shamt != ((ut32) 0x0)) ? extract64(((ut64) Rs), 0x0, ((st32) shamt)) : ((ut64) 0x0)) & ((ut64) 0xffffffff)) << 0x0)));
+	RzILOpPure *op_LSHIFT_37 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0));
+	RzILOpPure *op_NOT_38 = LOGNOT(op_LSHIFT_37);
+	RzILOpPure *op_AND_39 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_38);
+	RzILOpPure *op_NE_42 = INV(EQ(VARL("shamt"), CAST(32, IL_FALSE, SN(32, 0))));
+	RzILOpPure *cond_49 = ITE(op_NE_42, EXTRACT64(CAST(64, IL_FALSE, DUP(Rs)), SN(32, 0), CAST(32, IL_FALSE, VARL("shamt"))), CAST(64, IL_FALSE, SN(64, 0)));
+	RzILOpPure *op_AND_52 = LOGAND(cond_49, CAST(64, IL_FALSE, SN(64, 0xffffffff)));
+	RzILOpPure *op_LSHIFT_56 = SHIFTL0(op_AND_52, SN(32, 0));
+	RzILOpPure *op_OR_58 = LOGOR(CAST(64, IL_FALSE, op_AND_39), op_LSHIFT_56);
+	RzILOpEffect *op_ASSIGN_60 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_58));
+
+	RzILOpEffect *instruction_sequence = SEQN(3, op_ASSIGN_10, op_ASSIGN_31, op_ASSIGN_60);
+	return instruction_sequence;
+}
+
+// NOTE(review): generated lifter — same split as bitsplit, but shift amount is the
+// immediate u instead of Rt[4:0]. Do not hand-edit.
+// Rdd = bitsplit(Rs,Ii)
+RzILOpEffect *hex_il_op_a4_bitspliti(HexInsnPktBundle *bundle) {
+	const HexInsn *hi = bundle->insn;
+	HexPkt *pkt = bundle->pkt;
+	// READ
+	const HexOp *Rdd_op = ISA2REG(hi, 'd', false);
+	const HexOp *Rs_op = ISA2REG(hi, 's', false);
+	RzILOpPure *Rs = READ_REG(pkt, Rs_op, false);
+	RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u'));
+
+	// u = u;
+	RzILOpEffect *imm_assign_10 = SETL("u", u);
+
+	// Rdd = ((Rdd & (~(0xffffffff << 0x20))) | ((((st64) (((ut32) Rs) >> u)) & 0xffffffff) << 0x20));
+	RzILOpPure *op_LSHIFT_5 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0x20));
+	RzILOpPure *op_NOT_6 = LOGNOT(op_LSHIFT_5);
+	RzILOpPure *op_AND_7 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_6);
+	RzILOpPure *op_RSHIFT_12 = SHIFTR0(CAST(32, IL_FALSE, Rs), VARL("u"));
+	RzILOpPure *op_AND_15 = LOGAND(CAST(64, IL_FALSE, op_RSHIFT_12), SN(64, 0xffffffff));
+	RzILOpPure *op_LSHIFT_19 = SHIFTL0(op_AND_15, SN(32, 0x20));
+	RzILOpPure *op_OR_20 = LOGOR(op_AND_7, op_LSHIFT_19);
+	RzILOpEffect *op_ASSIGN_21 = WRITE_REG(bundle, Rdd_op, op_OR_20);
+
+	// Rdd = ((st64) (((ut64) (Rdd & (~(0xffffffff << 0x0)))) | ((((u != ((ut32) 0x0)) ? 
extract64(((ut64) Rs), 0x0, ((st32) u)) : ((ut64) 0x0)) & ((ut64) 0xffffffff)) << 0x0)));
+	RzILOpPure *op_LSHIFT_27 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0));
+	RzILOpPure *op_NOT_28 = LOGNOT(op_LSHIFT_27);
+	RzILOpPure *op_AND_29 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_28);
+	RzILOpPure *op_NE_32 = INV(EQ(VARL("u"), CAST(32, IL_FALSE, SN(32, 0))));
+	RzILOpPure *cond_39 = ITE(op_NE_32, EXTRACT64(CAST(64, IL_FALSE, DUP(Rs)), SN(32, 0), CAST(32, IL_FALSE, VARL("u"))), CAST(64, IL_FALSE, SN(64, 0)));
+	RzILOpPure *op_AND_42 = LOGAND(cond_39, CAST(64, IL_FALSE, SN(64, 0xffffffff)));
+	RzILOpPure *op_LSHIFT_46 = SHIFTL0(op_AND_42, SN(32, 0));
+	RzILOpPure *op_OR_48 = LOGOR(CAST(64, IL_FALSE, op_AND_29), op_LSHIFT_46);
+	RzILOpEffect *op_ASSIGN_50 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_48));
+
+	RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_10, op_ASSIGN_21, op_ASSIGN_50);
+	return instruction_sequence;
+}
+
+// NOTE(review): generated lifter — src = high word of Rss; Pd = 0xff iff
+// Rtt.w0 <= src < Rtt.w1 (unsigned compares), else 0x0. Do not hand-edit.
+// Pd = boundscheck(Rss,Rtt):raw:hi
+RzILOpEffect *hex_il_op_a4_boundscheck_hi(HexInsnPktBundle *bundle) {
+	const HexInsn *hi = bundle->insn;
+	HexPkt *pkt = bundle->pkt;
+	// READ
+	// Declare: ut32 src;
+	const HexOp *Rss_op = ISA2REG(hi, 's', false);
+	RzILOpPure *Rss = READ_REG(pkt, Rss_op, false);
+	const HexOp *Pd_op = ISA2REG(hi, 'd', false);
+	const HexOp *Rtt_op = ISA2REG(hi, 't', false);
+	RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false);
+
+	// src = ((ut32) ((ut64) ((ut32) ((Rss >> 0x20) & 0xffffffff))));
+	RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rss, SN(32, 0x20));
+	RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(64, 0xffffffff));
+	RzILOpEffect *op_ASSIGN_11 = SETL("src", CAST(32, IL_FALSE, CAST(64, IL_FALSE, CAST(32, IL_FALSE, op_AND_7))));
+
+	// Pd = ((st8) (((((ut64) src) >= ((ut64) ((ut32) ((Rtt >> 0x0) & 0xffffffff)))) && (((ut64) src) < ((ut64) ((ut32) ((Rtt >> 0x20) & 0xffffffff))))) ? 0xff : 0x0));
+	RzILOpPure *op_RSHIFT_17 = SHIFTRA(Rtt, SN(32, 0));
+	RzILOpPure *op_AND_19 = LOGAND(op_RSHIFT_17, SN(64, 0xffffffff));
+	RzILOpPure *op_GE_23 = UGE(CAST(64, IL_FALSE, VARL("src")), CAST(64, IL_FALSE, CAST(32, IL_FALSE, op_AND_19)));
+	RzILOpPure *op_RSHIFT_27 = SHIFTRA(DUP(Rtt), SN(32, 0x20));
+	RzILOpPure *op_AND_29 = LOGAND(op_RSHIFT_27, SN(64, 0xffffffff));
+	RzILOpPure *op_LT_33 = ULT(CAST(64, IL_FALSE, VARL("src")), CAST(64, IL_FALSE, CAST(32, IL_FALSE, op_AND_29)));
+	RzILOpPure *op_AND_34 = AND(op_GE_23, op_LT_33);
+	RzILOpPure *cond_37 = ITE(op_AND_34, SN(32, 0xff), SN(32, 0));
+	RzILOpEffect *op_ASSIGN_39 = WRITE_REG(bundle, Pd_op, CAST(8, MSB(cond_37), DUP(cond_37)));
+
+	RzILOpEffect *instruction_sequence = SEQN(2, op_ASSIGN_11, op_ASSIGN_39);
+	return instruction_sequence;
+}
+
+// NOTE(review): generated lifter — same as :raw:hi but src is the low word of Rss
+// (shift by 0x0 instead of 0x20). Do not hand-edit.
+// Pd = boundscheck(Rss,Rtt):raw:lo
+RzILOpEffect *hex_il_op_a4_boundscheck_lo(HexInsnPktBundle *bundle) {
+	const HexInsn *hi = bundle->insn;
+	HexPkt *pkt = bundle->pkt;
+	// READ
+	// Declare: ut32 src;
+	const HexOp *Rss_op = ISA2REG(hi, 's', false);
+	RzILOpPure *Rss = READ_REG(pkt, Rss_op, false);
+	const HexOp *Pd_op = ISA2REG(hi, 'd', false);
+	const HexOp *Rtt_op = ISA2REG(hi, 't', false);
+	RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false);
+
+	// src = ((ut32) ((ut64) ((ut32) ((Rss >> 0x0) & 0xffffffff))));
+	RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rss, SN(32, 0));
+	RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(64, 0xffffffff));
+	RzILOpEffect *op_ASSIGN_11 = SETL("src", CAST(32, IL_FALSE, CAST(64, IL_FALSE, CAST(32, IL_FALSE, op_AND_7))));
+
+	// Pd = ((st8) (((((ut64) src) >= ((ut64) ((ut32) ((Rtt >> 0x0) & 0xffffffff)))) && (((ut64) src) < ((ut64) ((ut32) ((Rtt >> 0x20) & 0xffffffff))))) ? 0xff : 0x0));
+	RzILOpPure *op_RSHIFT_17 = SHIFTRA(Rtt, SN(32, 0));
+	RzILOpPure *op_AND_19 = LOGAND(op_RSHIFT_17, SN(64, 0xffffffff));
+	RzILOpPure *op_GE_23 = UGE(CAST(64, IL_FALSE, VARL("src")), CAST(64, IL_FALSE, CAST(32, IL_FALSE, op_AND_19)));
+	RzILOpPure *op_RSHIFT_27 = SHIFTRA(DUP(Rtt), SN(32, 0x20));
+	RzILOpPure *op_AND_29 = LOGAND(op_RSHIFT_27, SN(64, 0xffffffff));
+	RzILOpPure *op_LT_33 = ULT(CAST(64, IL_FALSE, VARL("src")), CAST(64, IL_FALSE, CAST(32, IL_FALSE, op_AND_29)));
+	RzILOpPure *op_AND_34 = AND(op_GE_23, op_LT_33);
+	RzILOpPure *cond_37 = ITE(op_AND_34, SN(32, 0xff), SN(32, 0));
+	RzILOpEffect *op_ASSIGN_39 = WRITE_REG(bundle, Pd_op, CAST(8, MSB(cond_37), DUP(cond_37)));
+
+	RzILOpEffect *instruction_sequence = SEQN(2, op_ASSIGN_11, op_ASSIGN_39);
+	return instruction_sequence;
+}
+
+// NOTE(review): generated lifter — compare low bytes of Rs and Rt (signed st8 casts);
+// Pd = 0xff on equality, else 0x0. Do not hand-edit.
+// Pd = cmpb.eq(Rs,Rt)
+RzILOpEffect *hex_il_op_a4_cmpbeq(HexInsnPktBundle *bundle) {
+	const HexInsn *hi = bundle->insn;
+	HexPkt *pkt = bundle->pkt;
+	// READ
+	const HexOp *Pd_op = ISA2REG(hi, 'd', false);
+	const HexOp *Rs_op = ISA2REG(hi, 's', false);
+	RzILOpPure *Rs = READ_REG(pkt, Rs_op, false);
+	const HexOp *Rt_op = ISA2REG(hi, 't', false);
+	RzILOpPure *Rt = READ_REG(pkt, Rt_op, false);
+
+	// Pd = ((st8) ((((st8) ((Rs >> 0x0) & 0xff)) == ((st8) ((Rt >> 0x0) & 0xff))) ? 0xff : 0x0));
+	RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rs, SN(32, 0));
+	RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(32, 0xff));
+	RzILOpPure *op_RSHIFT_13 = SHIFTRA(Rt, SN(32, 0));
+	RzILOpPure *op_AND_15 = LOGAND(op_RSHIFT_13, SN(32, 0xff));
+	RzILOpPure *op_EQ_17 = EQ(CAST(8, MSB(op_AND_7), DUP(op_AND_7)), CAST(8, MSB(op_AND_15), DUP(op_AND_15)));
+	RzILOpPure *cond_20 = ITE(op_EQ_17, SN(32, 0xff), SN(32, 0));
+	RzILOpEffect *op_ASSIGN_22 = WRITE_REG(bundle, Pd_op, CAST(8, MSB(cond_20), DUP(cond_20)));
+
+	RzILOpEffect *instruction_sequence = op_ASSIGN_22;
+	return instruction_sequence;
+}
+
+// NOTE(review): generated lifter — compares the zero-extended low byte of Rs against the
+// immediate u (body continues past this chunk). Do not hand-edit.
+// Pd = cmpb.eq(Rs,Ii)
+RzILOpEffect *hex_il_op_a4_cmpbeqi(HexInsnPktBundle *bundle) {
+	const HexInsn *hi = bundle->insn;
+	HexPkt *pkt = bundle->pkt;
+	// READ
+	const HexOp *Pd_op = ISA2REG(hi, 'd', false);
+	const HexOp *Rs_op = ISA2REG(hi, 's', false);
+	RzILOpPure *Rs = READ_REG(pkt, Rs_op, false);
+	RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u'));
+
+	// u = u;
+	RzILOpEffect *imm_assign_9 = SETL("u", u);
+
+	// Pd = ((st8) ((((ut32) ((ut8) ((Rs >> 0x0) & 0xff))) == u) ? 
0xff : 0x0)); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rs, SN(32, 0)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(32, 0xff)); + RzILOpPure *op_EQ_12 = EQ(CAST(32, IL_FALSE, CAST(8, IL_FALSE, op_AND_7)), VARL("u")); + RzILOpPure *cond_15 = ITE(op_EQ_12, SN(32, 0xff), SN(32, 0)); + RzILOpEffect *op_ASSIGN_17 = WRITE_REG(bundle, Pd_op, CAST(8, MSB(cond_15), DUP(cond_15))); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_9, op_ASSIGN_17); + return instruction_sequence; +} + +// Pd = cmpb.gt(Rs,Rt) +RzILOpEffect *hex_il_op_a4_cmpbgt(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Pd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Pd = ((st8) ((((st8) ((Rs >> 0x0) & 0xff)) > ((st8) ((Rt >> 0x0) & 0xff))) ? 0xff : 0x0)); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rs, SN(32, 0)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(32, 0xff)); + RzILOpPure *op_RSHIFT_13 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_15 = LOGAND(op_RSHIFT_13, SN(32, 0xff)); + RzILOpPure *op_GT_17 = SGT(CAST(8, MSB(op_AND_7), DUP(op_AND_7)), CAST(8, MSB(op_AND_15), DUP(op_AND_15))); + RzILOpPure *cond_20 = ITE(op_GT_17, SN(32, 0xff), SN(32, 0)); + RzILOpEffect *op_ASSIGN_22 = WRITE_REG(bundle, Pd_op, CAST(8, MSB(cond_20), DUP(cond_20))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_22; + return instruction_sequence; +} + +// Pd = cmpb.gt(Rs,Ii) +RzILOpEffect *hex_il_op_a4_cmpbgti(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Pd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + + // s = s; + RzILOpEffect *imm_assign_9 = SETL("s", s); + + // Pd 
= ((st8) ((((st32) ((st8) ((Rs >> 0x0) & 0xff))) > s) ? 0xff : 0x0)); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rs, SN(32, 0)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(32, 0xff)); + RzILOpPure *op_GT_12 = SGT(CAST(32, MSB(CAST(8, MSB(op_AND_7), DUP(op_AND_7))), CAST(8, MSB(DUP(op_AND_7)), DUP(op_AND_7))), VARL("s")); + RzILOpPure *cond_15 = ITE(op_GT_12, SN(32, 0xff), SN(32, 0)); + RzILOpEffect *op_ASSIGN_17 = WRITE_REG(bundle, Pd_op, CAST(8, MSB(cond_15), DUP(cond_15))); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_9, op_ASSIGN_17); + return instruction_sequence; +} + +// Pd = cmpb.gtu(Rs,Rt) +RzILOpEffect *hex_il_op_a4_cmpbgtu(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Pd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Pd = ((st8) ((((ut8) ((Rs >> 0x0) & 0xff)) > ((ut8) ((Rt >> 0x0) & 0xff))) ? 
0xff : 0x0)); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rs, SN(32, 0)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(32, 0xff)); + RzILOpPure *op_RSHIFT_13 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_15 = LOGAND(op_RSHIFT_13, SN(32, 0xff)); + RzILOpPure *op_GT_17 = UGT(CAST(8, IL_FALSE, op_AND_7), CAST(8, IL_FALSE, op_AND_15)); + RzILOpPure *cond_20 = ITE(op_GT_17, SN(32, 0xff), SN(32, 0)); + RzILOpEffect *op_ASSIGN_22 = WRITE_REG(bundle, Pd_op, CAST(8, MSB(cond_20), DUP(cond_20))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_22; + return instruction_sequence; +} + +// Pd = cmpb.gtu(Rs,Ii) +RzILOpEffect *hex_il_op_a4_cmpbgtui(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Pd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // Pd = ((st8) ((((ut32) ((ut8) ((Rs >> 0x0) & 0xff))) > u) ? 
0xff : 0x0)); + RzILOpPure *op_RSHIFT_7 = SHIFTRA(Rs, SN(32, 0)); + RzILOpPure *op_AND_9 = LOGAND(op_RSHIFT_7, SN(32, 0xff)); + RzILOpPure *op_GT_12 = UGT(CAST(32, IL_FALSE, CAST(8, IL_FALSE, op_AND_9)), VARL("u")); + RzILOpPure *cond_15 = ITE(op_GT_12, SN(32, 0xff), SN(32, 0)); + RzILOpEffect *op_ASSIGN_17 = WRITE_REG(bundle, Pd_op, CAST(8, MSB(cond_15), DUP(cond_15))); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_0, op_ASSIGN_17); + return instruction_sequence; +} + +// Pd = cmph.eq(Rs,Rt) +RzILOpEffect *hex_il_op_a4_cmpheq(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Pd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Pd = ((st8) ((((st16) ((Rs >> 0x0) & 0xffff)) == ((st16) ((Rt >> 0x0) & 0xffff))) ? 0xff : 0x0)); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rs, SN(32, 0)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_13 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_15 = LOGAND(op_RSHIFT_13, SN(32, 0xffff)); + RzILOpPure *op_EQ_17 = EQ(CAST(16, MSB(op_AND_7), DUP(op_AND_7)), CAST(16, MSB(op_AND_15), DUP(op_AND_15))); + RzILOpPure *cond_20 = ITE(op_EQ_17, SN(32, 0xff), SN(32, 0)); + RzILOpEffect *op_ASSIGN_22 = WRITE_REG(bundle, Pd_op, CAST(8, MSB(cond_20), DUP(cond_20))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_22; + return instruction_sequence; +} + +// Pd = cmph.eq(Rs,Ii) +RzILOpEffect *hex_il_op_a4_cmpheqi(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + const HexOp *Pd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // s = s; + RzILOpEffect *imm_assign_0 = SETL("s", 
s); + + // Pd = ((st8) ((((st32) ((st16) ((Rs >> 0x0) & 0xffff))) == s) ? 0xff : 0x0)); + RzILOpPure *op_RSHIFT_7 = SHIFTRA(Rs, SN(32, 0)); + RzILOpPure *op_AND_9 = LOGAND(op_RSHIFT_7, SN(32, 0xffff)); + RzILOpPure *op_EQ_12 = EQ(CAST(32, MSB(CAST(16, MSB(op_AND_9), DUP(op_AND_9))), CAST(16, MSB(DUP(op_AND_9)), DUP(op_AND_9))), VARL("s")); + RzILOpPure *cond_15 = ITE(op_EQ_12, SN(32, 0xff), SN(32, 0)); + RzILOpEffect *op_ASSIGN_17 = WRITE_REG(bundle, Pd_op, CAST(8, MSB(cond_15), DUP(cond_15))); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_0, op_ASSIGN_17); + return instruction_sequence; +} + +// Pd = cmph.gt(Rs,Rt) +RzILOpEffect *hex_il_op_a4_cmphgt(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Pd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Pd = ((st8) ((((st16) ((Rs >> 0x0) & 0xffff)) > ((st16) ((Rt >> 0x0) & 0xffff))) ? 
0xff : 0x0)); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rs, SN(32, 0)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_13 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_15 = LOGAND(op_RSHIFT_13, SN(32, 0xffff)); + RzILOpPure *op_GT_17 = SGT(CAST(16, MSB(op_AND_7), DUP(op_AND_7)), CAST(16, MSB(op_AND_15), DUP(op_AND_15))); + RzILOpPure *cond_20 = ITE(op_GT_17, SN(32, 0xff), SN(32, 0)); + RzILOpEffect *op_ASSIGN_22 = WRITE_REG(bundle, Pd_op, CAST(8, MSB(cond_20), DUP(cond_20))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_22; + return instruction_sequence; +} + +// Pd = cmph.gt(Rs,Ii) +RzILOpEffect *hex_il_op_a4_cmphgti(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + const HexOp *Pd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // s = s; + RzILOpEffect *imm_assign_0 = SETL("s", s); + + // Pd = ((st8) ((((st32) ((st16) ((Rs >> 0x0) & 0xffff))) > s) ? 
0xff : 0x0)); + RzILOpPure *op_RSHIFT_7 = SHIFTRA(Rs, SN(32, 0)); + RzILOpPure *op_AND_9 = LOGAND(op_RSHIFT_7, SN(32, 0xffff)); + RzILOpPure *op_GT_12 = SGT(CAST(32, MSB(CAST(16, MSB(op_AND_9), DUP(op_AND_9))), CAST(16, MSB(DUP(op_AND_9)), DUP(op_AND_9))), VARL("s")); + RzILOpPure *cond_15 = ITE(op_GT_12, SN(32, 0xff), SN(32, 0)); + RzILOpEffect *op_ASSIGN_17 = WRITE_REG(bundle, Pd_op, CAST(8, MSB(cond_15), DUP(cond_15))); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_0, op_ASSIGN_17); + return instruction_sequence; +} + +// Pd = cmph.gtu(Rs,Rt) +RzILOpEffect *hex_il_op_a4_cmphgtu(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Pd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Pd = ((st8) ((((ut16) ((Rs >> 0x0) & 0xffff)) > ((ut16) ((Rt >> 0x0) & 0xffff))) ? 
0xff : 0x0)); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rs, SN(32, 0)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_13 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_15 = LOGAND(op_RSHIFT_13, SN(32, 0xffff)); + RzILOpPure *op_GT_17 = UGT(CAST(16, IL_FALSE, op_AND_7), CAST(16, IL_FALSE, op_AND_15)); + RzILOpPure *cond_20 = ITE(op_GT_17, SN(32, 0xff), SN(32, 0)); + RzILOpEffect *op_ASSIGN_22 = WRITE_REG(bundle, Pd_op, CAST(8, MSB(cond_20), DUP(cond_20))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_22; + return instruction_sequence; +} + +// Pd = cmph.gtu(Rs,Ii) +RzILOpEffect *hex_il_op_a4_cmphgtui(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Pd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // Pd = ((st8) ((((ut32) ((ut16) ((Rs >> 0x0) & 0xffff))) > u) ? 
0xff : 0x0)); + RzILOpPure *op_RSHIFT_7 = SHIFTRA(Rs, SN(32, 0)); + RzILOpPure *op_AND_9 = LOGAND(op_RSHIFT_7, SN(32, 0xffff)); + RzILOpPure *op_GT_12 = UGT(CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_9)), VARL("u")); + RzILOpPure *cond_15 = ITE(op_GT_12, SN(32, 0xff), SN(32, 0)); + RzILOpEffect *op_ASSIGN_17 = WRITE_REG(bundle, Pd_op, CAST(8, MSB(cond_15), DUP(cond_15))); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_0, op_ASSIGN_17); + return instruction_sequence; +} + +// Rdd = combine(Ii,II) +RzILOpEffect *hex_il_op_a4_combineii(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *U = UN(32, (ut32)ISA2IMM(hi, 'U')); + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + + // U = U; + RzILOpEffect *imm_assign_0 = SETL("U", U); + + // Rdd = ((Rdd & (~(0xffffffff << 0x0))) | ((((st64) U) & 0xffffffff) << 0x0)); + RzILOpPure *op_LSHIFT_7 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0)); + RzILOpPure *op_NOT_8 = LOGNOT(op_LSHIFT_7); + RzILOpPure *op_AND_9 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_8); + RzILOpPure *op_AND_12 = LOGAND(CAST(64, IL_FALSE, VARL("U")), SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_16 = SHIFTL0(op_AND_12, SN(32, 0)); + RzILOpPure *op_OR_17 = LOGOR(op_AND_9, op_LSHIFT_16); + RzILOpEffect *op_ASSIGN_18 = WRITE_REG(bundle, Rdd_op, op_OR_17); + + // s = s; + RzILOpEffect *imm_assign_27 = SETL("s", s); + + // Rdd = ((Rdd & (~(0xffffffff << 0x20))) | ((((st64) s) & 0xffffffff) << 0x20)); + RzILOpPure *op_LSHIFT_24 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0x20)); + RzILOpPure *op_NOT_25 = LOGNOT(op_LSHIFT_24); + RzILOpPure *op_AND_26 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_25); + RzILOpPure *op_AND_31 = LOGAND(CAST(64, MSB(VARL("s")), VARL("s")), SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_35 = SHIFTL0(op_AND_31, SN(32, 0x20)); + RzILOpPure *op_OR_36 = LOGOR(op_AND_26, op_LSHIFT_35); + RzILOpEffect *op_ASSIGN_37 = 
WRITE_REG(bundle, Rdd_op, op_OR_36); + + RzILOpEffect *instruction_sequence = SEQN(4, imm_assign_0, imm_assign_27, op_ASSIGN_18, op_ASSIGN_37); + return instruction_sequence; +} + +// Rdd = combine(Ii,Rs) +RzILOpEffect *hex_il_op_a4_combineir(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // s = s; + RzILOpEffect *imm_assign_0 = SETL("s", s); + + // Rdd = ((Rdd & (~(0xffffffff << 0x0))) | ((((st64) Rs) & 0xffffffff) << 0x0)); + RzILOpPure *op_LSHIFT_7 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0)); + RzILOpPure *op_NOT_8 = LOGNOT(op_LSHIFT_7); + RzILOpPure *op_AND_9 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_8); + RzILOpPure *op_AND_13 = LOGAND(CAST(64, MSB(Rs), DUP(Rs)), SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_17 = SHIFTL0(op_AND_13, SN(32, 0)); + RzILOpPure *op_OR_18 = LOGOR(op_AND_9, op_LSHIFT_17); + RzILOpEffect *op_ASSIGN_19 = WRITE_REG(bundle, Rdd_op, op_OR_18); + + // Rdd = ((Rdd & (~(0xffffffff << 0x20))) | ((((st64) s) & 0xffffffff) << 0x20)); + RzILOpPure *op_LSHIFT_25 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0x20)); + RzILOpPure *op_NOT_26 = LOGNOT(op_LSHIFT_25); + RzILOpPure *op_AND_27 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_26); + RzILOpPure *op_AND_30 = LOGAND(CAST(64, MSB(VARL("s")), VARL("s")), SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_34 = SHIFTL0(op_AND_30, SN(32, 0x20)); + RzILOpPure *op_OR_35 = LOGOR(op_AND_27, op_LSHIFT_34); + RzILOpEffect *op_ASSIGN_36 = WRITE_REG(bundle, Rdd_op, op_OR_35); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_19, op_ASSIGN_36); + return instruction_sequence; +} + +// Rdd = combine(Rs,Ii) +RzILOpEffect *hex_il_op_a4_combineri(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + 
RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // s = s; + RzILOpEffect *imm_assign_0 = SETL("s", s); + + // Rdd = ((Rdd & (~(0xffffffff << 0x0))) | ((((st64) s) & 0xffffffff) << 0x0)); + RzILOpPure *op_LSHIFT_7 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0)); + RzILOpPure *op_NOT_8 = LOGNOT(op_LSHIFT_7); + RzILOpPure *op_AND_9 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_8); + RzILOpPure *op_AND_12 = LOGAND(CAST(64, MSB(VARL("s")), VARL("s")), SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_16 = SHIFTL0(op_AND_12, SN(32, 0)); + RzILOpPure *op_OR_17 = LOGOR(op_AND_9, op_LSHIFT_16); + RzILOpEffect *op_ASSIGN_18 = WRITE_REG(bundle, Rdd_op, op_OR_17); + + // Rdd = ((Rdd & (~(0xffffffff << 0x20))) | ((((st64) Rs) & 0xffffffff) << 0x20)); + RzILOpPure *op_LSHIFT_24 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0x20)); + RzILOpPure *op_NOT_25 = LOGNOT(op_LSHIFT_24); + RzILOpPure *op_AND_26 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_25); + RzILOpPure *op_AND_30 = LOGAND(CAST(64, MSB(Rs), DUP(Rs)), SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_34 = SHIFTL0(op_AND_30, SN(32, 0x20)); + RzILOpPure *op_OR_35 = LOGOR(op_AND_26, op_LSHIFT_34); + RzILOpEffect *op_ASSIGN_36 = WRITE_REG(bundle, Rdd_op, op_OR_35); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_18, op_ASSIGN_36); + return instruction_sequence; +} + +// Rd = cround(Rs,Ii) +RzILOpEffect *hex_il_op_a4_cround_ri(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + + // u = u; + RzILOpEffect *imm_assign_2 = SETL("u", u); + + // conv_round(Rs, ((st32) u)); + RzILOpEffect *conv_round_call_5 = hex_conv_round(Rs, CAST(32, 
IL_FALSE, VARL("u"))); + + // h_tmp117 = conv_round(Rs, ((st32) u)); + RzILOpEffect *op_ASSIGN_hybrid_tmp_7 = SETL("h_tmp117", SIGNED(32, VARL("ret_val"))); + + // seq(conv_round(Rs, ((st32) u)); h_tmp117 = conv_round(Rs, ((st32 ...; + RzILOpEffect *seq_8 = SEQN(2, conv_round_call_5, op_ASSIGN_hybrid_tmp_7); + + // Rd = h_tmp117; + RzILOpEffect *op_ASSIGN_9 = WRITE_REG(bundle, Rd_op, VARL("h_tmp117")); + + // seq(seq(conv_round(Rs, ((st32) u)); h_tmp117 = conv_round(Rs, (( ...; + RzILOpEffect *seq_10 = SEQN(2, seq_8, op_ASSIGN_9); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_2, seq_10); + return instruction_sequence; +} + +// Rd = cround(Rs,Rt) +RzILOpEffect *hex_il_op_a4_cround_rr(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // conv_round(Rs, ((st32) extract64(((ut64) Rt), 0x0, 0x5))); + RzILOpEffect *conv_round_call_12 = hex_conv_round(Rs, CAST(32, IL_FALSE, EXTRACT64(CAST(64, IL_FALSE, Rt), SN(32, 0), SN(32, 5)))); + + // h_tmp118 = conv_round(Rs, ((st32) extract64(((ut64) Rt), 0x0, 0x5))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_14 = SETL("h_tmp118", SIGNED(32, VARL("ret_val"))); + + // seq(conv_round(Rs, ((st32) extract64(((ut64) Rt), 0x0, 0x5))); h ...; + RzILOpEffect *seq_15 = SEQN(2, conv_round_call_12, op_ASSIGN_hybrid_tmp_14); + + // Rd = h_tmp118; + RzILOpEffect *op_ASSIGN_16 = WRITE_REG(bundle, Rd_op, VARL("h_tmp118")); + + // seq(seq(conv_round(Rs, ((st32) extract64(((ut64) Rt), 0x0, 0x5)) ...; + RzILOpEffect *seq_17 = SEQN(2, seq_15, op_ASSIGN_16); + + RzILOpEffect *instruction_sequence = seq_17; + return instruction_sequence; +} + +// immext(Ii) +RzILOpEffect *hex_il_op_a4_ext(HexInsnPktBundle *bundle) { + // READ + + RzILOpEffect 
*instruction_sequence = EMPTY(); + return instruction_sequence; +} + +// Rd = modwrap(Rs,Rt) +RzILOpEffect *hex_il_op_a4_modwrapu(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rd = ((st32) ((ut32) Rs) + ((ut32) Rt)); + RzILOpPure *op_ADD_7 = ADD(CAST(32, IL_FALSE, Rs), CAST(32, IL_FALSE, Rt)); + RzILOpEffect *op_ASSIGN_9 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, op_ADD_7)); + + // Rd = ((st32) ((ut32) Rs) - ((ut32) Rt)); + RzILOpPure *op_SUB_15 = SUB(CAST(32, IL_FALSE, DUP(Rs)), CAST(32, IL_FALSE, DUP(Rt))); + RzILOpEffect *op_ASSIGN_17 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, op_SUB_15)); + + // Rd = Rs; + RzILOpEffect *op_ASSIGN_18 = WRITE_REG(bundle, Rd_op, DUP(Rs)); + + // seq(Rd = ((st32) ((ut32) Rs) - ((ut32) Rt))); + RzILOpEffect *seq_then_19 = op_ASSIGN_17; + + // seq(Rd = Rs); + RzILOpEffect *seq_else_20 = op_ASSIGN_18; + + // if ((((ut32) Rs) >= ((ut32) Rt))) {seq(Rd = ((st32) ((ut32) Rs) - ((ut32) Rt)))} else {seq(Rd = Rs)}; + RzILOpPure *op_GE_12 = UGE(CAST(32, IL_FALSE, DUP(Rs)), CAST(32, IL_FALSE, DUP(Rt))); + RzILOpEffect *branch_21 = BRANCH(op_GE_12, seq_then_19, seq_else_20); + + // seq(Rd = ((st32) ((ut32) Rs) + ((ut32) Rt))); + RzILOpEffect *seq_then_22 = op_ASSIGN_9; + + // seq(if ((((ut32) Rs) >= ((ut32) Rt))) {seq(Rd = ((st32) ((ut32) ...; + RzILOpEffect *seq_else_23 = branch_21; + + // if ((Rs < 0x0)) {seq(Rd = ((st32) ((ut32) Rs) + ((ut32) Rt)))} else {seq(if ((((ut32) Rs) >= ((ut32) Rt))) {seq(Rd = ((st32) ((ut32) ...}; + RzILOpPure *op_LT_2 = SLT(DUP(Rs), SN(32, 0)); + RzILOpEffect *branch_24 = BRANCH(op_LT_2, seq_then_22, seq_else_23); + + RzILOpEffect *instruction_sequence = branch_24; + return instruction_sequence; +} + +// Rd = 
or(Rt,~Rs) +RzILOpEffect *hex_il_op_a4_orn(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // Rd = (Rt | (~Rs)); + RzILOpPure *op_NOT_3 = LOGNOT(Rs); + RzILOpPure *op_OR_4 = LOGOR(Rt, op_NOT_3); + RzILOpEffect *op_ASSIGN_5 = WRITE_REG(bundle, Rd_op, op_OR_4); + + RzILOpEffect *instruction_sequence = op_ASSIGN_5; + return instruction_sequence; +} + +// Rdd = or(Rtt,~Rss) +RzILOpEffect *hex_il_op_a4_ornp(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + + // Rdd = (Rtt | (~Rss)); + RzILOpPure *op_NOT_3 = LOGNOT(Rss); + RzILOpPure *op_OR_4 = LOGOR(Rtt, op_NOT_3); + RzILOpEffect *op_ASSIGN_5 = WRITE_REG(bundle, Rdd_op, op_OR_4); + + RzILOpEffect *instruction_sequence = op_ASSIGN_5; + return instruction_sequence; +} + +// if (!Pu) Rd = aslh(Rs) +RzILOpEffect *hex_il_op_a4_paslhf(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Pu_op = ISA2REG(hi, 'u', false); + RzILOpPure *Pu = READ_REG(pkt, Pu_op, false); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // Rd = (Rs << 0x10); + RzILOpPure *op_LSHIFT_8 = SHIFTL0(Rs, SN(32, 16)); + RzILOpEffect *op_ASSIGN_9 = WRITE_REG(bundle, Rd_op, op_LSHIFT_8); + + // nop; + RzILOpEffect *nop_10 = NOP(); + + // seq(Rd = (Rs << 0x10)); + RzILOpEffect *seq_then_11 = op_ASSIGN_9; + + 
// seq(nop); + RzILOpEffect *seq_else_12 = nop_10; + + // if (! (((st32) Pu) & 0x1)) {seq(Rd = (Rs << 0x10))} else {seq(nop)}; + RzILOpPure *op_AND_3 = LOGAND(CAST(32, MSB(Pu), DUP(Pu)), SN(32, 1)); + RzILOpPure *op_INV_4 = INV(NON_ZERO(op_AND_3)); + RzILOpEffect *branch_13 = BRANCH(op_INV_4, seq_then_11, seq_else_12); + + RzILOpEffect *instruction_sequence = branch_13; + return instruction_sequence; +} + +// if (!Pu.new) Rd = aslh(Rs) +RzILOpEffect *hex_il_op_a4_paslhfnew(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Pu_new_op = ISA2REG(hi, 'u', true); + RzILOpPure *Pu_new = READ_REG(pkt, Pu_new_op, true); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // Rd = (Rs << 0x10); + RzILOpPure *op_LSHIFT_8 = SHIFTL0(Rs, SN(32, 16)); + RzILOpEffect *op_ASSIGN_9 = WRITE_REG(bundle, Rd_op, op_LSHIFT_8); + + // nop; + RzILOpEffect *nop_10 = NOP(); + + // seq(Rd = (Rs << 0x10)); + RzILOpEffect *seq_then_11 = op_ASSIGN_9; + + // seq(nop); + RzILOpEffect *seq_else_12 = nop_10; + + // if (! 
(((st32) Pu_new) & 0x1)) {seq(Rd = (Rs << 0x10))} else {seq(nop)}; + RzILOpPure *op_AND_3 = LOGAND(CAST(32, MSB(Pu_new), DUP(Pu_new)), SN(32, 1)); + RzILOpPure *op_INV_4 = INV(NON_ZERO(op_AND_3)); + RzILOpEffect *branch_13 = BRANCH(op_INV_4, seq_then_11, seq_else_12); + + RzILOpEffect *instruction_sequence = branch_13; + return instruction_sequence; +} + +// if (Pu) Rd = aslh(Rs) +RzILOpEffect *hex_il_op_a4_paslht(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Pu_op = ISA2REG(hi, 'u', false); + RzILOpPure *Pu = READ_REG(pkt, Pu_op, false); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // Rd = (Rs << 0x10); + RzILOpPure *op_LSHIFT_7 = SHIFTL0(Rs, SN(32, 16)); + RzILOpEffect *op_ASSIGN_8 = WRITE_REG(bundle, Rd_op, op_LSHIFT_7); + + // nop; + RzILOpEffect *nop_9 = NOP(); + + // seq(Rd = (Rs << 0x10)); + RzILOpEffect *seq_then_10 = op_ASSIGN_8; + + // seq(nop); + RzILOpEffect *seq_else_11 = nop_9; + + // if ((((st32) Pu) & 0x1)) {seq(Rd = (Rs << 0x10))} else {seq(nop)}; + RzILOpPure *op_AND_3 = LOGAND(CAST(32, MSB(Pu), DUP(Pu)), SN(32, 1)); + RzILOpEffect *branch_12 = BRANCH(NON_ZERO(op_AND_3), seq_then_10, seq_else_11); + + RzILOpEffect *instruction_sequence = branch_12; + return instruction_sequence; +} + +// if (Pu.new) Rd = aslh(Rs) +RzILOpEffect *hex_il_op_a4_paslhtnew(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Pu_new_op = ISA2REG(hi, 'u', true); + RzILOpPure *Pu_new = READ_REG(pkt, Pu_new_op, true); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // Rd = (Rs << 0x10); + RzILOpPure *op_LSHIFT_7 = SHIFTL0(Rs, SN(32, 16)); + RzILOpEffect *op_ASSIGN_8 = WRITE_REG(bundle, Rd_op, op_LSHIFT_7); + + // nop; + RzILOpEffect *nop_9 
= NOP(); + + // seq(Rd = (Rs << 0x10)); + RzILOpEffect *seq_then_10 = op_ASSIGN_8; + + // seq(nop); + RzILOpEffect *seq_else_11 = nop_9; + + // if ((((st32) Pu_new) & 0x1)) {seq(Rd = (Rs << 0x10))} else {seq(nop)}; + RzILOpPure *op_AND_3 = LOGAND(CAST(32, MSB(Pu_new), DUP(Pu_new)), SN(32, 1)); + RzILOpEffect *branch_12 = BRANCH(NON_ZERO(op_AND_3), seq_then_10, seq_else_11); + + RzILOpEffect *instruction_sequence = branch_12; + return instruction_sequence; +} + +// if (!Pu) Rd = asrh(Rs) +RzILOpEffect *hex_il_op_a4_pasrhf(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Pu_op = ISA2REG(hi, 'u', false); + RzILOpPure *Pu = READ_REG(pkt, Pu_op, false); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // Rd = (Rs >> 0x10); + RzILOpPure *op_RSHIFT_8 = SHIFTRA(Rs, SN(32, 16)); + RzILOpEffect *op_ASSIGN_9 = WRITE_REG(bundle, Rd_op, op_RSHIFT_8); + + // nop; + RzILOpEffect *nop_10 = NOP(); + + // seq(Rd = (Rs >> 0x10)); + RzILOpEffect *seq_then_11 = op_ASSIGN_9; + + // seq(nop); + RzILOpEffect *seq_else_12 = nop_10; + + // if (! 
(((st32) Pu) & 0x1)) {seq(Rd = (Rs >> 0x10))} else {seq(nop)}; + RzILOpPure *op_AND_3 = LOGAND(CAST(32, MSB(Pu), DUP(Pu)), SN(32, 1)); + RzILOpPure *op_INV_4 = INV(NON_ZERO(op_AND_3)); + RzILOpEffect *branch_13 = BRANCH(op_INV_4, seq_then_11, seq_else_12); + + RzILOpEffect *instruction_sequence = branch_13; + return instruction_sequence; +} + +// if (!Pu.new) Rd = asrh(Rs) +RzILOpEffect *hex_il_op_a4_pasrhfnew(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Pu_new_op = ISA2REG(hi, 'u', true); + RzILOpPure *Pu_new = READ_REG(pkt, Pu_new_op, true); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // Rd = (Rs >> 0x10); + RzILOpPure *op_RSHIFT_8 = SHIFTRA(Rs, SN(32, 16)); + RzILOpEffect *op_ASSIGN_9 = WRITE_REG(bundle, Rd_op, op_RSHIFT_8); + + // nop; + RzILOpEffect *nop_10 = NOP(); + + // seq(Rd = (Rs >> 0x10)); + RzILOpEffect *seq_then_11 = op_ASSIGN_9; + + // seq(nop); + RzILOpEffect *seq_else_12 = nop_10; + + // if (! 
(((st32) Pu_new) & 0x1)) {seq(Rd = (Rs >> 0x10))} else {seq(nop)}; + RzILOpPure *op_AND_3 = LOGAND(CAST(32, MSB(Pu_new), DUP(Pu_new)), SN(32, 1)); + RzILOpPure *op_INV_4 = INV(NON_ZERO(op_AND_3)); + RzILOpEffect *branch_13 = BRANCH(op_INV_4, seq_then_11, seq_else_12); + + RzILOpEffect *instruction_sequence = branch_13; + return instruction_sequence; +} + +// if (Pu) Rd = asrh(Rs) +RzILOpEffect *hex_il_op_a4_pasrht(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Pu_op = ISA2REG(hi, 'u', false); + RzILOpPure *Pu = READ_REG(pkt, Pu_op, false); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // Rd = (Rs >> 0x10); + RzILOpPure *op_RSHIFT_7 = SHIFTRA(Rs, SN(32, 16)); + RzILOpEffect *op_ASSIGN_8 = WRITE_REG(bundle, Rd_op, op_RSHIFT_7); + + // nop; + RzILOpEffect *nop_9 = NOP(); + + // seq(Rd = (Rs >> 0x10)); + RzILOpEffect *seq_then_10 = op_ASSIGN_8; + + // seq(nop); + RzILOpEffect *seq_else_11 = nop_9; + + // if ((((st32) Pu) & 0x1)) {seq(Rd = (Rs >> 0x10))} else {seq(nop)}; + RzILOpPure *op_AND_3 = LOGAND(CAST(32, MSB(Pu), DUP(Pu)), SN(32, 1)); + RzILOpEffect *branch_12 = BRANCH(NON_ZERO(op_AND_3), seq_then_10, seq_else_11); + + RzILOpEffect *instruction_sequence = branch_12; + return instruction_sequence; +} + +// if (Pu.new) Rd = asrh(Rs) +RzILOpEffect *hex_il_op_a4_pasrhtnew(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Pu_new_op = ISA2REG(hi, 'u', true); + RzILOpPure *Pu_new = READ_REG(pkt, Pu_new_op, true); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // Rd = (Rs >> 0x10); + RzILOpPure *op_RSHIFT_7 = SHIFTRA(Rs, SN(32, 16)); + RzILOpEffect *op_ASSIGN_8 = WRITE_REG(bundle, Rd_op, op_RSHIFT_7); + + // nop; + RzILOpEffect *nop_9 
= NOP(); + + // seq(Rd = (Rs >> 0x10)); + RzILOpEffect *seq_then_10 = op_ASSIGN_8; + + // seq(nop); + RzILOpEffect *seq_else_11 = nop_9; + + // if ((((st32) Pu_new) & 0x1)) {seq(Rd = (Rs >> 0x10))} else {seq(nop)}; + RzILOpPure *op_AND_3 = LOGAND(CAST(32, MSB(Pu_new), DUP(Pu_new)), SN(32, 1)); + RzILOpEffect *branch_12 = BRANCH(NON_ZERO(op_AND_3), seq_then_10, seq_else_11); + + RzILOpEffect *instruction_sequence = branch_12; + return instruction_sequence; +} + +// if (!Pu) Rd = sxtb(Rs) +RzILOpEffect *hex_il_op_a4_psxtbf(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Pu_op = ISA2REG(hi, 'u', false); + RzILOpPure *Pu = READ_REG(pkt, Pu_op, false); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // Rd = ((st32) sextract64(((ut64) Rs), 0x0, 0x8)); + RzILOpEffect *op_ASSIGN_16 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(SEXTRACT64(CAST(64, IL_FALSE, Rs), SN(32, 0), SN(32, 8))), SEXTRACT64(CAST(64, IL_FALSE, DUP(Rs)), SN(32, 0), SN(32, 8)))); + + // nop; + RzILOpEffect *nop_17 = NOP(); + + // seq(Rd = ((st32) sextract64(((ut64) Rs), 0x0, 0x8))); + RzILOpEffect *seq_then_18 = op_ASSIGN_16; + + // seq(nop); + RzILOpEffect *seq_else_19 = nop_17; + + // if (! 
(((st32) Pu) & 0x1)) {seq(Rd = ((st32) sextract64(((ut64) Rs), 0x0, 0x8)))} else {seq(nop)}; + RzILOpPure *op_AND_3 = LOGAND(CAST(32, MSB(Pu), DUP(Pu)), SN(32, 1)); + RzILOpPure *op_INV_4 = INV(NON_ZERO(op_AND_3)); + RzILOpEffect *branch_20 = BRANCH(op_INV_4, seq_then_18, seq_else_19); + + RzILOpEffect *instruction_sequence = branch_20; + return instruction_sequence; +} + +// if (!Pu.new) Rd = sxtb(Rs) +RzILOpEffect *hex_il_op_a4_psxtbfnew(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Pu_new_op = ISA2REG(hi, 'u', true); + RzILOpPure *Pu_new = READ_REG(pkt, Pu_new_op, true); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // Rd = ((st32) sextract64(((ut64) Rs), 0x0, 0x8)); + RzILOpEffect *op_ASSIGN_16 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(SEXTRACT64(CAST(64, IL_FALSE, Rs), SN(32, 0), SN(32, 8))), SEXTRACT64(CAST(64, IL_FALSE, DUP(Rs)), SN(32, 0), SN(32, 8)))); + + // nop; + RzILOpEffect *nop_17 = NOP(); + + // seq(Rd = ((st32) sextract64(((ut64) Rs), 0x0, 0x8))); + RzILOpEffect *seq_then_18 = op_ASSIGN_16; + + // seq(nop); + RzILOpEffect *seq_else_19 = nop_17; + + // if (! 
(((st32) Pu_new) & 0x1)) {seq(Rd = ((st32) sextract64(((ut64) Rs), 0x0, 0x8)))} else {seq(nop)}; + RzILOpPure *op_AND_3 = LOGAND(CAST(32, MSB(Pu_new), DUP(Pu_new)), SN(32, 1)); + RzILOpPure *op_INV_4 = INV(NON_ZERO(op_AND_3)); + RzILOpEffect *branch_20 = BRANCH(op_INV_4, seq_then_18, seq_else_19); + + RzILOpEffect *instruction_sequence = branch_20; + return instruction_sequence; +} + +// if (Pu) Rd = sxtb(Rs) +RzILOpEffect *hex_il_op_a4_psxtbt(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Pu_op = ISA2REG(hi, 'u', false); + RzILOpPure *Pu = READ_REG(pkt, Pu_op, false); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // Rd = ((st32) sextract64(((ut64) Rs), 0x0, 0x8)); + RzILOpEffect *op_ASSIGN_15 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(SEXTRACT64(CAST(64, IL_FALSE, Rs), SN(32, 0), SN(32, 8))), SEXTRACT64(CAST(64, IL_FALSE, DUP(Rs)), SN(32, 0), SN(32, 8)))); + + // nop; + RzILOpEffect *nop_16 = NOP(); + + // seq(Rd = ((st32) sextract64(((ut64) Rs), 0x0, 0x8))); + RzILOpEffect *seq_then_17 = op_ASSIGN_15; + + // seq(nop); + RzILOpEffect *seq_else_18 = nop_16; + + // if ((((st32) Pu) & 0x1)) {seq(Rd = ((st32) sextract64(((ut64) Rs), 0x0, 0x8)))} else {seq(nop)}; + RzILOpPure *op_AND_3 = LOGAND(CAST(32, MSB(Pu), DUP(Pu)), SN(32, 1)); + RzILOpEffect *branch_19 = BRANCH(NON_ZERO(op_AND_3), seq_then_17, seq_else_18); + + RzILOpEffect *instruction_sequence = branch_19; + return instruction_sequence; +} + +// if (Pu.new) Rd = sxtb(Rs) +RzILOpEffect *hex_il_op_a4_psxtbtnew(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Pu_new_op = ISA2REG(hi, 'u', true); + RzILOpPure *Pu_new = READ_REG(pkt, Pu_new_op, true); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, 
Rs_op, false); + + // Rd = ((st32) sextract64(((ut64) Rs), 0x0, 0x8)); + RzILOpEffect *op_ASSIGN_15 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(SEXTRACT64(CAST(64, IL_FALSE, Rs), SN(32, 0), SN(32, 8))), SEXTRACT64(CAST(64, IL_FALSE, DUP(Rs)), SN(32, 0), SN(32, 8)))); + + // nop; + RzILOpEffect *nop_16 = NOP(); + + // seq(Rd = ((st32) sextract64(((ut64) Rs), 0x0, 0x8))); + RzILOpEffect *seq_then_17 = op_ASSIGN_15; + + // seq(nop); + RzILOpEffect *seq_else_18 = nop_16; + + // if ((((st32) Pu_new) & 0x1)) {seq(Rd = ((st32) sextract64(((ut64) Rs), 0x0, 0x8)))} else {seq(nop)}; + RzILOpPure *op_AND_3 = LOGAND(CAST(32, MSB(Pu_new), DUP(Pu_new)), SN(32, 1)); + RzILOpEffect *branch_19 = BRANCH(NON_ZERO(op_AND_3), seq_then_17, seq_else_18); + + RzILOpEffect *instruction_sequence = branch_19; + return instruction_sequence; +} + +// if (!Pu) Rd = sxth(Rs) +RzILOpEffect *hex_il_op_a4_psxthf(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Pu_op = ISA2REG(hi, 'u', false); + RzILOpPure *Pu = READ_REG(pkt, Pu_op, false); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // Rd = ((st32) sextract64(((ut64) Rs), 0x0, 0x10)); + RzILOpEffect *op_ASSIGN_16 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(SEXTRACT64(CAST(64, IL_FALSE, Rs), SN(32, 0), SN(32, 16))), SEXTRACT64(CAST(64, IL_FALSE, DUP(Rs)), SN(32, 0), SN(32, 16)))); + + // nop; + RzILOpEffect *nop_17 = NOP(); + + // seq(Rd = ((st32) sextract64(((ut64) Rs), 0x0, 0x10))); + RzILOpEffect *seq_then_18 = op_ASSIGN_16; + + // seq(nop); + RzILOpEffect *seq_else_19 = nop_17; + + // if (! 
(((st32) Pu) & 0x1)) {seq(Rd = ((st32) sextract64(((ut64) Rs), 0x0, 0x10)))} else {seq(nop)}; + RzILOpPure *op_AND_3 = LOGAND(CAST(32, MSB(Pu), DUP(Pu)), SN(32, 1)); + RzILOpPure *op_INV_4 = INV(NON_ZERO(op_AND_3)); + RzILOpEffect *branch_20 = BRANCH(op_INV_4, seq_then_18, seq_else_19); + + RzILOpEffect *instruction_sequence = branch_20; + return instruction_sequence; +} + +// if (!Pu.new) Rd = sxth(Rs) +RzILOpEffect *hex_il_op_a4_psxthfnew(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Pu_new_op = ISA2REG(hi, 'u', true); + RzILOpPure *Pu_new = READ_REG(pkt, Pu_new_op, true); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // Rd = ((st32) sextract64(((ut64) Rs), 0x0, 0x10)); + RzILOpEffect *op_ASSIGN_16 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(SEXTRACT64(CAST(64, IL_FALSE, Rs), SN(32, 0), SN(32, 16))), SEXTRACT64(CAST(64, IL_FALSE, DUP(Rs)), SN(32, 0), SN(32, 16)))); + + // nop; + RzILOpEffect *nop_17 = NOP(); + + // seq(Rd = ((st32) sextract64(((ut64) Rs), 0x0, 0x10))); + RzILOpEffect *seq_then_18 = op_ASSIGN_16; + + // seq(nop); + RzILOpEffect *seq_else_19 = nop_17; + + // if (! 
(((st32) Pu_new) & 0x1)) {seq(Rd = ((st32) sextract64(((ut64) Rs), 0x0, 0x10)))} else {seq(nop)}; + RzILOpPure *op_AND_3 = LOGAND(CAST(32, MSB(Pu_new), DUP(Pu_new)), SN(32, 1)); + RzILOpPure *op_INV_4 = INV(NON_ZERO(op_AND_3)); + RzILOpEffect *branch_20 = BRANCH(op_INV_4, seq_then_18, seq_else_19); + + RzILOpEffect *instruction_sequence = branch_20; + return instruction_sequence; +} + +// if (Pu) Rd = sxth(Rs) +RzILOpEffect *hex_il_op_a4_psxtht(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Pu_op = ISA2REG(hi, 'u', false); + RzILOpPure *Pu = READ_REG(pkt, Pu_op, false); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // Rd = ((st32) sextract64(((ut64) Rs), 0x0, 0x10)); + RzILOpEffect *op_ASSIGN_15 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(SEXTRACT64(CAST(64, IL_FALSE, Rs), SN(32, 0), SN(32, 16))), SEXTRACT64(CAST(64, IL_FALSE, DUP(Rs)), SN(32, 0), SN(32, 16)))); + + // nop; + RzILOpEffect *nop_16 = NOP(); + + // seq(Rd = ((st32) sextract64(((ut64) Rs), 0x0, 0x10))); + RzILOpEffect *seq_then_17 = op_ASSIGN_15; + + // seq(nop); + RzILOpEffect *seq_else_18 = nop_16; + + // if ((((st32) Pu) & 0x1)) {seq(Rd = ((st32) sextract64(((ut64) Rs), 0x0, 0x10)))} else {seq(nop)}; + RzILOpPure *op_AND_3 = LOGAND(CAST(32, MSB(Pu), DUP(Pu)), SN(32, 1)); + RzILOpEffect *branch_19 = BRANCH(NON_ZERO(op_AND_3), seq_then_17, seq_else_18); + + RzILOpEffect *instruction_sequence = branch_19; + return instruction_sequence; +} + +// if (Pu.new) Rd = sxth(Rs) +RzILOpEffect *hex_il_op_a4_psxthtnew(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Pu_new_op = ISA2REG(hi, 'u', true); + RzILOpPure *Pu_new = READ_REG(pkt, Pu_new_op, true); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = 
READ_REG(pkt, Rs_op, false); + + // Rd = ((st32) sextract64(((ut64) Rs), 0x0, 0x10)); + RzILOpEffect *op_ASSIGN_15 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(SEXTRACT64(CAST(64, IL_FALSE, Rs), SN(32, 0), SN(32, 16))), SEXTRACT64(CAST(64, IL_FALSE, DUP(Rs)), SN(32, 0), SN(32, 16)))); + + // nop; + RzILOpEffect *nop_16 = NOP(); + + // seq(Rd = ((st32) sextract64(((ut64) Rs), 0x0, 0x10))); + RzILOpEffect *seq_then_17 = op_ASSIGN_15; + + // seq(nop); + RzILOpEffect *seq_else_18 = nop_16; + + // if ((((st32) Pu_new) & 0x1)) {seq(Rd = ((st32) sextract64(((ut64) Rs), 0x0, 0x10)))} else {seq(nop)}; + RzILOpPure *op_AND_3 = LOGAND(CAST(32, MSB(Pu_new), DUP(Pu_new)), SN(32, 1)); + RzILOpEffect *branch_19 = BRANCH(NON_ZERO(op_AND_3), seq_then_17, seq_else_18); + + RzILOpEffect *instruction_sequence = branch_19; + return instruction_sequence; +} + +// if (!Pu) Rd = zxtb(Rs) +RzILOpEffect *hex_il_op_a4_pzxtbf(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Pu_op = ISA2REG(hi, 'u', false); + RzILOpPure *Pu = READ_REG(pkt, Pu_op, false); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // Rd = ((st32) extract64(((ut64) Rs), 0x0, 0x8)); + RzILOpEffect *op_ASSIGN_16 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, EXTRACT64(CAST(64, IL_FALSE, Rs), SN(32, 0), SN(32, 8)))); + + // nop; + RzILOpEffect *nop_17 = NOP(); + + // seq(Rd = ((st32) extract64(((ut64) Rs), 0x0, 0x8))); + RzILOpEffect *seq_then_18 = op_ASSIGN_16; + + // seq(nop); + RzILOpEffect *seq_else_19 = nop_17; + + // if (! 
(((st32) Pu) & 0x1)) {seq(Rd = ((st32) extract64(((ut64) Rs), 0x0, 0x8)))} else {seq(nop)}; + RzILOpPure *op_AND_3 = LOGAND(CAST(32, MSB(Pu), DUP(Pu)), SN(32, 1)); + RzILOpPure *op_INV_4 = INV(NON_ZERO(op_AND_3)); + RzILOpEffect *branch_20 = BRANCH(op_INV_4, seq_then_18, seq_else_19); + + RzILOpEffect *instruction_sequence = branch_20; + return instruction_sequence; +} + +// if (!Pu.new) Rd = zxtb(Rs) +RzILOpEffect *hex_il_op_a4_pzxtbfnew(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Pu_new_op = ISA2REG(hi, 'u', true); + RzILOpPure *Pu_new = READ_REG(pkt, Pu_new_op, true); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // Rd = ((st32) extract64(((ut64) Rs), 0x0, 0x8)); + RzILOpEffect *op_ASSIGN_16 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, EXTRACT64(CAST(64, IL_FALSE, Rs), SN(32, 0), SN(32, 8)))); + + // nop; + RzILOpEffect *nop_17 = NOP(); + + // seq(Rd = ((st32) extract64(((ut64) Rs), 0x0, 0x8))); + RzILOpEffect *seq_then_18 = op_ASSIGN_16; + + // seq(nop); + RzILOpEffect *seq_else_19 = nop_17; + + // if (! 
(((st32) Pu_new) & 0x1)) {seq(Rd = ((st32) extract64(((ut64) Rs), 0x0, 0x8)))} else {seq(nop)}; + RzILOpPure *op_AND_3 = LOGAND(CAST(32, MSB(Pu_new), DUP(Pu_new)), SN(32, 1)); + RzILOpPure *op_INV_4 = INV(NON_ZERO(op_AND_3)); + RzILOpEffect *branch_20 = BRANCH(op_INV_4, seq_then_18, seq_else_19); + + RzILOpEffect *instruction_sequence = branch_20; + return instruction_sequence; +} + +// if (Pu) Rd = zxtb(Rs) +RzILOpEffect *hex_il_op_a4_pzxtbt(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Pu_op = ISA2REG(hi, 'u', false); + RzILOpPure *Pu = READ_REG(pkt, Pu_op, false); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // Rd = ((st32) extract64(((ut64) Rs), 0x0, 0x8)); + RzILOpEffect *op_ASSIGN_15 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, EXTRACT64(CAST(64, IL_FALSE, Rs), SN(32, 0), SN(32, 8)))); + + // nop; + RzILOpEffect *nop_16 = NOP(); + + // seq(Rd = ((st32) extract64(((ut64) Rs), 0x0, 0x8))); + RzILOpEffect *seq_then_17 = op_ASSIGN_15; + + // seq(nop); + RzILOpEffect *seq_else_18 = nop_16; + + // if ((((st32) Pu) & 0x1)) {seq(Rd = ((st32) extract64(((ut64) Rs), 0x0, 0x8)))} else {seq(nop)}; + RzILOpPure *op_AND_3 = LOGAND(CAST(32, MSB(Pu), DUP(Pu)), SN(32, 1)); + RzILOpEffect *branch_19 = BRANCH(NON_ZERO(op_AND_3), seq_then_17, seq_else_18); + + RzILOpEffect *instruction_sequence = branch_19; + return instruction_sequence; +} + +// if (Pu.new) Rd = zxtb(Rs) +RzILOpEffect *hex_il_op_a4_pzxtbtnew(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Pu_new_op = ISA2REG(hi, 'u', true); + RzILOpPure *Pu_new = READ_REG(pkt, Pu_new_op, true); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // Rd = ((st32) extract64(((ut64) Rs), 0x0, 
0x8)); + RzILOpEffect *op_ASSIGN_15 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, EXTRACT64(CAST(64, IL_FALSE, Rs), SN(32, 0), SN(32, 8)))); + + // nop; + RzILOpEffect *nop_16 = NOP(); + + // seq(Rd = ((st32) extract64(((ut64) Rs), 0x0, 0x8))); + RzILOpEffect *seq_then_17 = op_ASSIGN_15; + + // seq(nop); + RzILOpEffect *seq_else_18 = nop_16; + + // if ((((st32) Pu_new) & 0x1)) {seq(Rd = ((st32) extract64(((ut64) Rs), 0x0, 0x8)))} else {seq(nop)}; + RzILOpPure *op_AND_3 = LOGAND(CAST(32, MSB(Pu_new), DUP(Pu_new)), SN(32, 1)); + RzILOpEffect *branch_19 = BRANCH(NON_ZERO(op_AND_3), seq_then_17, seq_else_18); + + RzILOpEffect *instruction_sequence = branch_19; + return instruction_sequence; +} + +// if (!Pu) Rd = zxth(Rs) +RzILOpEffect *hex_il_op_a4_pzxthf(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Pu_op = ISA2REG(hi, 'u', false); + RzILOpPure *Pu = READ_REG(pkt, Pu_op, false); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // Rd = ((st32) extract64(((ut64) Rs), 0x0, 0x10)); + RzILOpEffect *op_ASSIGN_16 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, EXTRACT64(CAST(64, IL_FALSE, Rs), SN(32, 0), SN(32, 16)))); + + // nop; + RzILOpEffect *nop_17 = NOP(); + + // seq(Rd = ((st32) extract64(((ut64) Rs), 0x0, 0x10))); + RzILOpEffect *seq_then_18 = op_ASSIGN_16; + + // seq(nop); + RzILOpEffect *seq_else_19 = nop_17; + + // if (! 
(((st32) Pu) & 0x1)) {seq(Rd = ((st32) extract64(((ut64) Rs), 0x0, 0x10)))} else {seq(nop)}; + RzILOpPure *op_AND_3 = LOGAND(CAST(32, MSB(Pu), DUP(Pu)), SN(32, 1)); + RzILOpPure *op_INV_4 = INV(NON_ZERO(op_AND_3)); + RzILOpEffect *branch_20 = BRANCH(op_INV_4, seq_then_18, seq_else_19); + + RzILOpEffect *instruction_sequence = branch_20; + return instruction_sequence; +} + +// if (!Pu.new) Rd = zxth(Rs) +RzILOpEffect *hex_il_op_a4_pzxthfnew(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Pu_new_op = ISA2REG(hi, 'u', true); + RzILOpPure *Pu_new = READ_REG(pkt, Pu_new_op, true); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // Rd = ((st32) extract64(((ut64) Rs), 0x0, 0x10)); + RzILOpEffect *op_ASSIGN_16 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, EXTRACT64(CAST(64, IL_FALSE, Rs), SN(32, 0), SN(32, 16)))); + + // nop; + RzILOpEffect *nop_17 = NOP(); + + // seq(Rd = ((st32) extract64(((ut64) Rs), 0x0, 0x10))); + RzILOpEffect *seq_then_18 = op_ASSIGN_16; + + // seq(nop); + RzILOpEffect *seq_else_19 = nop_17; + + // if (! 
(((st32) Pu_new) & 0x1)) {seq(Rd = ((st32) extract64(((ut64) Rs), 0x0, 0x10)))} else {seq(nop)}; + RzILOpPure *op_AND_3 = LOGAND(CAST(32, MSB(Pu_new), DUP(Pu_new)), SN(32, 1)); + RzILOpPure *op_INV_4 = INV(NON_ZERO(op_AND_3)); + RzILOpEffect *branch_20 = BRANCH(op_INV_4, seq_then_18, seq_else_19); + + RzILOpEffect *instruction_sequence = branch_20; + return instruction_sequence; +} + +// if (Pu) Rd = zxth(Rs) +RzILOpEffect *hex_il_op_a4_pzxtht(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Pu_op = ISA2REG(hi, 'u', false); + RzILOpPure *Pu = READ_REG(pkt, Pu_op, false); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // Rd = ((st32) extract64(((ut64) Rs), 0x0, 0x10)); + RzILOpEffect *op_ASSIGN_15 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, EXTRACT64(CAST(64, IL_FALSE, Rs), SN(32, 0), SN(32, 16)))); + + // nop; + RzILOpEffect *nop_16 = NOP(); + + // seq(Rd = ((st32) extract64(((ut64) Rs), 0x0, 0x10))); + RzILOpEffect *seq_then_17 = op_ASSIGN_15; + + // seq(nop); + RzILOpEffect *seq_else_18 = nop_16; + + // if ((((st32) Pu) & 0x1)) {seq(Rd = ((st32) extract64(((ut64) Rs), 0x0, 0x10)))} else {seq(nop)}; + RzILOpPure *op_AND_3 = LOGAND(CAST(32, MSB(Pu), DUP(Pu)), SN(32, 1)); + RzILOpEffect *branch_19 = BRANCH(NON_ZERO(op_AND_3), seq_then_17, seq_else_18); + + RzILOpEffect *instruction_sequence = branch_19; + return instruction_sequence; +} + +// if (Pu.new) Rd = zxth(Rs) +RzILOpEffect *hex_il_op_a4_pzxthtnew(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Pu_new_op = ISA2REG(hi, 'u', true); + RzILOpPure *Pu_new = READ_REG(pkt, Pu_new_op, true); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // Rd = ((st32) extract64(((ut64) Rs), 
0x0, 0x10)); + RzILOpEffect *op_ASSIGN_15 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, EXTRACT64(CAST(64, IL_FALSE, Rs), SN(32, 0), SN(32, 16)))); + + // nop; + RzILOpEffect *nop_16 = NOP(); + + // seq(Rd = ((st32) extract64(((ut64) Rs), 0x0, 0x10))); + RzILOpEffect *seq_then_17 = op_ASSIGN_15; + + // seq(nop); + RzILOpEffect *seq_else_18 = nop_16; + + // if ((((st32) Pu_new) & 0x1)) {seq(Rd = ((st32) extract64(((ut64) Rs), 0x0, 0x10)))} else {seq(nop)}; + RzILOpPure *op_AND_3 = LOGAND(CAST(32, MSB(Pu_new), DUP(Pu_new)), SN(32, 1)); + RzILOpEffect *branch_19 = BRANCH(NON_ZERO(op_AND_3), seq_then_17, seq_else_18); + + RzILOpEffect *instruction_sequence = branch_19; + return instruction_sequence; +} + +// Rd = cmp.eq(Rs,Rt) +RzILOpEffect *hex_il_op_a4_rcmpeq(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rd = ((Rs == Rt) ? 0x1 : 0x0); + RzILOpPure *op_EQ_3 = EQ(Rs, Rt); + RzILOpPure *ite_cast_st32_4 = ITE(op_EQ_3, SN(32, 1), SN(32, 0)); + RzILOpEffect *op_ASSIGN_5 = WRITE_REG(bundle, Rd_op, ite_cast_st32_4); + + RzILOpEffect *instruction_sequence = op_ASSIGN_5; + return instruction_sequence; +} + +// Rd = cmp.eq(Rs,Ii) +RzILOpEffect *hex_il_op_a4_rcmpeqi(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // s = s; + RzILOpEffect *imm_assign_0 = SETL("s", s); + + // Rd = ((Rs == s) ? 
0x1 : 0x0); + RzILOpPure *op_EQ_4 = EQ(Rs, VARL("s")); + RzILOpPure *ite_cast_st32_5 = ITE(op_EQ_4, SN(32, 1), SN(32, 0)); + RzILOpEffect *op_ASSIGN_6 = WRITE_REG(bundle, Rd_op, ite_cast_st32_5); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_0, op_ASSIGN_6); + return instruction_sequence; +} + +// Rd = !cmp.eq(Rs,Rt) +RzILOpEffect *hex_il_op_a4_rcmpneq(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rd = ((Rs != Rt) ? 0x1 : 0x0); + RzILOpPure *op_NE_3 = INV(EQ(Rs, Rt)); + RzILOpPure *ite_cast_st32_4 = ITE(op_NE_3, SN(32, 1), SN(32, 0)); + RzILOpEffect *op_ASSIGN_5 = WRITE_REG(bundle, Rd_op, ite_cast_st32_4); + + RzILOpEffect *instruction_sequence = op_ASSIGN_5; + return instruction_sequence; +} + +// Rd = !cmp.eq(Rs,Ii) +RzILOpEffect *hex_il_op_a4_rcmpneqi(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // s = s; + RzILOpEffect *imm_assign_0 = SETL("s", s); + + // Rd = ((Rs != s) ? 
0x1 : 0x0); + RzILOpPure *op_NE_4 = INV(EQ(Rs, VARL("s"))); + RzILOpPure *ite_cast_st32_5 = ITE(op_NE_4, SN(32, 1), SN(32, 0)); + RzILOpEffect *op_ASSIGN_6 = WRITE_REG(bundle, Rd_op, ite_cast_st32_5); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_0, op_ASSIGN_6); + return instruction_sequence; +} + +// Rd = round(Rs,Ii) +RzILOpEffect *hex_il_op_a4_round_ri(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // u = u; + RzILOpEffect *imm_assign_1 = SETL("u", u); + + // Rd = ((st32) (((u == ((ut32) 0x0)) ? ((st64) Rs) : ((st64) Rs) + ((st64) (0x1 << u - ((ut32) 0x1)))) >> u)); + RzILOpPure *op_EQ_5 = EQ(VARL("u"), CAST(32, IL_FALSE, SN(32, 0))); + RzILOpPure *op_SUB_11 = SUB(VARL("u"), CAST(32, IL_FALSE, SN(32, 1))); + RzILOpPure *op_LSHIFT_12 = SHIFTL0(SN(32, 1), op_SUB_11); + RzILOpPure *op_ADD_14 = ADD(CAST(64, MSB(Rs), DUP(Rs)), CAST(64, MSB(op_LSHIFT_12), DUP(op_LSHIFT_12))); + RzILOpPure *cond_16 = ITE(op_EQ_5, CAST(64, MSB(DUP(Rs)), DUP(Rs)), op_ADD_14); + RzILOpPure *op_RSHIFT_17 = SHIFTRA(cond_16, VARL("u")); + RzILOpEffect *op_ASSIGN_19 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(op_RSHIFT_17), DUP(op_RSHIFT_17))); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_1, op_ASSIGN_19); + return instruction_sequence; +} + +// Rd = round(Rs,Ii):sat +RzILOpEffect *hex_il_op_a4_round_ri_sat(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // u = u; + RzILOpEffect *imm_assign_4 = SETL("u", u); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 
0x1)); + RzILOpEffect *set_usr_field_call_54 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((u == ((ut32) 0x0)) ? ((st64) Rs) : ((st64) Rs) + ((st64) (0x1 << u - ((ut32) 0x1))))), 0x0, 0x20) == ((u == ((ut32) 0x0)) ? ((st64) Rs) : ((st64) Rs) + ((st64) (0x1 << u - ((ut32) 0x1)))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((u == ((ut32) 0x0)) ? ((st64) Rs) : ((st64) Rs) + ((st64) (0x1 << u - ((ut32) 0x1)))) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_EQ_8 = EQ(VARL("u"), CAST(32, IL_FALSE, SN(32, 0))); + RzILOpPure *op_SUB_14 = SUB(VARL("u"), CAST(32, IL_FALSE, SN(32, 1))); + RzILOpPure *op_LSHIFT_15 = SHIFTL0(SN(32, 1), op_SUB_14); + RzILOpPure *op_ADD_17 = ADD(CAST(64, MSB(Rs), DUP(Rs)), CAST(64, MSB(op_LSHIFT_15), DUP(op_LSHIFT_15))); + RzILOpPure *cond_19 = ITE(op_EQ_8, CAST(64, MSB(DUP(Rs)), DUP(Rs)), op_ADD_17); + RzILOpPure *op_EQ_27 = EQ(VARL("u"), CAST(32, IL_FALSE, SN(32, 0))); + RzILOpPure *op_SUB_32 = SUB(VARL("u"), CAST(32, IL_FALSE, SN(32, 1))); + RzILOpPure *op_LSHIFT_33 = SHIFTL0(SN(32, 1), op_SUB_32); + RzILOpPure *op_ADD_35 = ADD(CAST(64, MSB(DUP(Rs)), DUP(Rs)), CAST(64, MSB(op_LSHIFT_33), DUP(op_LSHIFT_33))); + RzILOpPure *cond_37 = ITE(op_EQ_27, CAST(64, MSB(DUP(Rs)), DUP(Rs)), op_ADD_35); + RzILOpPure *op_EQ_38 = EQ(SEXTRACT64(CAST(64, IL_FALSE, cond_19), SN(32, 0), SN(32, 0x20)), cond_37); + RzILOpPure *op_EQ_57 = EQ(VARL("u"), CAST(32, IL_FALSE, SN(32, 0))); + RzILOpPure *op_SUB_62 = SUB(VARL("u"), CAST(32, IL_FALSE, SN(32, 1))); + RzILOpPure *op_LSHIFT_63 = SHIFTL0(SN(32, 1), op_SUB_62); + RzILOpPure *op_ADD_65 = ADD(CAST(64, MSB(DUP(Rs)), DUP(Rs)), CAST(64, MSB(op_LSHIFT_63), DUP(op_LSHIFT_63))); + RzILOpPure *cond_67 = ITE(op_EQ_57, CAST(64, MSB(DUP(Rs)), DUP(Rs)), op_ADD_65); + RzILOpPure *op_LT_70 = SLT(cond_67, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_75 = 
SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_76 = NEG(op_LSHIFT_75); + RzILOpPure *op_LSHIFT_81 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_84 = SUB(op_LSHIFT_81, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_85 = ITE(op_LT_70, op_NEG_76, op_SUB_84); + RzILOpEffect *gcc_expr_86 = BRANCH(op_EQ_38, EMPTY(), set_usr_field_call_54); + + // h_tmp119 = HYB(gcc_expr_if ((sextract64(((ut64) ((u == ((ut32) 0x0)) ? ((st64) Rs) : ((st64) Rs) + ((st64) (0x1 << u - ((ut32) 0x1))))), 0x0, 0x20) == ((u == ((ut32) 0x0)) ? ((st64) Rs) : ((st64) Rs) + ((st64) (0x1 << u - ((ut32) 0x1)))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((u == ((ut32) 0x0)) ? ((st64) Rs) : ((st64) Rs) + ((st64) (0x1 << u - ((ut32) 0x1)))) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_88 = SETL("h_tmp119", cond_85); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((u == ((ut32) 0x0)) ? ...; + RzILOpEffect *seq_89 = SEQN(2, gcc_expr_86, op_ASSIGN_hybrid_tmp_88); + + // Rd = ((st32) (((sextract64(((ut64) ((u == ((ut32) 0x0)) ? ((st64) Rs) : ((st64) Rs) + ((st64) (0x1 << u - ((ut32) 0x1))))), 0x0, 0x20) == ((u == ((ut32) 0x0)) ? ((st64) Rs) : ((st64) Rs) + ((st64) (0x1 << u - ((ut32) 0x1))))) ? ((u == ((ut32) 0x0)) ? 
((st64) Rs) : ((st64) Rs) + ((st64) (0x1 << u - ((ut32) 0x1)))) : h_tmp119) >> u)); + RzILOpPure *op_EQ_41 = EQ(VARL("u"), CAST(32, IL_FALSE, SN(32, 0))); + RzILOpPure *op_SUB_46 = SUB(VARL("u"), CAST(32, IL_FALSE, SN(32, 1))); + RzILOpPure *op_LSHIFT_47 = SHIFTL0(SN(32, 1), op_SUB_46); + RzILOpPure *op_ADD_49 = ADD(CAST(64, MSB(DUP(Rs)), DUP(Rs)), CAST(64, MSB(op_LSHIFT_47), DUP(op_LSHIFT_47))); + RzILOpPure *cond_51 = ITE(op_EQ_41, CAST(64, MSB(DUP(Rs)), DUP(Rs)), op_ADD_49); + RzILOpPure *cond_90 = ITE(DUP(op_EQ_38), cond_51, VARL("h_tmp119")); + RzILOpPure *op_RSHIFT_91 = SHIFTRA(cond_90, VARL("u")); + RzILOpEffect *op_ASSIGN_93 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(op_RSHIFT_91), DUP(op_RSHIFT_91))); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((u == ((ut32) 0x0) ...; + RzILOpEffect *seq_94 = SEQN(2, seq_89, op_ASSIGN_93); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_4, seq_94); + return instruction_sequence; +} + +// Rd = round(Rs,Rt) +RzILOpEffect *hex_il_op_a4_round_rr(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // Rd = ((st32) (((extract64(((ut64) Rt), 0x0, 0x5) == ((ut64) 0x0)) ? 
((st64) Rs) : ((st64) Rs) + ((st64) (0x1 << extract64(((ut64) Rt), 0x0, 0x5) - ((ut64) 0x1)))) >> extract64(((ut64) Rt), 0x0, 0x5))); + RzILOpPure *op_EQ_12 = EQ(EXTRACT64(CAST(64, IL_FALSE, Rt), SN(32, 0), SN(32, 5)), CAST(64, IL_FALSE, SN(32, 0))); + RzILOpPure *op_SUB_26 = SUB(EXTRACT64(CAST(64, IL_FALSE, DUP(Rt)), SN(32, 0), SN(32, 5)), CAST(64, IL_FALSE, SN(32, 1))); + RzILOpPure *op_LSHIFT_27 = SHIFTL0(SN(32, 1), op_SUB_26); + RzILOpPure *op_ADD_29 = ADD(CAST(64, MSB(Rs), DUP(Rs)), CAST(64, MSB(op_LSHIFT_27), DUP(op_LSHIFT_27))); + RzILOpPure *cond_31 = ITE(op_EQ_12, CAST(64, MSB(DUP(Rs)), DUP(Rs)), op_ADD_29); + RzILOpPure *op_RSHIFT_40 = SHIFTRA(cond_31, EXTRACT64(CAST(64, IL_FALSE, DUP(Rt)), SN(32, 0), SN(32, 5))); + RzILOpEffect *op_ASSIGN_42 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(op_RSHIFT_40), DUP(op_RSHIFT_40))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_42; + return instruction_sequence; +} + +// Rd = round(Rs,Rt):sat +RzILOpEffect *hex_il_op_a4_round_rr_sat(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_101 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((extract64(((ut64) Rt), 0x0, 0x5) == ((ut64) 0x0)) ? ((st64) Rs) : ((st64) Rs) + ((st64) (0x1 << extract64(((ut64) Rt), 0x0, 0x5) - ((ut64) 0x1))))), 0x0, 0x20) == ((extract64(((ut64) Rt), 0x0, 0x5) == ((ut64) 0x0)) ? 
((st64) Rs) : ((st64) Rs) + ((st64) (0x1 << extract64(((ut64) Rt), 0x0, 0x5) - ((ut64) 0x1)))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((extract64(((ut64) Rt), 0x0, 0x5) == ((ut64) 0x0)) ? ((st64) Rs) : ((st64) Rs) + ((st64) (0x1 << extract64(((ut64) Rt), 0x0, 0x5) - ((ut64) 0x1)))) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_EQ_15 = EQ(EXTRACT64(CAST(64, IL_FALSE, Rt), SN(32, 0), SN(32, 5)), CAST(64, IL_FALSE, SN(32, 0))); + RzILOpPure *op_SUB_29 = SUB(EXTRACT64(CAST(64, IL_FALSE, DUP(Rt)), SN(32, 0), SN(32, 5)), CAST(64, IL_FALSE, SN(32, 1))); + RzILOpPure *op_LSHIFT_30 = SHIFTL0(SN(32, 1), op_SUB_29); + RzILOpPure *op_ADD_32 = ADD(CAST(64, MSB(Rs), DUP(Rs)), CAST(64, MSB(op_LSHIFT_30), DUP(op_LSHIFT_30))); + RzILOpPure *cond_34 = ITE(op_EQ_15, CAST(64, MSB(DUP(Rs)), DUP(Rs)), op_ADD_32); + RzILOpPure *op_EQ_50 = EQ(EXTRACT64(CAST(64, IL_FALSE, DUP(Rt)), SN(32, 0), SN(32, 5)), CAST(64, IL_FALSE, SN(32, 0))); + RzILOpPure *op_SUB_63 = SUB(EXTRACT64(CAST(64, IL_FALSE, DUP(Rt)), SN(32, 0), SN(32, 5)), CAST(64, IL_FALSE, SN(32, 1))); + RzILOpPure *op_LSHIFT_64 = SHIFTL0(SN(32, 1), op_SUB_63); + RzILOpPure *op_ADD_66 = ADD(CAST(64, MSB(DUP(Rs)), DUP(Rs)), CAST(64, MSB(op_LSHIFT_64), DUP(op_LSHIFT_64))); + RzILOpPure *cond_68 = ITE(op_EQ_50, CAST(64, MSB(DUP(Rs)), DUP(Rs)), op_ADD_66); + RzILOpPure *op_EQ_69 = EQ(SEXTRACT64(CAST(64, IL_FALSE, cond_34), SN(32, 0), SN(32, 0x20)), cond_68); + RzILOpPure *op_EQ_112 = EQ(EXTRACT64(CAST(64, IL_FALSE, DUP(Rt)), SN(32, 0), SN(32, 5)), CAST(64, IL_FALSE, SN(32, 0))); + RzILOpPure *op_SUB_125 = SUB(EXTRACT64(CAST(64, IL_FALSE, DUP(Rt)), SN(32, 0), SN(32, 5)), CAST(64, IL_FALSE, SN(32, 1))); + RzILOpPure *op_LSHIFT_126 = SHIFTL0(SN(32, 1), op_SUB_125); + RzILOpPure *op_ADD_128 = ADD(CAST(64, MSB(DUP(Rs)), DUP(Rs)), CAST(64, MSB(op_LSHIFT_126), DUP(op_LSHIFT_126))); + RzILOpPure *cond_130 = ITE(op_EQ_112, CAST(64, MSB(DUP(Rs)), DUP(Rs)), op_ADD_128); + 
RzILOpPure *op_LT_133 = SLT(cond_130, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_138 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_139 = NEG(op_LSHIFT_138); + RzILOpPure *op_LSHIFT_144 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_147 = SUB(op_LSHIFT_144, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_148 = ITE(op_LT_133, op_NEG_139, op_SUB_147); + RzILOpEffect *gcc_expr_149 = BRANCH(op_EQ_69, EMPTY(), set_usr_field_call_101); + + // h_tmp120 = HYB(gcc_expr_if ((sextract64(((ut64) ((extract64(((ut64) Rt), 0x0, 0x5) == ((ut64) 0x0)) ? ((st64) Rs) : ((st64) Rs) + ((st64) (0x1 << extract64(((ut64) Rt), 0x0, 0x5) - ((ut64) 0x1))))), 0x0, 0x20) == ((extract64(((ut64) Rt), 0x0, 0x5) == ((ut64) 0x0)) ? ((st64) Rs) : ((st64) Rs) + ((st64) (0x1 << extract64(((ut64) Rt), 0x0, 0x5) - ((ut64) 0x1)))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((extract64(((ut64) Rt), 0x0, 0x5) == ((ut64) 0x0)) ? ((st64) Rs) : ((st64) Rs) + ((st64) (0x1 << extract64(((ut64) Rt), 0x0, 0x5) - ((ut64) 0x1)))) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_151 = SETL("h_tmp120", cond_148); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((extract64(((ut64) Rt) ...; + RzILOpEffect *seq_152 = SEQN(2, gcc_expr_149, op_ASSIGN_hybrid_tmp_151); + + // Rd = ((st32) (((sextract64(((ut64) ((extract64(((ut64) Rt), 0x0, 0x5) == ((ut64) 0x0)) ? ((st64) Rs) : ((st64) Rs) + ((st64) (0x1 << extract64(((ut64) Rt), 0x0, 0x5) - ((ut64) 0x1))))), 0x0, 0x20) == ((extract64(((ut64) Rt), 0x0, 0x5) == ((ut64) 0x0)) ? ((st64) Rs) : ((st64) Rs) + ((st64) (0x1 << extract64(((ut64) Rt), 0x0, 0x5) - ((ut64) 0x1))))) ? ((extract64(((ut64) Rt), 0x0, 0x5) == ((ut64) 0x0)) ? 
((st64) Rs) : ((st64) Rs) + ((st64) (0x1 << extract64(((ut64) Rt), 0x0, 0x5) - ((ut64) 0x1)))) : h_tmp120) >> extract64(((ut64) Rt), 0x0, 0x5))); + RzILOpPure *op_EQ_80 = EQ(EXTRACT64(CAST(64, IL_FALSE, DUP(Rt)), SN(32, 0), SN(32, 5)), CAST(64, IL_FALSE, SN(32, 0))); + RzILOpPure *op_SUB_93 = SUB(EXTRACT64(CAST(64, IL_FALSE, DUP(Rt)), SN(32, 0), SN(32, 5)), CAST(64, IL_FALSE, SN(32, 1))); + RzILOpPure *op_LSHIFT_94 = SHIFTL0(SN(32, 1), op_SUB_93); + RzILOpPure *op_ADD_96 = ADD(CAST(64, MSB(DUP(Rs)), DUP(Rs)), CAST(64, MSB(op_LSHIFT_94), DUP(op_LSHIFT_94))); + RzILOpPure *cond_98 = ITE(op_EQ_80, CAST(64, MSB(DUP(Rs)), DUP(Rs)), op_ADD_96); + RzILOpPure *cond_153 = ITE(DUP(op_EQ_69), cond_98, VARL("h_tmp120")); + RzILOpPure *op_RSHIFT_162 = SHIFTRA(cond_153, EXTRACT64(CAST(64, IL_FALSE, DUP(Rt)), SN(32, 0), SN(32, 5))); + RzILOpEffect *op_ASSIGN_164 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(op_RSHIFT_162), DUP(op_RSHIFT_162))); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((extract64(((ut64) ...; + RzILOpEffect *seq_165 = SEQN(2, seq_152, op_ASSIGN_164); + + RzILOpEffect *instruction_sequence = seq_165; + return instruction_sequence; +} + +// Rdd = sub(Rss,Rtt,Px):carry +RzILOpEffect *hex_il_op_a4_subp_c(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Rdd = Css +RzILOpEffect *hex_il_op_a4_tfrcpp(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Css_op = ISA2REG(hi, 's', false); + RzILOpPure *Css = READ_REG(pkt, Css_op, false); + + // Rdd = Css; + RzILOpEffect *op_ASSIGN_2 = WRITE_REG(bundle, Rdd_op, Css); + + RzILOpEffect *instruction_sequence = op_ASSIGN_2; + return instruction_sequence; +} + +// Cdd = Rss +RzILOpEffect *hex_il_op_a4_tfrpcp(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Cdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); 
+ RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + + // Cdd = Rss; + RzILOpEffect *op_ASSIGN_2 = WRITE_REG(bundle, Cdd_op, Rss); + + RzILOpEffect *instruction_sequence = op_ASSIGN_2; + return instruction_sequence; +} + +// Pd = tlbmatch(Rss,Rt) +RzILOpEffect *hex_il_op_a4_tlbmatch(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 TLBHI; + // Declare: ut32 TLBLO; + // Declare: ut32 MASK; + // Declare: ut32 SIZE; + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Pd_op = ISA2REG(hi, 'd', false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // MASK = ((ut32) 0x7ffffff); + RzILOpEffect *op_ASSIGN_6 = SETL("MASK", CAST(32, IL_FALSE, SN(32, 0x7ffffff))); + + // TLBLO = ((ut32) ((ut64) ((ut32) ((Rss >> 0x0) & 0xffffffff)))); + RzILOpPure *op_RSHIFT_11 = SHIFTRA(Rss, SN(32, 0)); + RzILOpPure *op_AND_13 = LOGAND(op_RSHIFT_11, SN(64, 0xffffffff)); + RzILOpEffect *op_ASSIGN_17 = SETL("TLBLO", CAST(32, IL_FALSE, CAST(64, IL_FALSE, CAST(32, IL_FALSE, op_AND_13)))); + + // TLBHI = ((ut32) ((ut64) ((ut32) ((Rss >> 0x20) & 0xffffffff)))); + RzILOpPure *op_RSHIFT_21 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_23 = LOGAND(op_RSHIFT_21, SN(64, 0xffffffff)); + RzILOpEffect *op_ASSIGN_27 = SETL("TLBHI", CAST(32, IL_FALSE, CAST(64, IL_FALSE, CAST(32, IL_FALSE, op_AND_23)))); + + // revbit32(TLBLO); + RzILOpEffect *revbit32_call_29 = hex_revbit32(VARL("TLBLO")); + + // h_tmp121 = revbit32(TLBLO); + RzILOpEffect *op_ASSIGN_hybrid_tmp_31 = SETL("h_tmp121", UNSIGNED(32, VARL("ret_val"))); + + // seq(revbit32(TLBLO); h_tmp121 = revbit32(TLBLO)); + RzILOpEffect *seq_32 = SEQN(2, revbit32_call_29, op_ASSIGN_hybrid_tmp_31); + + // clo32((~h_tmp121)); + RzILOpPure *op_NOT_33 = LOGNOT(VARL("h_tmp121")); + RzILOpEffect *clo32_call_34 = hex_clo32(op_NOT_33); + + // h_tmp122 = clo32((~h_tmp121)); + 
RzILOpEffect *op_ASSIGN_hybrid_tmp_36 = SETL("h_tmp122", UNSIGNED(32, VARL("ret_val"))); + + // seq(clo32((~h_tmp121)); h_tmp122 = clo32((~h_tmp121))); + RzILOpEffect *seq_37 = SEQN(2, clo32_call_34, op_ASSIGN_hybrid_tmp_36); + + // seq(seq(revbit32(TLBLO); h_tmp121 = revbit32(TLBLO)); seq(clo32( ...; + RzILOpEffect *seq_38 = SEQN(2, seq_32, seq_37); + + // revbit32(TLBLO); + RzILOpEffect *revbit32_call_42 = hex_revbit32(VARL("TLBLO")); + + // h_tmp123 = revbit32(TLBLO); + RzILOpEffect *op_ASSIGN_hybrid_tmp_44 = SETL("h_tmp123", UNSIGNED(32, VARL("ret_val"))); + + // seq(revbit32(TLBLO); h_tmp123 = revbit32(TLBLO)); + RzILOpEffect *seq_45 = SEQN(2, revbit32_call_42, op_ASSIGN_hybrid_tmp_44); + + // clo32((~h_tmp123)); + RzILOpPure *op_NOT_46 = LOGNOT(VARL("h_tmp123")); + RzILOpEffect *clo32_call_47 = hex_clo32(op_NOT_46); + + // h_tmp124 = clo32((~h_tmp123)); + RzILOpEffect *op_ASSIGN_hybrid_tmp_49 = SETL("h_tmp124", UNSIGNED(32, VARL("ret_val"))); + + // seq(clo32((~h_tmp123)); h_tmp124 = clo32((~h_tmp123))); + RzILOpEffect *seq_50 = SEQN(2, clo32_call_47, op_ASSIGN_hybrid_tmp_49); + + // seq(seq(revbit32(TLBLO); h_tmp123 = revbit32(TLBLO)); seq(clo32( ...; + RzILOpEffect *seq_51 = SEQN(2, seq_45, seq_50); + + // SIZE = ((((ut32) 0x6) < h_tmp122) ? 
((ut32) 0x6) : h_tmp124); + RzILOpPure *op_LT_40 = ULT(CAST(32, IL_FALSE, SN(32, 6)), VARL("h_tmp122")); + RzILOpPure *cond_53 = ITE(op_LT_40, CAST(32, IL_FALSE, SN(32, 6)), VARL("h_tmp124")); + RzILOpEffect *op_ASSIGN_54 = SETL("SIZE", cond_53); + + // seq(seq(seq(revbit32(TLBLO); h_tmp121 = revbit32(TLBLO)); seq(cl ...; + RzILOpEffect *seq_55 = SEQN(3, seq_38, seq_51, op_ASSIGN_54); + + // MASK = (MASK & ((ut32) (0xffffffff << ((ut32) 0x2) * SIZE))); + RzILOpPure *op_MUL_59 = MUL(CAST(32, IL_FALSE, SN(32, 2)), VARL("SIZE")); + RzILOpPure *op_LSHIFT_60 = SHIFTL0(SN(32, 0xffffffff), op_MUL_59); + RzILOpPure *op_AND_62 = LOGAND(VARL("MASK"), CAST(32, IL_FALSE, op_LSHIFT_60)); + RzILOpEffect *op_ASSIGN_AND_63 = SETL("MASK", op_AND_62); + + // Pd = ((st8) ((((TLBHI >> 0x1f) & ((ut32) 0x1)) && ((ut32) ((TLBHI & MASK) == (((ut32) Rt) & MASK)))) ? 0xff : 0x0)); + RzILOpPure *op_RSHIFT_66 = SHIFTR0(VARL("TLBHI"), SN(32, 31)); + RzILOpPure *op_AND_69 = LOGAND(op_RSHIFT_66, CAST(32, IL_FALSE, SN(32, 1))); + RzILOpPure *op_AND_70 = LOGAND(VARL("TLBHI"), VARL("MASK")); + RzILOpPure *op_AND_73 = LOGAND(CAST(32, IL_FALSE, Rt), VARL("MASK")); + RzILOpPure *op_EQ_74 = EQ(op_AND_70, op_AND_73); + RzILOpPure *op_AND_76 = AND(NON_ZERO(op_AND_69), NON_ZERO(CAST(32, IL_FALSE, op_EQ_74))); + RzILOpPure *cond_79 = ITE(op_AND_76, SN(32, 0xff), SN(32, 0)); + RzILOpEffect *op_ASSIGN_81 = WRITE_REG(bundle, Pd_op, CAST(8, MSB(cond_79), DUP(cond_79))); + + RzILOpEffect *instruction_sequence = SEQN(6, op_ASSIGN_6, op_ASSIGN_17, op_ASSIGN_27, seq_55, op_ASSIGN_AND_63, op_ASSIGN_81); + return instruction_sequence; +} + +// Pd = any8(vcmpb.eq(Rss,Rtt)) +RzILOpEffect *hex_il_op_a4_vcmpbeq_any(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: st32 i; + const HexOp *Pd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 
't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // Pd = ((st8) 0x0); + RzILOpEffect *op_ASSIGN_4 = WRITE_REG(bundle, Pd_op, CAST(8, MSB(SN(32, 0)), SN(32, 0))); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_6 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_9 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp125 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_11 = SETL("h_tmp125", VARL("i")); + + // seq(h_tmp125 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_12 = SEQN(2, op_ASSIGN_hybrid_tmp_11, op_INC_9); + + // Pd = ((st8) 0xff); + RzILOpEffect *op_ASSIGN_32 = WRITE_REG(bundle, Pd_op, CAST(8, MSB(SN(32, 0xff)), SN(32, 0xff))); + + // seq(Pd = ((st8) 0xff)); + RzILOpEffect *seq_then_33 = op_ASSIGN_32; + + // if ((((st8) ((Rss >> i * 0x8) & ((st64) 0xff))) == ((st8) ((Rtt >> i * 0x8) & ((st64) 0xff))))) {seq(Pd = ((st8) 0xff))} else {{}}; + RzILOpPure *op_MUL_15 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_RSHIFT_16 = SHIFTRA(Rss, op_MUL_15); + RzILOpPure *op_AND_19 = LOGAND(op_RSHIFT_16, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_MUL_23 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_RSHIFT_24 = SHIFTRA(Rtt, op_MUL_23); + RzILOpPure *op_AND_27 = LOGAND(op_RSHIFT_24, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_EQ_29 = EQ(CAST(8, MSB(op_AND_19), DUP(op_AND_19)), CAST(8, MSB(op_AND_27), DUP(op_AND_27))); + RzILOpEffect *branch_34 = BRANCH(op_EQ_29, seq_then_33, EMPTY()); + + // seq(h_tmp125; if ((((st8) ((Rss >> i * 0x8) & ((st64) 0xff))) == ...; + RzILOpEffect *seq_35 = branch_34; + + // seq(seq(h_tmp125; if ((((st8) ((Rss >> i * 0x8) & ((st64) 0xff)) ...; + RzILOpEffect *seq_36 = SEQN(2, seq_35, seq_12); + + // while ((i < 0x8)) { seq(seq(h_tmp125; if ((((st8) ((Rss >> i * 0x8) & ((st64) 0xff)) ... 
}; + RzILOpPure *op_LT_8 = SLT(VARL("i"), SN(32, 8)); + RzILOpEffect *for_37 = REPEAT(op_LT_8, seq_36); + + // seq(i = 0x0; while ((i < 0x8)) { seq(seq(h_tmp125; if ((((st8) ( ...; + RzILOpEffect *seq_38 = SEQN(2, op_ASSIGN_6, for_37); + + RzILOpEffect *instruction_sequence = SEQN(2, op_ASSIGN_4, seq_38); + return instruction_sequence; +} + +// Pd = vcmpb.eq(Rss,Ii) +RzILOpEffect *hex_il_op_a4_vcmpbeqi(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: st32 i; + const HexOp *Pd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_2 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_5 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp126 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_7 = SETL("h_tmp126", VARL("i")); + + // seq(h_tmp126 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_8 = SEQN(2, op_ASSIGN_hybrid_tmp_7, op_INC_5); + + // u = u; + RzILOpEffect *imm_assign_24 = SETL("u", u); + + // Pd = ((st8) ((((ut64) ((st32) Pd)) & (~(0x1 << i))) | (((((ut32) ((ut8) ((Rss >> i * 0x8) & ((st64) 0xff)))) == u) ? 
0x1 : 0x0) << i))); + RzILOpPure *op_LSHIFT_11 = SHIFTL0(UN(64, 1), VARL("i")); + RzILOpPure *op_NOT_12 = LOGNOT(op_LSHIFT_11); + RzILOpPure *op_AND_15 = LOGAND(CAST(64, IL_FALSE, CAST(32, MSB(READ_REG(pkt, Pd_op, true)), READ_REG(pkt, Pd_op, true))), op_NOT_12); + RzILOpPure *op_MUL_18 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_RSHIFT_19 = SHIFTRA(Rss, op_MUL_18); + RzILOpPure *op_AND_22 = LOGAND(op_RSHIFT_19, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_EQ_27 = EQ(CAST(32, IL_FALSE, CAST(8, IL_FALSE, op_AND_22)), VARL("u")); + RzILOpPure *ite_cast_ut64_28 = ITE(op_EQ_27, UN(64, 1), UN(64, 0)); + RzILOpPure *op_LSHIFT_29 = SHIFTL0(ite_cast_ut64_28, VARL("i")); + RzILOpPure *op_OR_30 = LOGOR(op_AND_15, op_LSHIFT_29); + RzILOpEffect *op_ASSIGN_32 = WRITE_REG(bundle, Pd_op, CAST(8, IL_FALSE, op_OR_30)); + + // seq(h_tmp126; Pd = ((st8) ((((ut64) ((st32) Pd)) & (~(0x1 << i)) ...; + RzILOpEffect *seq_34 = op_ASSIGN_32; + + // seq(seq(h_tmp126; Pd = ((st8) ((((ut64) ((st32) Pd)) & (~(0x1 << ...; + RzILOpEffect *seq_35 = SEQN(2, seq_34, seq_8); + + // while ((i < 0x8)) { seq(seq(h_tmp126; Pd = ((st8) ((((ut64) ((st32) Pd)) & (~(0x1 << ... 
}; + RzILOpPure *op_LT_4 = SLT(VARL("i"), SN(32, 8)); + RzILOpEffect *for_36 = REPEAT(op_LT_4, seq_35); + + // seq(i = 0x0; while ((i < 0x8)) { seq(seq(h_tmp126; Pd = ((st8) ( ...; + RzILOpEffect *seq_37 = SEQN(2, op_ASSIGN_2, for_36); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_24, seq_37); + return instruction_sequence; +} + +// Pd = vcmpb.gt(Rss,Rtt) +RzILOpEffect *hex_il_op_a4_vcmpbgt(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: st32 i; + const HexOp *Pd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_2 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_5 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp127 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_7 = SETL("h_tmp127", VARL("i")); + + // seq(h_tmp127 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_8 = SEQN(2, op_ASSIGN_hybrid_tmp_7, op_INC_5); + + // Pd = ((st8) ((((ut64) ((st32) Pd)) & (~(0x1 << i))) | (((((st8) ((Rss >> i * 0x8) & ((st64) 0xff))) > ((st8) ((Rtt >> i * 0x8) & ((st64) 0xff)))) ? 
0x1 : 0x0) << i))); + RzILOpPure *op_LSHIFT_11 = SHIFTL0(UN(64, 1), VARL("i")); + RzILOpPure *op_NOT_12 = LOGNOT(op_LSHIFT_11); + RzILOpPure *op_AND_15 = LOGAND(CAST(64, IL_FALSE, CAST(32, MSB(READ_REG(pkt, Pd_op, true)), READ_REG(pkt, Pd_op, true))), op_NOT_12); + RzILOpPure *op_MUL_18 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_RSHIFT_19 = SHIFTRA(Rss, op_MUL_18); + RzILOpPure *op_AND_22 = LOGAND(op_RSHIFT_19, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_MUL_26 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_RSHIFT_27 = SHIFTRA(Rtt, op_MUL_26); + RzILOpPure *op_AND_30 = LOGAND(op_RSHIFT_27, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_GT_32 = SGT(CAST(8, MSB(op_AND_22), DUP(op_AND_22)), CAST(8, MSB(op_AND_30), DUP(op_AND_30))); + RzILOpPure *ite_cast_ut64_33 = ITE(op_GT_32, UN(64, 1), UN(64, 0)); + RzILOpPure *op_LSHIFT_34 = SHIFTL0(ite_cast_ut64_33, VARL("i")); + RzILOpPure *op_OR_35 = LOGOR(op_AND_15, op_LSHIFT_34); + RzILOpEffect *op_ASSIGN_37 = WRITE_REG(bundle, Pd_op, CAST(8, IL_FALSE, op_OR_35)); + + // seq(h_tmp127; Pd = ((st8) ((((ut64) ((st32) Pd)) & (~(0x1 << i)) ...; + RzILOpEffect *seq_39 = op_ASSIGN_37; + + // seq(seq(h_tmp127; Pd = ((st8) ((((ut64) ((st32) Pd)) & (~(0x1 << ...; + RzILOpEffect *seq_40 = SEQN(2, seq_39, seq_8); + + // while ((i < 0x8)) { seq(seq(h_tmp127; Pd = ((st8) ((((ut64) ((st32) Pd)) & (~(0x1 << ... 
}; + RzILOpPure *op_LT_4 = SLT(VARL("i"), SN(32, 8)); + RzILOpEffect *for_41 = REPEAT(op_LT_4, seq_40); + + // seq(i = 0x0; while ((i < 0x8)) { seq(seq(h_tmp127; Pd = ((st8) ( ...; + RzILOpEffect *seq_42 = SEQN(2, op_ASSIGN_2, for_41); + + RzILOpEffect *instruction_sequence = seq_42; + return instruction_sequence; +} + +// Pd = vcmpb.gt(Rss,Ii) +RzILOpEffect *hex_il_op_a4_vcmpbgti(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: st32 i; + const HexOp *Pd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_2 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_5 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp128 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_7 = SETL("h_tmp128", VARL("i")); + + // seq(h_tmp128 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_8 = SEQN(2, op_ASSIGN_hybrid_tmp_7, op_INC_5); + + // s = s; + RzILOpEffect *imm_assign_24 = SETL("s", s); + + // Pd = ((st8) ((((ut64) ((st32) Pd)) & (~(0x1 << i))) | (((((st32) ((st8) ((Rss >> i * 0x8) & ((st64) 0xff)))) > s) ? 
0x1 : 0x0) << i))); + RzILOpPure *op_LSHIFT_11 = SHIFTL0(UN(64, 1), VARL("i")); + RzILOpPure *op_NOT_12 = LOGNOT(op_LSHIFT_11); + RzILOpPure *op_AND_15 = LOGAND(CAST(64, IL_FALSE, CAST(32, MSB(READ_REG(pkt, Pd_op, true)), READ_REG(pkt, Pd_op, true))), op_NOT_12); + RzILOpPure *op_MUL_18 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_RSHIFT_19 = SHIFTRA(Rss, op_MUL_18); + RzILOpPure *op_AND_22 = LOGAND(op_RSHIFT_19, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_GT_27 = SGT(CAST(32, MSB(CAST(8, MSB(op_AND_22), DUP(op_AND_22))), CAST(8, MSB(DUP(op_AND_22)), DUP(op_AND_22))), VARL("s")); + RzILOpPure *ite_cast_ut64_28 = ITE(op_GT_27, UN(64, 1), UN(64, 0)); + RzILOpPure *op_LSHIFT_29 = SHIFTL0(ite_cast_ut64_28, VARL("i")); + RzILOpPure *op_OR_30 = LOGOR(op_AND_15, op_LSHIFT_29); + RzILOpEffect *op_ASSIGN_32 = WRITE_REG(bundle, Pd_op, CAST(8, IL_FALSE, op_OR_30)); + + // seq(h_tmp128; Pd = ((st8) ((((ut64) ((st32) Pd)) & (~(0x1 << i)) ...; + RzILOpEffect *seq_34 = op_ASSIGN_32; + + // seq(seq(h_tmp128; Pd = ((st8) ((((ut64) ((st32) Pd)) & (~(0x1 << ...; + RzILOpEffect *seq_35 = SEQN(2, seq_34, seq_8); + + // while ((i < 0x8)) { seq(seq(h_tmp128; Pd = ((st8) ((((ut64) ((st32) Pd)) & (~(0x1 << ... 
}; + RzILOpPure *op_LT_4 = SLT(VARL("i"), SN(32, 8)); + RzILOpEffect *for_36 = REPEAT(op_LT_4, seq_35); + + // seq(i = 0x0; while ((i < 0x8)) { seq(seq(h_tmp128; Pd = ((st8) ( ...; + RzILOpEffect *seq_37 = SEQN(2, op_ASSIGN_2, for_36); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_24, seq_37); + return instruction_sequence; +} + +// Pd = vcmpb.gtu(Rss,Ii) +RzILOpEffect *hex_il_op_a4_vcmpbgtui(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: st32 i; + const HexOp *Pd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_2 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_5 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp129 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_7 = SETL("h_tmp129", VARL("i")); + + // seq(h_tmp129 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_8 = SEQN(2, op_ASSIGN_hybrid_tmp_7, op_INC_5); + + // u = u; + RzILOpEffect *imm_assign_24 = SETL("u", u); + + // Pd = ((st8) ((((ut64) ((st32) Pd)) & (~(0x1 << i))) | (((((ut32) ((ut8) ((Rss >> i * 0x8) & ((st64) 0xff)))) > u) ? 
0x1 : 0x0) << i))); + RzILOpPure *op_LSHIFT_11 = SHIFTL0(UN(64, 1), VARL("i")); + RzILOpPure *op_NOT_12 = LOGNOT(op_LSHIFT_11); + RzILOpPure *op_AND_15 = LOGAND(CAST(64, IL_FALSE, CAST(32, MSB(READ_REG(pkt, Pd_op, true)), READ_REG(pkt, Pd_op, true))), op_NOT_12); + RzILOpPure *op_MUL_18 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_RSHIFT_19 = SHIFTRA(Rss, op_MUL_18); + RzILOpPure *op_AND_22 = LOGAND(op_RSHIFT_19, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_GT_27 = UGT(CAST(32, IL_FALSE, CAST(8, IL_FALSE, op_AND_22)), VARL("u")); + RzILOpPure *ite_cast_ut64_28 = ITE(op_GT_27, UN(64, 1), UN(64, 0)); + RzILOpPure *op_LSHIFT_29 = SHIFTL0(ite_cast_ut64_28, VARL("i")); + RzILOpPure *op_OR_30 = LOGOR(op_AND_15, op_LSHIFT_29); + RzILOpEffect *op_ASSIGN_32 = WRITE_REG(bundle, Pd_op, CAST(8, IL_FALSE, op_OR_30)); + + // seq(h_tmp129; Pd = ((st8) ((((ut64) ((st32) Pd)) & (~(0x1 << i)) ...; + RzILOpEffect *seq_34 = op_ASSIGN_32; + + // seq(seq(h_tmp129; Pd = ((st8) ((((ut64) ((st32) Pd)) & (~(0x1 << ...; + RzILOpEffect *seq_35 = SEQN(2, seq_34, seq_8); + + // while ((i < 0x8)) { seq(seq(h_tmp129; Pd = ((st8) ((((ut64) ((st32) Pd)) & (~(0x1 << ... 
}; + RzILOpPure *op_LT_4 = SLT(VARL("i"), SN(32, 8)); + RzILOpEffect *for_36 = REPEAT(op_LT_4, seq_35); + + // seq(i = 0x0; while ((i < 0x8)) { seq(seq(h_tmp129; Pd = ((st8) ( ...; + RzILOpEffect *seq_37 = SEQN(2, op_ASSIGN_2, for_36); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_24, seq_37); + return instruction_sequence; +} + +// Pd = vcmph.eq(Rss,Ii) +RzILOpEffect *hex_il_op_a4_vcmpheqi(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: st32 i; + const HexOp *Pd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_2 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_5 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp130 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_7 = SETL("h_tmp130", VARL("i")); + + // seq(h_tmp130 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_8 = SEQN(2, op_ASSIGN_hybrid_tmp_7, op_INC_5); + + // s = s; + RzILOpEffect *imm_assign_26 = SETL("s", s); + + // Pd = ((st8) ((((ut64) ((st32) Pd)) & (~(0x1 << i * 0x2))) | (((((st32) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff)))) == s) ? 
0x1 : 0x0) << i * 0x2))); + RzILOpPure *op_MUL_12 = MUL(VARL("i"), SN(32, 2)); + RzILOpPure *op_LSHIFT_13 = SHIFTL0(UN(64, 1), op_MUL_12); + RzILOpPure *op_NOT_14 = LOGNOT(op_LSHIFT_13); + RzILOpPure *op_AND_17 = LOGAND(CAST(64, IL_FALSE, CAST(32, MSB(READ_REG(pkt, Pd_op, true)), READ_REG(pkt, Pd_op, true))), op_NOT_14); + RzILOpPure *op_MUL_20 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_21 = SHIFTRA(Rss, op_MUL_20); + RzILOpPure *op_AND_24 = LOGAND(op_RSHIFT_21, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_EQ_29 = EQ(CAST(32, MSB(CAST(16, MSB(op_AND_24), DUP(op_AND_24))), CAST(16, MSB(DUP(op_AND_24)), DUP(op_AND_24))), VARL("s")); + RzILOpPure *ite_cast_ut64_30 = ITE(op_EQ_29, UN(64, 1), UN(64, 0)); + RzILOpPure *op_MUL_32 = MUL(VARL("i"), SN(32, 2)); + RzILOpPure *op_LSHIFT_33 = SHIFTL0(ite_cast_ut64_30, op_MUL_32); + RzILOpPure *op_OR_34 = LOGOR(op_AND_17, op_LSHIFT_33); + RzILOpEffect *op_ASSIGN_36 = WRITE_REG(bundle, Pd_op, CAST(8, IL_FALSE, op_OR_34)); + + // Pd = ((st8) ((((ut64) ((st32) Pd)) & (~(0x1 << i * 0x2 + 0x1))) | (((((st32) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff)))) == s) ? 
0x1 : 0x0) << i * 0x2 + 0x1))); + RzILOpPure *op_MUL_40 = MUL(VARL("i"), SN(32, 2)); + RzILOpPure *op_ADD_42 = ADD(op_MUL_40, SN(32, 1)); + RzILOpPure *op_LSHIFT_43 = SHIFTL0(UN(64, 1), op_ADD_42); + RzILOpPure *op_NOT_44 = LOGNOT(op_LSHIFT_43); + RzILOpPure *op_AND_47 = LOGAND(CAST(64, IL_FALSE, CAST(32, MSB(READ_REG(pkt, Pd_op, true)), READ_REG(pkt, Pd_op, true))), op_NOT_44); + RzILOpPure *op_MUL_49 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_50 = SHIFTRA(DUP(Rss), op_MUL_49); + RzILOpPure *op_AND_53 = LOGAND(op_RSHIFT_50, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_EQ_56 = EQ(CAST(32, MSB(CAST(16, MSB(op_AND_53), DUP(op_AND_53))), CAST(16, MSB(DUP(op_AND_53)), DUP(op_AND_53))), VARL("s")); + RzILOpPure *ite_cast_ut64_57 = ITE(op_EQ_56, UN(64, 1), UN(64, 0)); + RzILOpPure *op_MUL_59 = MUL(VARL("i"), SN(32, 2)); + RzILOpPure *op_ADD_61 = ADD(op_MUL_59, SN(32, 1)); + RzILOpPure *op_LSHIFT_62 = SHIFTL0(ite_cast_ut64_57, op_ADD_61); + RzILOpPure *op_OR_63 = LOGOR(op_AND_47, op_LSHIFT_62); + RzILOpEffect *op_ASSIGN_65 = WRITE_REG(bundle, Pd_op, CAST(8, IL_FALSE, op_OR_63)); + + // seq(h_tmp130; Pd = ((st8) ((((ut64) ((st32) Pd)) & (~(0x1 << i * ...; + RzILOpEffect *seq_67 = SEQN(2, op_ASSIGN_36, op_ASSIGN_65); + + // seq(seq(h_tmp130; Pd = ((st8) ((((ut64) ((st32) Pd)) & (~(0x1 << ...; + RzILOpEffect *seq_68 = SEQN(2, seq_67, seq_8); + + // while ((i < 0x4)) { seq(seq(h_tmp130; Pd = ((st8) ((((ut64) ((st32) Pd)) & (~(0x1 << ... 
}; + RzILOpPure *op_LT_4 = SLT(VARL("i"), SN(32, 4)); + RzILOpEffect *for_69 = REPEAT(op_LT_4, seq_68); + + // seq(i = 0x0; while ((i < 0x4)) { seq(seq(h_tmp130; Pd = ((st8) ( ...; + RzILOpEffect *seq_70 = SEQN(2, op_ASSIGN_2, for_69); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_26, seq_70); + return instruction_sequence; +} + +// Pd = vcmph.gt(Rss,Ii) +RzILOpEffect *hex_il_op_a4_vcmphgti(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: st32 i; + const HexOp *Pd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_2 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_5 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp131 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_7 = SETL("h_tmp131", VARL("i")); + + // seq(h_tmp131 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_8 = SEQN(2, op_ASSIGN_hybrid_tmp_7, op_INC_5); + + // s = s; + RzILOpEffect *imm_assign_26 = SETL("s", s); + + // Pd = ((st8) ((((ut64) ((st32) Pd)) & (~(0x1 << i * 0x2))) | (((((st32) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff)))) > s) ? 
0x1 : 0x0) << i * 0x2))); + RzILOpPure *op_MUL_12 = MUL(VARL("i"), SN(32, 2)); + RzILOpPure *op_LSHIFT_13 = SHIFTL0(UN(64, 1), op_MUL_12); + RzILOpPure *op_NOT_14 = LOGNOT(op_LSHIFT_13); + RzILOpPure *op_AND_17 = LOGAND(CAST(64, IL_FALSE, CAST(32, MSB(READ_REG(pkt, Pd_op, true)), READ_REG(pkt, Pd_op, true))), op_NOT_14); + RzILOpPure *op_MUL_20 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_21 = SHIFTRA(Rss, op_MUL_20); + RzILOpPure *op_AND_24 = LOGAND(op_RSHIFT_21, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_GT_29 = SGT(CAST(32, MSB(CAST(16, MSB(op_AND_24), DUP(op_AND_24))), CAST(16, MSB(DUP(op_AND_24)), DUP(op_AND_24))), VARL("s")); + RzILOpPure *ite_cast_ut64_30 = ITE(op_GT_29, UN(64, 1), UN(64, 0)); + RzILOpPure *op_MUL_32 = MUL(VARL("i"), SN(32, 2)); + RzILOpPure *op_LSHIFT_33 = SHIFTL0(ite_cast_ut64_30, op_MUL_32); + RzILOpPure *op_OR_34 = LOGOR(op_AND_17, op_LSHIFT_33); + RzILOpEffect *op_ASSIGN_36 = WRITE_REG(bundle, Pd_op, CAST(8, IL_FALSE, op_OR_34)); + + // Pd = ((st8) ((((ut64) ((st32) Pd)) & (~(0x1 << i * 0x2 + 0x1))) | (((((st32) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff)))) > s) ? 
0x1 : 0x0) << i * 0x2 + 0x1))); + RzILOpPure *op_MUL_40 = MUL(VARL("i"), SN(32, 2)); + RzILOpPure *op_ADD_42 = ADD(op_MUL_40, SN(32, 1)); + RzILOpPure *op_LSHIFT_43 = SHIFTL0(UN(64, 1), op_ADD_42); + RzILOpPure *op_NOT_44 = LOGNOT(op_LSHIFT_43); + RzILOpPure *op_AND_47 = LOGAND(CAST(64, IL_FALSE, CAST(32, MSB(READ_REG(pkt, Pd_op, true)), READ_REG(pkt, Pd_op, true))), op_NOT_44); + RzILOpPure *op_MUL_49 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_50 = SHIFTRA(DUP(Rss), op_MUL_49); + RzILOpPure *op_AND_53 = LOGAND(op_RSHIFT_50, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_GT_56 = SGT(CAST(32, MSB(CAST(16, MSB(op_AND_53), DUP(op_AND_53))), CAST(16, MSB(DUP(op_AND_53)), DUP(op_AND_53))), VARL("s")); + RzILOpPure *ite_cast_ut64_57 = ITE(op_GT_56, UN(64, 1), UN(64, 0)); + RzILOpPure *op_MUL_59 = MUL(VARL("i"), SN(32, 2)); + RzILOpPure *op_ADD_61 = ADD(op_MUL_59, SN(32, 1)); + RzILOpPure *op_LSHIFT_62 = SHIFTL0(ite_cast_ut64_57, op_ADD_61); + RzILOpPure *op_OR_63 = LOGOR(op_AND_47, op_LSHIFT_62); + RzILOpEffect *op_ASSIGN_65 = WRITE_REG(bundle, Pd_op, CAST(8, IL_FALSE, op_OR_63)); + + // seq(h_tmp131; Pd = ((st8) ((((ut64) ((st32) Pd)) & (~(0x1 << i * ...; + RzILOpEffect *seq_67 = SEQN(2, op_ASSIGN_36, op_ASSIGN_65); + + // seq(seq(h_tmp131; Pd = ((st8) ((((ut64) ((st32) Pd)) & (~(0x1 << ...; + RzILOpEffect *seq_68 = SEQN(2, seq_67, seq_8); + + // while ((i < 0x4)) { seq(seq(h_tmp131; Pd = ((st8) ((((ut64) ((st32) Pd)) & (~(0x1 << ... 
}; + RzILOpPure *op_LT_4 = SLT(VARL("i"), SN(32, 4)); + RzILOpEffect *for_69 = REPEAT(op_LT_4, seq_68); + + // seq(i = 0x0; while ((i < 0x4)) { seq(seq(h_tmp131; Pd = ((st8) ( ...; + RzILOpEffect *seq_70 = SEQN(2, op_ASSIGN_2, for_69); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_26, seq_70); + return instruction_sequence; +} + +// Pd = vcmph.gtu(Rss,Ii) +RzILOpEffect *hex_il_op_a4_vcmphgtui(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: st32 i; + const HexOp *Pd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_2 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_5 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp132 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_7 = SETL("h_tmp132", VARL("i")); + + // seq(h_tmp132 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_8 = SEQN(2, op_ASSIGN_hybrid_tmp_7, op_INC_5); + + // u = u; + RzILOpEffect *imm_assign_26 = SETL("u", u); + + // Pd = ((st8) ((((ut64) ((st32) Pd)) & (~(0x1 << i * 0x2))) | (((((ut32) ((ut16) ((Rss >> i * 0x10) & ((st64) 0xffff)))) > u) ? 
0x1 : 0x0) << i * 0x2))); + RzILOpPure *op_MUL_12 = MUL(VARL("i"), SN(32, 2)); + RzILOpPure *op_LSHIFT_13 = SHIFTL0(UN(64, 1), op_MUL_12); + RzILOpPure *op_NOT_14 = LOGNOT(op_LSHIFT_13); + RzILOpPure *op_AND_17 = LOGAND(CAST(64, IL_FALSE, CAST(32, MSB(READ_REG(pkt, Pd_op, true)), READ_REG(pkt, Pd_op, true))), op_NOT_14); + RzILOpPure *op_MUL_20 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_21 = SHIFTRA(Rss, op_MUL_20); + RzILOpPure *op_AND_24 = LOGAND(op_RSHIFT_21, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_GT_29 = UGT(CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_24)), VARL("u")); + RzILOpPure *ite_cast_ut64_30 = ITE(op_GT_29, UN(64, 1), UN(64, 0)); + RzILOpPure *op_MUL_32 = MUL(VARL("i"), SN(32, 2)); + RzILOpPure *op_LSHIFT_33 = SHIFTL0(ite_cast_ut64_30, op_MUL_32); + RzILOpPure *op_OR_34 = LOGOR(op_AND_17, op_LSHIFT_33); + RzILOpEffect *op_ASSIGN_36 = WRITE_REG(bundle, Pd_op, CAST(8, IL_FALSE, op_OR_34)); + + // Pd = ((st8) ((((ut64) ((st32) Pd)) & (~(0x1 << i * 0x2 + 0x1))) | (((((ut32) ((ut16) ((Rss >> i * 0x10) & ((st64) 0xffff)))) > u) ? 
0x1 : 0x0) << i * 0x2 + 0x1))); + RzILOpPure *op_MUL_40 = MUL(VARL("i"), SN(32, 2)); + RzILOpPure *op_ADD_42 = ADD(op_MUL_40, SN(32, 1)); + RzILOpPure *op_LSHIFT_43 = SHIFTL0(UN(64, 1), op_ADD_42); + RzILOpPure *op_NOT_44 = LOGNOT(op_LSHIFT_43); + RzILOpPure *op_AND_47 = LOGAND(CAST(64, IL_FALSE, CAST(32, MSB(READ_REG(pkt, Pd_op, true)), READ_REG(pkt, Pd_op, true))), op_NOT_44); + RzILOpPure *op_MUL_49 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_50 = SHIFTRA(DUP(Rss), op_MUL_49); + RzILOpPure *op_AND_53 = LOGAND(op_RSHIFT_50, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_GT_56 = UGT(CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_53)), VARL("u")); + RzILOpPure *ite_cast_ut64_57 = ITE(op_GT_56, UN(64, 1), UN(64, 0)); + RzILOpPure *op_MUL_59 = MUL(VARL("i"), SN(32, 2)); + RzILOpPure *op_ADD_61 = ADD(op_MUL_59, SN(32, 1)); + RzILOpPure *op_LSHIFT_62 = SHIFTL0(ite_cast_ut64_57, op_ADD_61); + RzILOpPure *op_OR_63 = LOGOR(op_AND_47, op_LSHIFT_62); + RzILOpEffect *op_ASSIGN_65 = WRITE_REG(bundle, Pd_op, CAST(8, IL_FALSE, op_OR_63)); + + // seq(h_tmp132; Pd = ((st8) ((((ut64) ((st32) Pd)) & (~(0x1 << i * ...; + RzILOpEffect *seq_67 = SEQN(2, op_ASSIGN_36, op_ASSIGN_65); + + // seq(seq(h_tmp132; Pd = ((st8) ((((ut64) ((st32) Pd)) & (~(0x1 << ...; + RzILOpEffect *seq_68 = SEQN(2, seq_67, seq_8); + + // while ((i < 0x4)) { seq(seq(h_tmp132; Pd = ((st8) ((((ut64) ((st32) Pd)) & (~(0x1 << ... 
}; + RzILOpPure *op_LT_4 = SLT(VARL("i"), SN(32, 4)); + RzILOpEffect *for_69 = REPEAT(op_LT_4, seq_68); + + // seq(i = 0x0; while ((i < 0x4)) { seq(seq(h_tmp132; Pd = ((st8) ( ...; + RzILOpEffect *seq_70 = SEQN(2, op_ASSIGN_2, for_69); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_26, seq_70); + return instruction_sequence; +} + +// Pd = vcmpw.eq(Rss,Ii) +RzILOpEffect *hex_il_op_a4_vcmpweqi(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: st32 j; + const HexOp *Pd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + + // j = 0x0; + RzILOpEffect *op_ASSIGN_2 = SETL("j", SN(32, 0)); + + // HYB(++j); + RzILOpEffect *op_INC_5 = SETL("j", INC(VARL("j"), 32)); + + // h_tmp133 = HYB(++j); + RzILOpEffect *op_ASSIGN_hybrid_tmp_7 = SETL("h_tmp133", VARL("j")); + + // seq(h_tmp133 = HYB(++j); HYB(++j)); + RzILOpEffect *seq_8 = SEQN(2, op_ASSIGN_hybrid_tmp_7, op_INC_5); + + // s = s; + RzILOpEffect *imm_assign_25 = SETL("s", s); + + // Pd = ((st8) ((((ut64) ((st32) Pd)) & (~(0x1 << j))) | (((((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))) == ((st64) s)) ? 
0x1 : 0x0) << j))); + RzILOpPure *op_LSHIFT_11 = SHIFTL0(UN(64, 1), VARL("j")); + RzILOpPure *op_NOT_12 = LOGNOT(op_LSHIFT_11); + RzILOpPure *op_AND_15 = LOGAND(CAST(64, IL_FALSE, CAST(32, MSB(READ_REG(pkt, Pd_op, true)), READ_REG(pkt, Pd_op, true))), op_NOT_12); + RzILOpPure *op_RSHIFT_20 = SHIFTRA(Rss, SN(32, 0)); + RzILOpPure *op_AND_22 = LOGAND(op_RSHIFT_20, SN(64, 0xffffffff)); + RzILOpPure *op_EQ_28 = EQ(CAST(64, MSB(CAST(32, MSB(op_AND_22), DUP(op_AND_22))), CAST(32, MSB(DUP(op_AND_22)), DUP(op_AND_22))), CAST(64, MSB(VARL("s")), VARL("s"))); + RzILOpPure *ite_cast_ut64_29 = ITE(op_EQ_28, UN(64, 1), UN(64, 0)); + RzILOpPure *op_LSHIFT_30 = SHIFTL0(ite_cast_ut64_29, VARL("j")); + RzILOpPure *op_OR_31 = LOGOR(op_AND_15, op_LSHIFT_30); + RzILOpEffect *op_ASSIGN_33 = WRITE_REG(bundle, Pd_op, CAST(8, IL_FALSE, op_OR_31)); + + // seq(h_tmp133; Pd = ((st8) ((((ut64) ((st32) Pd)) & (~(0x1 << j)) ...; + RzILOpEffect *seq_35 = op_ASSIGN_33; + + // seq(seq(h_tmp133; Pd = ((st8) ((((ut64) ((st32) Pd)) & (~(0x1 << ...; + RzILOpEffect *seq_36 = SEQN(2, seq_35, seq_8); + + // while ((j <= 0x3)) { seq(seq(h_tmp133; Pd = ((st8) ((((ut64) ((st32) Pd)) & (~(0x1 << ... }; + RzILOpPure *op_LE_4 = SLE(VARL("j"), SN(32, 3)); + RzILOpEffect *for_37 = REPEAT(op_LE_4, seq_36); + + // seq(j = 0x0; while ((j <= 0x3)) { seq(seq(h_tmp133; Pd = ((st8) ...; + RzILOpEffect *seq_38 = SEQN(2, op_ASSIGN_2, for_37); + + // j = 0x4; + RzILOpEffect *op_ASSIGN_41 = SETL("j", SN(32, 4)); + + // HYB(++j); + RzILOpEffect *op_INC_44 = SETL("j", INC(VARL("j"), 32)); + + // h_tmp134 = HYB(++j); + RzILOpEffect *op_ASSIGN_hybrid_tmp_46 = SETL("h_tmp134", VARL("j")); + + // seq(h_tmp134 = HYB(++j); HYB(++j)); + RzILOpEffect *seq_47 = SEQN(2, op_ASSIGN_hybrid_tmp_46, op_INC_44); + + // Pd = ((st8) ((((ut64) ((st32) Pd)) & (~(0x1 << j))) | (((((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))) == ((st64) s)) ? 
0x1 : 0x0) << j))); + RzILOpPure *op_LSHIFT_49 = SHIFTL0(UN(64, 1), VARL("j")); + RzILOpPure *op_NOT_50 = LOGNOT(op_LSHIFT_49); + RzILOpPure *op_AND_53 = LOGAND(CAST(64, IL_FALSE, CAST(32, MSB(READ_REG(pkt, Pd_op, true)), READ_REG(pkt, Pd_op, true))), op_NOT_50); + RzILOpPure *op_RSHIFT_57 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_59 = LOGAND(op_RSHIFT_57, SN(64, 0xffffffff)); + RzILOpPure *op_EQ_63 = EQ(CAST(64, MSB(CAST(32, MSB(op_AND_59), DUP(op_AND_59))), CAST(32, MSB(DUP(op_AND_59)), DUP(op_AND_59))), CAST(64, MSB(VARL("s")), VARL("s"))); + RzILOpPure *ite_cast_ut64_64 = ITE(op_EQ_63, UN(64, 1), UN(64, 0)); + RzILOpPure *op_LSHIFT_65 = SHIFTL0(ite_cast_ut64_64, VARL("j")); + RzILOpPure *op_OR_66 = LOGOR(op_AND_53, op_LSHIFT_65); + RzILOpEffect *op_ASSIGN_68 = WRITE_REG(bundle, Pd_op, CAST(8, IL_FALSE, op_OR_66)); + + // seq(h_tmp134; Pd = ((st8) ((((ut64) ((st32) Pd)) & (~(0x1 << j)) ...; + RzILOpEffect *seq_70 = op_ASSIGN_68; + + // seq(seq(h_tmp134; Pd = ((st8) ((((ut64) ((st32) Pd)) & (~(0x1 << ...; + RzILOpEffect *seq_71 = SEQN(2, seq_70, seq_47); + + // while ((j <= 0x7)) { seq(seq(h_tmp134; Pd = ((st8) ((((ut64) ((st32) Pd)) & (~(0x1 << ... 
}; + RzILOpPure *op_LE_43 = SLE(VARL("j"), SN(32, 7)); + RzILOpEffect *for_72 = REPEAT(op_LE_43, seq_71); + + // seq(j = 0x4; while ((j <= 0x7)) { seq(seq(h_tmp134; Pd = ((st8) ...; + RzILOpEffect *seq_73 = SEQN(2, op_ASSIGN_41, for_72); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_25, seq_38, seq_73); + return instruction_sequence; +} + +// Pd = vcmpw.gt(Rss,Ii) +RzILOpEffect *hex_il_op_a4_vcmpwgti(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: st32 j; + const HexOp *Pd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + + // j = 0x0; + RzILOpEffect *op_ASSIGN_2 = SETL("j", SN(32, 0)); + + // HYB(++j); + RzILOpEffect *op_INC_5 = SETL("j", INC(VARL("j"), 32)); + + // h_tmp135 = HYB(++j); + RzILOpEffect *op_ASSIGN_hybrid_tmp_7 = SETL("h_tmp135", VARL("j")); + + // seq(h_tmp135 = HYB(++j); HYB(++j)); + RzILOpEffect *seq_8 = SEQN(2, op_ASSIGN_hybrid_tmp_7, op_INC_5); + + // s = s; + RzILOpEffect *imm_assign_25 = SETL("s", s); + + // Pd = ((st8) ((((ut64) ((st32) Pd)) & (~(0x1 << j))) | (((((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))) > ((st64) s)) ? 
0x1 : 0x0) << j))); + RzILOpPure *op_LSHIFT_11 = SHIFTL0(UN(64, 1), VARL("j")); + RzILOpPure *op_NOT_12 = LOGNOT(op_LSHIFT_11); + RzILOpPure *op_AND_15 = LOGAND(CAST(64, IL_FALSE, CAST(32, MSB(READ_REG(pkt, Pd_op, true)), READ_REG(pkt, Pd_op, true))), op_NOT_12); + RzILOpPure *op_RSHIFT_20 = SHIFTRA(Rss, SN(32, 0)); + RzILOpPure *op_AND_22 = LOGAND(op_RSHIFT_20, SN(64, 0xffffffff)); + RzILOpPure *op_GT_28 = SGT(CAST(64, MSB(CAST(32, MSB(op_AND_22), DUP(op_AND_22))), CAST(32, MSB(DUP(op_AND_22)), DUP(op_AND_22))), CAST(64, MSB(VARL("s")), VARL("s"))); + RzILOpPure *ite_cast_ut64_29 = ITE(op_GT_28, UN(64, 1), UN(64, 0)); + RzILOpPure *op_LSHIFT_30 = SHIFTL0(ite_cast_ut64_29, VARL("j")); + RzILOpPure *op_OR_31 = LOGOR(op_AND_15, op_LSHIFT_30); + RzILOpEffect *op_ASSIGN_33 = WRITE_REG(bundle, Pd_op, CAST(8, IL_FALSE, op_OR_31)); + + // seq(h_tmp135; Pd = ((st8) ((((ut64) ((st32) Pd)) & (~(0x1 << j)) ...; + RzILOpEffect *seq_35 = op_ASSIGN_33; + + // seq(seq(h_tmp135; Pd = ((st8) ((((ut64) ((st32) Pd)) & (~(0x1 << ...; + RzILOpEffect *seq_36 = SEQN(2, seq_35, seq_8); + + // while ((j <= 0x3)) { seq(seq(h_tmp135; Pd = ((st8) ((((ut64) ((st32) Pd)) & (~(0x1 << ... }; + RzILOpPure *op_LE_4 = SLE(VARL("j"), SN(32, 3)); + RzILOpEffect *for_37 = REPEAT(op_LE_4, seq_36); + + // seq(j = 0x0; while ((j <= 0x3)) { seq(seq(h_tmp135; Pd = ((st8) ...; + RzILOpEffect *seq_38 = SEQN(2, op_ASSIGN_2, for_37); + + // j = 0x4; + RzILOpEffect *op_ASSIGN_40 = SETL("j", SN(32, 4)); + + // HYB(++j); + RzILOpEffect *op_INC_43 = SETL("j", INC(VARL("j"), 32)); + + // h_tmp136 = HYB(++j); + RzILOpEffect *op_ASSIGN_hybrid_tmp_45 = SETL("h_tmp136", VARL("j")); + + // seq(h_tmp136 = HYB(++j); HYB(++j)); + RzILOpEffect *seq_46 = SEQN(2, op_ASSIGN_hybrid_tmp_45, op_INC_43); + + // Pd = ((st8) ((((ut64) ((st32) Pd)) & (~(0x1 << j))) | (((((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))) > ((st64) s)) ? 
0x1 : 0x0) << j))); + RzILOpPure *op_LSHIFT_48 = SHIFTL0(UN(64, 1), VARL("j")); + RzILOpPure *op_NOT_49 = LOGNOT(op_LSHIFT_48); + RzILOpPure *op_AND_52 = LOGAND(CAST(64, IL_FALSE, CAST(32, MSB(READ_REG(pkt, Pd_op, true)), READ_REG(pkt, Pd_op, true))), op_NOT_49); + RzILOpPure *op_RSHIFT_56 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_58 = LOGAND(op_RSHIFT_56, SN(64, 0xffffffff)); + RzILOpPure *op_GT_62 = SGT(CAST(64, MSB(CAST(32, MSB(op_AND_58), DUP(op_AND_58))), CAST(32, MSB(DUP(op_AND_58)), DUP(op_AND_58))), CAST(64, MSB(VARL("s")), VARL("s"))); + RzILOpPure *ite_cast_ut64_63 = ITE(op_GT_62, UN(64, 1), UN(64, 0)); + RzILOpPure *op_LSHIFT_64 = SHIFTL0(ite_cast_ut64_63, VARL("j")); + RzILOpPure *op_OR_65 = LOGOR(op_AND_52, op_LSHIFT_64); + RzILOpEffect *op_ASSIGN_67 = WRITE_REG(bundle, Pd_op, CAST(8, IL_FALSE, op_OR_65)); + + // seq(h_tmp136; Pd = ((st8) ((((ut64) ((st32) Pd)) & (~(0x1 << j)) ...; + RzILOpEffect *seq_69 = op_ASSIGN_67; + + // seq(seq(h_tmp136; Pd = ((st8) ((((ut64) ((st32) Pd)) & (~(0x1 << ...; + RzILOpEffect *seq_70 = SEQN(2, seq_69, seq_46); + + // while ((j <= 0x7)) { seq(seq(h_tmp136; Pd = ((st8) ((((ut64) ((st32) Pd)) & (~(0x1 << ... 
}; + RzILOpPure *op_LE_42 = SLE(VARL("j"), SN(32, 7)); + RzILOpEffect *for_71 = REPEAT(op_LE_42, seq_70); + + // seq(j = 0x4; while ((j <= 0x7)) { seq(seq(h_tmp136; Pd = ((st8) ...; + RzILOpEffect *seq_72 = SEQN(2, op_ASSIGN_40, for_71); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_25, seq_38, seq_72); + return instruction_sequence; +} + +// Pd = vcmpw.gtu(Rss,Ii) +RzILOpEffect *hex_il_op_a4_vcmpwgtui(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: st32 j; + const HexOp *Pd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + + // j = 0x0; + RzILOpEffect *op_ASSIGN_2 = SETL("j", SN(32, 0)); + + // HYB(++j); + RzILOpEffect *op_INC_5 = SETL("j", INC(VARL("j"), 32)); + + // h_tmp137 = HYB(++j); + RzILOpEffect *op_ASSIGN_hybrid_tmp_7 = SETL("h_tmp137", VARL("j")); + + // seq(h_tmp137 = HYB(++j); HYB(++j)); + RzILOpEffect *seq_8 = SEQN(2, op_ASSIGN_hybrid_tmp_7, op_INC_5); + + // u = u; + RzILOpEffect *imm_assign_25 = SETL("u", u); + + // Pd = ((st8) ((((ut64) ((st32) Pd)) & (~(0x1 << j))) | (((((ut64) ((ut32) ((Rss >> 0x0) & 0xffffffff))) > ((ut64) u)) ? 
0x1 : 0x0) << j))); + RzILOpPure *op_LSHIFT_11 = SHIFTL0(UN(64, 1), VARL("j")); + RzILOpPure *op_NOT_12 = LOGNOT(op_LSHIFT_11); + RzILOpPure *op_AND_15 = LOGAND(CAST(64, IL_FALSE, CAST(32, MSB(READ_REG(pkt, Pd_op, true)), READ_REG(pkt, Pd_op, true))), op_NOT_12); + RzILOpPure *op_RSHIFT_20 = SHIFTRA(Rss, SN(32, 0)); + RzILOpPure *op_AND_22 = LOGAND(op_RSHIFT_20, SN(64, 0xffffffff)); + RzILOpPure *op_GT_28 = UGT(CAST(64, IL_FALSE, CAST(32, IL_FALSE, op_AND_22)), CAST(64, IL_FALSE, VARL("u"))); + RzILOpPure *ite_cast_ut64_29 = ITE(op_GT_28, UN(64, 1), UN(64, 0)); + RzILOpPure *op_LSHIFT_30 = SHIFTL0(ite_cast_ut64_29, VARL("j")); + RzILOpPure *op_OR_31 = LOGOR(op_AND_15, op_LSHIFT_30); + RzILOpEffect *op_ASSIGN_33 = WRITE_REG(bundle, Pd_op, CAST(8, IL_FALSE, op_OR_31)); + + // seq(h_tmp137; Pd = ((st8) ((((ut64) ((st32) Pd)) & (~(0x1 << j)) ...; + RzILOpEffect *seq_35 = op_ASSIGN_33; + + // seq(seq(h_tmp137; Pd = ((st8) ((((ut64) ((st32) Pd)) & (~(0x1 << ...; + RzILOpEffect *seq_36 = SEQN(2, seq_35, seq_8); + + // while ((j <= 0x3)) { seq(seq(h_tmp137; Pd = ((st8) ((((ut64) ((st32) Pd)) & (~(0x1 << ... }; + RzILOpPure *op_LE_4 = SLE(VARL("j"), SN(32, 3)); + RzILOpEffect *for_37 = REPEAT(op_LE_4, seq_36); + + // seq(j = 0x0; while ((j <= 0x3)) { seq(seq(h_tmp137; Pd = ((st8) ...; + RzILOpEffect *seq_38 = SEQN(2, op_ASSIGN_2, for_37); + + // j = 0x4; + RzILOpEffect *op_ASSIGN_40 = SETL("j", SN(32, 4)); + + // HYB(++j); + RzILOpEffect *op_INC_43 = SETL("j", INC(VARL("j"), 32)); + + // h_tmp138 = HYB(++j); + RzILOpEffect *op_ASSIGN_hybrid_tmp_45 = SETL("h_tmp138", VARL("j")); + + // seq(h_tmp138 = HYB(++j); HYB(++j)); + RzILOpEffect *seq_46 = SEQN(2, op_ASSIGN_hybrid_tmp_45, op_INC_43); + + // Pd = ((st8) ((((ut64) ((st32) Pd)) & (~(0x1 << j))) | (((((ut64) ((ut32) ((Rss >> 0x20) & 0xffffffff))) > ((ut64) u)) ? 
0x1 : 0x0) << j))); + RzILOpPure *op_LSHIFT_48 = SHIFTL0(UN(64, 1), VARL("j")); + RzILOpPure *op_NOT_49 = LOGNOT(op_LSHIFT_48); + RzILOpPure *op_AND_52 = LOGAND(CAST(64, IL_FALSE, CAST(32, MSB(READ_REG(pkt, Pd_op, true)), READ_REG(pkt, Pd_op, true))), op_NOT_49); + RzILOpPure *op_RSHIFT_56 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_58 = LOGAND(op_RSHIFT_56, SN(64, 0xffffffff)); + RzILOpPure *op_GT_62 = UGT(CAST(64, IL_FALSE, CAST(32, IL_FALSE, op_AND_58)), CAST(64, IL_FALSE, VARL("u"))); + RzILOpPure *ite_cast_ut64_63 = ITE(op_GT_62, UN(64, 1), UN(64, 0)); + RzILOpPure *op_LSHIFT_64 = SHIFTL0(ite_cast_ut64_63, VARL("j")); + RzILOpPure *op_OR_65 = LOGOR(op_AND_52, op_LSHIFT_64); + RzILOpEffect *op_ASSIGN_67 = WRITE_REG(bundle, Pd_op, CAST(8, IL_FALSE, op_OR_65)); + + // seq(h_tmp138; Pd = ((st8) ((((ut64) ((st32) Pd)) & (~(0x1 << j)) ...; + RzILOpEffect *seq_69 = op_ASSIGN_67; + + // seq(seq(h_tmp138; Pd = ((st8) ((((ut64) ((st32) Pd)) & (~(0x1 << ...; + RzILOpEffect *seq_70 = SEQN(2, seq_69, seq_46); + + // while ((j <= 0x7)) { seq(seq(h_tmp138; Pd = ((st8) ((((ut64) ((st32) Pd)) & (~(0x1 << ... 
}; + RzILOpPure *op_LE_42 = SLE(VARL("j"), SN(32, 7)); + RzILOpEffect *for_71 = REPEAT(op_LE_42, seq_70); + + // seq(j = 0x4; while ((j <= 0x7)) { seq(seq(h_tmp138; Pd = ((st8) ...; + RzILOpEffect *seq_72 = SEQN(2, op_ASSIGN_40, for_71); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_25, seq_38, seq_72); + return instruction_sequence; +} + +// Rxx = vrmaxh(Rss,Ru) +RzILOpEffect *hex_il_op_a4_vrmaxh(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: st32 i; + // Declare: st64 max; + // Declare: st32 addr; + const HexOp *Rxx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Ru_op = ISA2REG(hi, 'u', false); + RzILOpPure *Ru = READ_REG(pkt, Ru_op, false); + + // max = ((st64) ((st16) ((Rxx >> 0x0) & ((st64) 0xffff)))); + RzILOpPure *op_RSHIFT_7 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_10 = LOGAND(op_RSHIFT_7, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpEffect *op_ASSIGN_13 = SETL("max", CAST(64, MSB(CAST(16, MSB(op_AND_10), DUP(op_AND_10))), CAST(16, MSB(DUP(op_AND_10)), DUP(op_AND_10)))); + + // addr = ((st32) ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff)))); + RzILOpPure *op_RSHIFT_17 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_19 = LOGAND(op_RSHIFT_17, SN(64, 0xffffffff)); + RzILOpEffect *op_ASSIGN_23 = SETL("addr", CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_19), DUP(op_AND_19))), CAST(32, MSB(DUP(op_AND_19)), DUP(op_AND_19)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_19)), DUP(op_AND_19))), CAST(32, MSB(DUP(op_AND_19)), DUP(op_AND_19))))); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_25 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_28 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp139 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_30 = SETL("h_tmp139", VARL("i")); + + // seq(h_tmp139 = HYB(++i); 
HYB(++i)); + RzILOpEffect *seq_31 = SEQN(2, op_ASSIGN_hybrid_tmp_30, op_INC_28); + + // max = ((st64) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff)))); + RzILOpPure *op_MUL_43 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_44 = SHIFTRA(Rss, op_MUL_43); + RzILOpPure *op_AND_47 = LOGAND(op_RSHIFT_44, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpEffect *op_ASSIGN_50 = SETL("max", CAST(64, MSB(CAST(16, MSB(op_AND_47), DUP(op_AND_47))), CAST(16, MSB(DUP(op_AND_47)), DUP(op_AND_47)))); + + // addr = (Ru | (i << 0x1)); + RzILOpPure *op_LSHIFT_53 = SHIFTL0(VARL("i"), SN(32, 1)); + RzILOpPure *op_OR_54 = LOGOR(Ru, op_LSHIFT_53); + RzILOpEffect *op_ASSIGN_55 = SETL("addr", op_OR_54); + + // seq(max = ((st64) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff))) ...; + RzILOpEffect *seq_then_56 = SEQN(2, op_ASSIGN_50, op_ASSIGN_55); + + // if ((max < ((st64) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff)))))) {seq(max = ((st64) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff))) ...} else {{}}; + RzILOpPure *op_MUL_34 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_35 = SHIFTRA(DUP(Rss), op_MUL_34); + RzILOpPure *op_AND_38 = LOGAND(op_RSHIFT_35, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_LT_41 = SLT(VARL("max"), CAST(64, MSB(CAST(16, MSB(op_AND_38), DUP(op_AND_38))), CAST(16, MSB(DUP(op_AND_38)), DUP(op_AND_38)))); + RzILOpEffect *branch_57 = BRANCH(op_LT_41, seq_then_56, EMPTY()); + + // seq(h_tmp139; if ((max < ((st64) ((st16) ((Rss >> i * 0x10) & (( ...; + RzILOpEffect *seq_58 = branch_57; + + // seq(seq(h_tmp139; if ((max < ((st64) ((st16) ((Rss >> i * 0x10) ...; + RzILOpEffect *seq_59 = SEQN(2, seq_58, seq_31); + + // while ((i < 0x4)) { seq(seq(h_tmp139; if ((max < ((st64) ((st16) ((Rss >> i * 0x10) ... 
}; + RzILOpPure *op_LT_27 = SLT(VARL("i"), SN(32, 4)); + RzILOpEffect *for_60 = REPEAT(op_LT_27, seq_59); + + // seq(i = 0x0; while ((i < 0x4)) { seq(seq(h_tmp139; if ((max < (( ...; + RzILOpEffect *seq_61 = SEQN(2, op_ASSIGN_25, for_60); + + // Rxx = ((Rxx & (~(0xffffffff << 0x0))) | ((max & 0xffffffff) << 0x0)); + RzILOpPure *op_LSHIFT_66 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0)); + RzILOpPure *op_NOT_67 = LOGNOT(op_LSHIFT_66); + RzILOpPure *op_AND_68 = LOGAND(READ_REG(pkt, Rxx_op, false), op_NOT_67); + RzILOpPure *op_AND_70 = LOGAND(VARL("max"), SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_74 = SHIFTL0(op_AND_70, SN(32, 0)); + RzILOpPure *op_OR_75 = LOGOR(op_AND_68, op_LSHIFT_74); + RzILOpEffect *op_ASSIGN_76 = WRITE_REG(bundle, Rxx_op, op_OR_75); + + // Rxx = ((Rxx & (~(0xffffffff << 0x20))) | ((((st64) addr) & 0xffffffff) << 0x20)); + RzILOpPure *op_LSHIFT_82 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0x20)); + RzILOpPure *op_NOT_83 = LOGNOT(op_LSHIFT_82); + RzILOpPure *op_AND_84 = LOGAND(READ_REG(pkt, Rxx_op, false), op_NOT_83); + RzILOpPure *op_AND_87 = LOGAND(CAST(64, MSB(VARL("addr")), VARL("addr")), SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_91 = SHIFTL0(op_AND_87, SN(32, 0x20)); + RzILOpPure *op_OR_92 = LOGOR(op_AND_84, op_LSHIFT_91); + RzILOpEffect *op_ASSIGN_93 = WRITE_REG(bundle, Rxx_op, op_OR_92); + + RzILOpEffect *instruction_sequence = SEQN(5, op_ASSIGN_13, op_ASSIGN_23, seq_61, op_ASSIGN_76, op_ASSIGN_93); + return instruction_sequence; +} + +// Rxx = vrmaxuh(Rss,Ru) +RzILOpEffect *hex_il_op_a4_vrmaxuh(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: st32 i; + // Declare: st64 max; + // Declare: st32 addr; + const HexOp *Rxx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Ru_op = ISA2REG(hi, 'u', false); + RzILOpPure *Ru = READ_REG(pkt, Ru_op, false); + + // max = ((st64) ((ut16) ((Rxx >> 
0x0) & ((st64) 0xffff)))); + RzILOpPure *op_RSHIFT_7 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_10 = LOGAND(op_RSHIFT_7, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpEffect *op_ASSIGN_13 = SETL("max", CAST(64, IL_FALSE, CAST(16, IL_FALSE, op_AND_10))); + + // addr = ((st32) ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff)))); + RzILOpPure *op_RSHIFT_17 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_19 = LOGAND(op_RSHIFT_17, SN(64, 0xffffffff)); + RzILOpEffect *op_ASSIGN_23 = SETL("addr", CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_19), DUP(op_AND_19))), CAST(32, MSB(DUP(op_AND_19)), DUP(op_AND_19)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_19)), DUP(op_AND_19))), CAST(32, MSB(DUP(op_AND_19)), DUP(op_AND_19))))); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_25 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_28 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp140 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_30 = SETL("h_tmp140", VARL("i")); + + // seq(h_tmp140 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_31 = SEQN(2, op_ASSIGN_hybrid_tmp_30, op_INC_28); + + // max = ((st64) ((ut16) ((Rss >> i * 0x10) & ((st64) 0xffff)))); + RzILOpPure *op_MUL_43 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_44 = SHIFTRA(Rss, op_MUL_43); + RzILOpPure *op_AND_47 = LOGAND(op_RSHIFT_44, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpEffect *op_ASSIGN_50 = SETL("max", CAST(64, IL_FALSE, CAST(16, IL_FALSE, op_AND_47))); + + // addr = (Ru | (i << 0x1)); + RzILOpPure *op_LSHIFT_53 = SHIFTL0(VARL("i"), SN(32, 1)); + RzILOpPure *op_OR_54 = LOGOR(Ru, op_LSHIFT_53); + RzILOpEffect *op_ASSIGN_55 = SETL("addr", op_OR_54); + + // seq(max = ((st64) ((ut16) ((Rss >> i * 0x10) & ((st64) 0xffff))) ...; + RzILOpEffect *seq_then_56 = SEQN(2, op_ASSIGN_50, op_ASSIGN_55); + + // if ((max < ((st64) ((ut16) ((Rss >> i * 0x10) & ((st64) 0xffff)))))) {seq(max = ((st64) ((ut16) ((Rss >> i * 0x10) & ((st64) 0xffff))) 
...} else {{}}; + RzILOpPure *op_MUL_34 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_35 = SHIFTRA(DUP(Rss), op_MUL_34); + RzILOpPure *op_AND_38 = LOGAND(op_RSHIFT_35, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_LT_41 = SLT(VARL("max"), CAST(64, IL_FALSE, CAST(16, IL_FALSE, op_AND_38))); + RzILOpEffect *branch_57 = BRANCH(op_LT_41, seq_then_56, EMPTY()); + + // seq(h_tmp140; if ((max < ((st64) ((ut16) ((Rss >> i * 0x10) & (( ...; + RzILOpEffect *seq_58 = branch_57; + + // seq(seq(h_tmp140; if ((max < ((st64) ((ut16) ((Rss >> i * 0x10) ...; + RzILOpEffect *seq_59 = SEQN(2, seq_58, seq_31); + + // while ((i < 0x4)) { seq(seq(h_tmp140; if ((max < ((st64) ((ut16) ((Rss >> i * 0x10) ... }; + RzILOpPure *op_LT_27 = SLT(VARL("i"), SN(32, 4)); + RzILOpEffect *for_60 = REPEAT(op_LT_27, seq_59); + + // seq(i = 0x0; while ((i < 0x4)) { seq(seq(h_tmp140; if ((max < (( ...; + RzILOpEffect *seq_61 = SEQN(2, op_ASSIGN_25, for_60); + + // Rxx = ((Rxx & (~(0xffffffff << 0x0))) | ((max & 0xffffffff) << 0x0)); + RzILOpPure *op_LSHIFT_66 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0)); + RzILOpPure *op_NOT_67 = LOGNOT(op_LSHIFT_66); + RzILOpPure *op_AND_68 = LOGAND(READ_REG(pkt, Rxx_op, false), op_NOT_67); + RzILOpPure *op_AND_70 = LOGAND(VARL("max"), SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_74 = SHIFTL0(op_AND_70, SN(32, 0)); + RzILOpPure *op_OR_75 = LOGOR(op_AND_68, op_LSHIFT_74); + RzILOpEffect *op_ASSIGN_76 = WRITE_REG(bundle, Rxx_op, op_OR_75); + + // Rxx = ((Rxx & (~(0xffffffff << 0x20))) | ((((st64) addr) & 0xffffffff) << 0x20)); + RzILOpPure *op_LSHIFT_82 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0x20)); + RzILOpPure *op_NOT_83 = LOGNOT(op_LSHIFT_82); + RzILOpPure *op_AND_84 = LOGAND(READ_REG(pkt, Rxx_op, false), op_NOT_83); + RzILOpPure *op_AND_87 = LOGAND(CAST(64, MSB(VARL("addr")), VARL("addr")), SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_91 = SHIFTL0(op_AND_87, SN(32, 0x20)); + RzILOpPure *op_OR_92 = LOGOR(op_AND_84, op_LSHIFT_91); + RzILOpEffect 
*op_ASSIGN_93 = WRITE_REG(bundle, Rxx_op, op_OR_92); + + RzILOpEffect *instruction_sequence = SEQN(5, op_ASSIGN_13, op_ASSIGN_23, seq_61, op_ASSIGN_76, op_ASSIGN_93); + return instruction_sequence; +} + +// Rxx = vrmaxuw(Rss,Ru) +RzILOpEffect *hex_il_op_a4_vrmaxuw(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: st32 i; + // Declare: st64 max; + // Declare: st32 addr; + const HexOp *Rxx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Ru_op = ISA2REG(hi, 'u', false); + RzILOpPure *Ru = READ_REG(pkt, Ru_op, false); + + // max = ((st64) ((ut64) ((ut32) ((Rxx >> 0x0) & 0xffffffff)))); + RzILOpPure *op_RSHIFT_7 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_9 = LOGAND(op_RSHIFT_7, SN(64, 0xffffffff)); + RzILOpEffect *op_ASSIGN_13 = SETL("max", CAST(64, IL_FALSE, CAST(64, IL_FALSE, CAST(32, IL_FALSE, op_AND_9)))); + + // addr = ((st32) ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff)))); + RzILOpPure *op_RSHIFT_17 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_19 = LOGAND(op_RSHIFT_17, SN(64, 0xffffffff)); + RzILOpEffect *op_ASSIGN_23 = SETL("addr", CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_19), DUP(op_AND_19))), CAST(32, MSB(DUP(op_AND_19)), DUP(op_AND_19)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_19)), DUP(op_AND_19))), CAST(32, MSB(DUP(op_AND_19)), DUP(op_AND_19))))); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_25 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_28 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp141 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_30 = SETL("h_tmp141", VARL("i")); + + // seq(h_tmp141 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_31 = SEQN(2, op_ASSIGN_hybrid_tmp_30, op_INC_28); + + // max = ((st64) ((ut64) ((ut32) ((Rss >> i * 0x20) & 0xffffffff)))); + RzILOpPure *op_MUL_43 = MUL(VARL("i"), SN(32, 0x20)); + 
RzILOpPure *op_RSHIFT_44 = SHIFTRA(Rss, op_MUL_43); + RzILOpPure *op_AND_46 = LOGAND(op_RSHIFT_44, SN(64, 0xffffffff)); + RzILOpEffect *op_ASSIGN_50 = SETL("max", CAST(64, IL_FALSE, CAST(64, IL_FALSE, CAST(32, IL_FALSE, op_AND_46)))); + + // addr = (Ru | (i << 0x2)); + RzILOpPure *op_LSHIFT_53 = SHIFTL0(VARL("i"), SN(32, 2)); + RzILOpPure *op_OR_54 = LOGOR(Ru, op_LSHIFT_53); + RzILOpEffect *op_ASSIGN_55 = SETL("addr", op_OR_54); + + // seq(max = ((st64) ((ut64) ((ut32) ((Rss >> i * 0x20) & 0xfffffff ...; + RzILOpEffect *seq_then_56 = SEQN(2, op_ASSIGN_50, op_ASSIGN_55); + + // if ((((ut64) max) < ((ut64) ((ut32) ((Rss >> i * 0x20) & 0xffffffff))))) {seq(max = ((st64) ((ut64) ((ut32) ((Rss >> i * 0x20) & 0xfffffff ...} else {{}}; + RzILOpPure *op_MUL_34 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_RSHIFT_35 = SHIFTRA(DUP(Rss), op_MUL_34); + RzILOpPure *op_AND_37 = LOGAND(op_RSHIFT_35, SN(64, 0xffffffff)); + RzILOpPure *op_LT_41 = ULT(CAST(64, IL_FALSE, VARL("max")), CAST(64, IL_FALSE, CAST(32, IL_FALSE, op_AND_37))); + RzILOpEffect *branch_57 = BRANCH(op_LT_41, seq_then_56, EMPTY()); + + // seq(h_tmp141; if ((((ut64) max) < ((ut64) ((ut32) ((Rss >> i * 0 ...; + RzILOpEffect *seq_58 = branch_57; + + // seq(seq(h_tmp141; if ((((ut64) max) < ((ut64) ((ut32) ((Rss >> i ...; + RzILOpEffect *seq_59 = SEQN(2, seq_58, seq_31); + + // while ((i < 0x2)) { seq(seq(h_tmp141; if ((((ut64) max) < ((ut64) ((ut32) ((Rss >> i ... 
}; + RzILOpPure *op_LT_27 = SLT(VARL("i"), SN(32, 2)); + RzILOpEffect *for_60 = REPEAT(op_LT_27, seq_59); + + // seq(i = 0x0; while ((i < 0x2)) { seq(seq(h_tmp141; if ((((ut64) ...; + RzILOpEffect *seq_61 = SEQN(2, op_ASSIGN_25, for_60); + + // Rxx = ((Rxx & (~(0xffffffff << 0x0))) | ((max & 0xffffffff) << 0x0)); + RzILOpPure *op_LSHIFT_66 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0)); + RzILOpPure *op_NOT_67 = LOGNOT(op_LSHIFT_66); + RzILOpPure *op_AND_68 = LOGAND(READ_REG(pkt, Rxx_op, false), op_NOT_67); + RzILOpPure *op_AND_70 = LOGAND(VARL("max"), SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_74 = SHIFTL0(op_AND_70, SN(32, 0)); + RzILOpPure *op_OR_75 = LOGOR(op_AND_68, op_LSHIFT_74); + RzILOpEffect *op_ASSIGN_76 = WRITE_REG(bundle, Rxx_op, op_OR_75); + + // Rxx = ((Rxx & (~(0xffffffff << 0x20))) | ((((st64) addr) & 0xffffffff) << 0x20)); + RzILOpPure *op_LSHIFT_82 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0x20)); + RzILOpPure *op_NOT_83 = LOGNOT(op_LSHIFT_82); + RzILOpPure *op_AND_84 = LOGAND(READ_REG(pkt, Rxx_op, false), op_NOT_83); + RzILOpPure *op_AND_87 = LOGAND(CAST(64, MSB(VARL("addr")), VARL("addr")), SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_91 = SHIFTL0(op_AND_87, SN(32, 0x20)); + RzILOpPure *op_OR_92 = LOGOR(op_AND_84, op_LSHIFT_91); + RzILOpEffect *op_ASSIGN_93 = WRITE_REG(bundle, Rxx_op, op_OR_92); + + RzILOpEffect *instruction_sequence = SEQN(5, op_ASSIGN_13, op_ASSIGN_23, seq_61, op_ASSIGN_76, op_ASSIGN_93); + return instruction_sequence; +} + +// Rxx = vrmaxw(Rss,Ru) +RzILOpEffect *hex_il_op_a4_vrmaxw(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: st32 i; + // Declare: st64 max; + // Declare: st32 addr; + const HexOp *Rxx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Ru_op = ISA2REG(hi, 'u', false); + RzILOpPure *Ru = READ_REG(pkt, Ru_op, false); + + // max = ((st64) ((st32) ((Rxx >> 
0x0) & 0xffffffff))); + RzILOpPure *op_RSHIFT_7 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_9 = LOGAND(op_RSHIFT_7, SN(64, 0xffffffff)); + RzILOpEffect *op_ASSIGN_12 = SETL("max", CAST(64, MSB(CAST(32, MSB(op_AND_9), DUP(op_AND_9))), CAST(32, MSB(DUP(op_AND_9)), DUP(op_AND_9)))); + + // addr = ((st32) ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff)))); + RzILOpPure *op_RSHIFT_16 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_18 = LOGAND(op_RSHIFT_16, SN(64, 0xffffffff)); + RzILOpEffect *op_ASSIGN_22 = SETL("addr", CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_18), DUP(op_AND_18))), CAST(32, MSB(DUP(op_AND_18)), DUP(op_AND_18)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_18)), DUP(op_AND_18))), CAST(32, MSB(DUP(op_AND_18)), DUP(op_AND_18))))); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_24 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_27 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp142 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_29 = SETL("h_tmp142", VARL("i")); + + // seq(h_tmp142 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_30 = SEQN(2, op_ASSIGN_hybrid_tmp_29, op_INC_27); + + // max = ((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff))); + RzILOpPure *op_MUL_41 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_RSHIFT_42 = SHIFTRA(Rss, op_MUL_41); + RzILOpPure *op_AND_44 = LOGAND(op_RSHIFT_42, SN(64, 0xffffffff)); + RzILOpEffect *op_ASSIGN_47 = SETL("max", CAST(64, MSB(CAST(32, MSB(op_AND_44), DUP(op_AND_44))), CAST(32, MSB(DUP(op_AND_44)), DUP(op_AND_44)))); + + // addr = (Ru | (i << 0x2)); + RzILOpPure *op_LSHIFT_50 = SHIFTL0(VARL("i"), SN(32, 2)); + RzILOpPure *op_OR_51 = LOGOR(Ru, op_LSHIFT_50); + RzILOpEffect *op_ASSIGN_52 = SETL("addr", op_OR_51); + + // seq(max = ((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff))); ad ...; + RzILOpEffect *seq_then_53 = SEQN(2, op_ASSIGN_47, op_ASSIGN_52); + + // if ((max < ((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff))))) {seq(max = ((st64) ((st32) ((Rss 
>> i * 0x20) & 0xffffffff))); ad ...} else {{}}; + RzILOpPure *op_MUL_33 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_RSHIFT_34 = SHIFTRA(DUP(Rss), op_MUL_33); + RzILOpPure *op_AND_36 = LOGAND(op_RSHIFT_34, SN(64, 0xffffffff)); + RzILOpPure *op_LT_39 = SLT(VARL("max"), CAST(64, MSB(CAST(32, MSB(op_AND_36), DUP(op_AND_36))), CAST(32, MSB(DUP(op_AND_36)), DUP(op_AND_36)))); + RzILOpEffect *branch_54 = BRANCH(op_LT_39, seq_then_53, EMPTY()); + + // seq(h_tmp142; if ((max < ((st64) ((st32) ((Rss >> i * 0x20) & 0x ...; + RzILOpEffect *seq_55 = branch_54; + + // seq(seq(h_tmp142; if ((max < ((st64) ((st32) ((Rss >> i * 0x20) ...; + RzILOpEffect *seq_56 = SEQN(2, seq_55, seq_30); + + // while ((i < 0x2)) { seq(seq(h_tmp142; if ((max < ((st64) ((st32) ((Rss >> i * 0x20) ... }; + RzILOpPure *op_LT_26 = SLT(VARL("i"), SN(32, 2)); + RzILOpEffect *for_57 = REPEAT(op_LT_26, seq_56); + + // seq(i = 0x0; while ((i < 0x2)) { seq(seq(h_tmp142; if ((max < (( ...; + RzILOpEffect *seq_58 = SEQN(2, op_ASSIGN_24, for_57); + + // Rxx = ((Rxx & (~(0xffffffff << 0x0))) | ((max & 0xffffffff) << 0x0)); + RzILOpPure *op_LSHIFT_63 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0)); + RzILOpPure *op_NOT_64 = LOGNOT(op_LSHIFT_63); + RzILOpPure *op_AND_65 = LOGAND(READ_REG(pkt, Rxx_op, false), op_NOT_64); + RzILOpPure *op_AND_67 = LOGAND(VARL("max"), SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_71 = SHIFTL0(op_AND_67, SN(32, 0)); + RzILOpPure *op_OR_72 = LOGOR(op_AND_65, op_LSHIFT_71); + RzILOpEffect *op_ASSIGN_73 = WRITE_REG(bundle, Rxx_op, op_OR_72); + + // Rxx = ((Rxx & (~(0xffffffff << 0x20))) | ((((st64) addr) & 0xffffffff) << 0x20)); + RzILOpPure *op_LSHIFT_79 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0x20)); + RzILOpPure *op_NOT_80 = LOGNOT(op_LSHIFT_79); + RzILOpPure *op_AND_81 = LOGAND(READ_REG(pkt, Rxx_op, false), op_NOT_80); + RzILOpPure *op_AND_84 = LOGAND(CAST(64, MSB(VARL("addr")), VARL("addr")), SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_88 = SHIFTL0(op_AND_84, SN(32, 0x20)); + RzILOpPure 
*op_OR_89 = LOGOR(op_AND_81, op_LSHIFT_88); + RzILOpEffect *op_ASSIGN_90 = WRITE_REG(bundle, Rxx_op, op_OR_89); + + RzILOpEffect *instruction_sequence = SEQN(5, op_ASSIGN_12, op_ASSIGN_22, seq_58, op_ASSIGN_73, op_ASSIGN_90); + return instruction_sequence; +} + +// Rxx = vrminh(Rss,Ru) +RzILOpEffect *hex_il_op_a4_vrminh(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: st32 i; + // Declare: st64 min; + // Declare: st32 addr; + const HexOp *Rxx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Ru_op = ISA2REG(hi, 'u', false); + RzILOpPure *Ru = READ_REG(pkt, Ru_op, false); + + // min = ((st64) ((st16) ((Rxx >> 0x0) & ((st64) 0xffff)))); + RzILOpPure *op_RSHIFT_7 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_10 = LOGAND(op_RSHIFT_7, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpEffect *op_ASSIGN_13 = SETL("min", CAST(64, MSB(CAST(16, MSB(op_AND_10), DUP(op_AND_10))), CAST(16, MSB(DUP(op_AND_10)), DUP(op_AND_10)))); + + // addr = ((st32) ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff)))); + RzILOpPure *op_RSHIFT_17 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_19 = LOGAND(op_RSHIFT_17, SN(64, 0xffffffff)); + RzILOpEffect *op_ASSIGN_23 = SETL("addr", CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_19), DUP(op_AND_19))), CAST(32, MSB(DUP(op_AND_19)), DUP(op_AND_19)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_19)), DUP(op_AND_19))), CAST(32, MSB(DUP(op_AND_19)), DUP(op_AND_19))))); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_25 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_28 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp143 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_30 = SETL("h_tmp143", VARL("i")); + + // seq(h_tmp143 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_31 = SEQN(2, op_ASSIGN_hybrid_tmp_30, op_INC_28); + + // min = ((st64) 
((st16) ((Rss >> i * 0x10) & ((st64) 0xffff)))); + RzILOpPure *op_MUL_43 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_44 = SHIFTRA(Rss, op_MUL_43); + RzILOpPure *op_AND_47 = LOGAND(op_RSHIFT_44, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpEffect *op_ASSIGN_50 = SETL("min", CAST(64, MSB(CAST(16, MSB(op_AND_47), DUP(op_AND_47))), CAST(16, MSB(DUP(op_AND_47)), DUP(op_AND_47)))); + + // addr = (Ru | (i << 0x1)); + RzILOpPure *op_LSHIFT_53 = SHIFTL0(VARL("i"), SN(32, 1)); + RzILOpPure *op_OR_54 = LOGOR(Ru, op_LSHIFT_53); + RzILOpEffect *op_ASSIGN_55 = SETL("addr", op_OR_54); + + // seq(min = ((st64) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff))) ...; + RzILOpEffect *seq_then_56 = SEQN(2, op_ASSIGN_50, op_ASSIGN_55); + + // if ((min > ((st64) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff)))))) {seq(min = ((st64) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff))) ...} else {{}}; + RzILOpPure *op_MUL_34 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_35 = SHIFTRA(DUP(Rss), op_MUL_34); + RzILOpPure *op_AND_38 = LOGAND(op_RSHIFT_35, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_GT_41 = SGT(VARL("min"), CAST(64, MSB(CAST(16, MSB(op_AND_38), DUP(op_AND_38))), CAST(16, MSB(DUP(op_AND_38)), DUP(op_AND_38)))); + RzILOpEffect *branch_57 = BRANCH(op_GT_41, seq_then_56, EMPTY()); + + // seq(h_tmp143; if ((min > ((st64) ((st16) ((Rss >> i * 0x10) & (( ...; + RzILOpEffect *seq_58 = branch_57; + + // seq(seq(h_tmp143; if ((min > ((st64) ((st16) ((Rss >> i * 0x10) ...; + RzILOpEffect *seq_59 = SEQN(2, seq_58, seq_31); + + // while ((i < 0x4)) { seq(seq(h_tmp143; if ((min > ((st64) ((st16) ((Rss >> i * 0x10) ... 
}; + RzILOpPure *op_LT_27 = SLT(VARL("i"), SN(32, 4)); + RzILOpEffect *for_60 = REPEAT(op_LT_27, seq_59); + + // seq(i = 0x0; while ((i < 0x4)) { seq(seq(h_tmp143; if ((min > (( ...; + RzILOpEffect *seq_61 = SEQN(2, op_ASSIGN_25, for_60); + + // Rxx = ((Rxx & (~(0xffffffff << 0x0))) | ((min & 0xffffffff) << 0x0)); + RzILOpPure *op_LSHIFT_66 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0)); + RzILOpPure *op_NOT_67 = LOGNOT(op_LSHIFT_66); + RzILOpPure *op_AND_68 = LOGAND(READ_REG(pkt, Rxx_op, false), op_NOT_67); + RzILOpPure *op_AND_70 = LOGAND(VARL("min"), SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_74 = SHIFTL0(op_AND_70, SN(32, 0)); + RzILOpPure *op_OR_75 = LOGOR(op_AND_68, op_LSHIFT_74); + RzILOpEffect *op_ASSIGN_76 = WRITE_REG(bundle, Rxx_op, op_OR_75); + + // Rxx = ((Rxx & (~(0xffffffff << 0x20))) | ((((st64) addr) & 0xffffffff) << 0x20)); + RzILOpPure *op_LSHIFT_82 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0x20)); + RzILOpPure *op_NOT_83 = LOGNOT(op_LSHIFT_82); + RzILOpPure *op_AND_84 = LOGAND(READ_REG(pkt, Rxx_op, false), op_NOT_83); + RzILOpPure *op_AND_87 = LOGAND(CAST(64, MSB(VARL("addr")), VARL("addr")), SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_91 = SHIFTL0(op_AND_87, SN(32, 0x20)); + RzILOpPure *op_OR_92 = LOGOR(op_AND_84, op_LSHIFT_91); + RzILOpEffect *op_ASSIGN_93 = WRITE_REG(bundle, Rxx_op, op_OR_92); + + RzILOpEffect *instruction_sequence = SEQN(5, op_ASSIGN_13, op_ASSIGN_23, seq_61, op_ASSIGN_76, op_ASSIGN_93); + return instruction_sequence; +} + +// Rxx = vrminuh(Rss,Ru) +RzILOpEffect *hex_il_op_a4_vrminuh(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: st32 i; + // Declare: st64 min; + // Declare: st32 addr; + const HexOp *Rxx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Ru_op = ISA2REG(hi, 'u', false); + RzILOpPure *Ru = READ_REG(pkt, Ru_op, false); + + // min = ((st64) ((ut16) ((Rxx >> 
0x0) & ((st64) 0xffff)))); + RzILOpPure *op_RSHIFT_7 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_10 = LOGAND(op_RSHIFT_7, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpEffect *op_ASSIGN_13 = SETL("min", CAST(64, IL_FALSE, CAST(16, IL_FALSE, op_AND_10))); + + // addr = ((st32) ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff)))); + RzILOpPure *op_RSHIFT_17 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_19 = LOGAND(op_RSHIFT_17, SN(64, 0xffffffff)); + RzILOpEffect *op_ASSIGN_23 = SETL("addr", CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_19), DUP(op_AND_19))), CAST(32, MSB(DUP(op_AND_19)), DUP(op_AND_19)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_19)), DUP(op_AND_19))), CAST(32, MSB(DUP(op_AND_19)), DUP(op_AND_19))))); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_25 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_28 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp144 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_30 = SETL("h_tmp144", VARL("i")); + + // seq(h_tmp144 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_31 = SEQN(2, op_ASSIGN_hybrid_tmp_30, op_INC_28); + + // min = ((st64) ((ut16) ((Rss >> i * 0x10) & ((st64) 0xffff)))); + RzILOpPure *op_MUL_43 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_44 = SHIFTRA(Rss, op_MUL_43); + RzILOpPure *op_AND_47 = LOGAND(op_RSHIFT_44, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpEffect *op_ASSIGN_50 = SETL("min", CAST(64, IL_FALSE, CAST(16, IL_FALSE, op_AND_47))); + + // addr = (Ru | (i << 0x1)); + RzILOpPure *op_LSHIFT_53 = SHIFTL0(VARL("i"), SN(32, 1)); + RzILOpPure *op_OR_54 = LOGOR(Ru, op_LSHIFT_53); + RzILOpEffect *op_ASSIGN_55 = SETL("addr", op_OR_54); + + // seq(min = ((st64) ((ut16) ((Rss >> i * 0x10) & ((st64) 0xffff))) ...; + RzILOpEffect *seq_then_56 = SEQN(2, op_ASSIGN_50, op_ASSIGN_55); + + // if ((min > ((st64) ((ut16) ((Rss >> i * 0x10) & ((st64) 0xffff)))))) {seq(min = ((st64) ((ut16) ((Rss >> i * 0x10) & ((st64) 0xffff))) 
...} else {{}}; + RzILOpPure *op_MUL_34 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_35 = SHIFTRA(DUP(Rss), op_MUL_34); + RzILOpPure *op_AND_38 = LOGAND(op_RSHIFT_35, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_GT_41 = SGT(VARL("min"), CAST(64, IL_FALSE, CAST(16, IL_FALSE, op_AND_38))); + RzILOpEffect *branch_57 = BRANCH(op_GT_41, seq_then_56, EMPTY()); + + // seq(h_tmp144; if ((min > ((st64) ((ut16) ((Rss >> i * 0x10) & (( ...; + RzILOpEffect *seq_58 = branch_57; + + // seq(seq(h_tmp144; if ((min > ((st64) ((ut16) ((Rss >> i * 0x10) ...; + RzILOpEffect *seq_59 = SEQN(2, seq_58, seq_31); + + // while ((i < 0x4)) { seq(seq(h_tmp144; if ((min > ((st64) ((ut16) ((Rss >> i * 0x10) ... }; + RzILOpPure *op_LT_27 = SLT(VARL("i"), SN(32, 4)); + RzILOpEffect *for_60 = REPEAT(op_LT_27, seq_59); + + // seq(i = 0x0; while ((i < 0x4)) { seq(seq(h_tmp144; if ((min > (( ...; + RzILOpEffect *seq_61 = SEQN(2, op_ASSIGN_25, for_60); + + // Rxx = ((Rxx & (~(0xffffffff << 0x0))) | ((min & 0xffffffff) << 0x0)); + RzILOpPure *op_LSHIFT_66 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0)); + RzILOpPure *op_NOT_67 = LOGNOT(op_LSHIFT_66); + RzILOpPure *op_AND_68 = LOGAND(READ_REG(pkt, Rxx_op, false), op_NOT_67); + RzILOpPure *op_AND_70 = LOGAND(VARL("min"), SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_74 = SHIFTL0(op_AND_70, SN(32, 0)); + RzILOpPure *op_OR_75 = LOGOR(op_AND_68, op_LSHIFT_74); + RzILOpEffect *op_ASSIGN_76 = WRITE_REG(bundle, Rxx_op, op_OR_75); + + // Rxx = ((Rxx & (~(0xffffffff << 0x20))) | ((((st64) addr) & 0xffffffff) << 0x20)); + RzILOpPure *op_LSHIFT_82 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0x20)); + RzILOpPure *op_NOT_83 = LOGNOT(op_LSHIFT_82); + RzILOpPure *op_AND_84 = LOGAND(READ_REG(pkt, Rxx_op, false), op_NOT_83); + RzILOpPure *op_AND_87 = LOGAND(CAST(64, MSB(VARL("addr")), VARL("addr")), SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_91 = SHIFTL0(op_AND_87, SN(32, 0x20)); + RzILOpPure *op_OR_92 = LOGOR(op_AND_84, op_LSHIFT_91); + RzILOpEffect 
*op_ASSIGN_93 = WRITE_REG(bundle, Rxx_op, op_OR_92); + + RzILOpEffect *instruction_sequence = SEQN(5, op_ASSIGN_13, op_ASSIGN_23, seq_61, op_ASSIGN_76, op_ASSIGN_93); + return instruction_sequence; +} + +// Rxx = vrminuw(Rss,Ru) +RzILOpEffect *hex_il_op_a4_vrminuw(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: st32 i; + // Declare: st64 min; + // Declare: st32 addr; + const HexOp *Rxx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Ru_op = ISA2REG(hi, 'u', false); + RzILOpPure *Ru = READ_REG(pkt, Ru_op, false); + + // min = ((st64) ((ut64) ((ut32) ((Rxx >> 0x0) & 0xffffffff)))); + RzILOpPure *op_RSHIFT_7 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_9 = LOGAND(op_RSHIFT_7, SN(64, 0xffffffff)); + RzILOpEffect *op_ASSIGN_13 = SETL("min", CAST(64, IL_FALSE, CAST(64, IL_FALSE, CAST(32, IL_FALSE, op_AND_9)))); + + // addr = ((st32) ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff)))); + RzILOpPure *op_RSHIFT_17 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_19 = LOGAND(op_RSHIFT_17, SN(64, 0xffffffff)); + RzILOpEffect *op_ASSIGN_23 = SETL("addr", CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_19), DUP(op_AND_19))), CAST(32, MSB(DUP(op_AND_19)), DUP(op_AND_19)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_19)), DUP(op_AND_19))), CAST(32, MSB(DUP(op_AND_19)), DUP(op_AND_19))))); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_25 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_28 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp145 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_30 = SETL("h_tmp145", VARL("i")); + + // seq(h_tmp145 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_31 = SEQN(2, op_ASSIGN_hybrid_tmp_30, op_INC_28); + + // min = ((st64) ((ut64) ((ut32) ((Rss >> i * 0x20) & 0xffffffff)))); + RzILOpPure *op_MUL_43 = MUL(VARL("i"), SN(32, 0x20)); + 
RzILOpPure *op_RSHIFT_44 = SHIFTRA(Rss, op_MUL_43); + RzILOpPure *op_AND_46 = LOGAND(op_RSHIFT_44, SN(64, 0xffffffff)); + RzILOpEffect *op_ASSIGN_50 = SETL("min", CAST(64, IL_FALSE, CAST(64, IL_FALSE, CAST(32, IL_FALSE, op_AND_46)))); + + // addr = (Ru | (i << 0x2)); + RzILOpPure *op_LSHIFT_53 = SHIFTL0(VARL("i"), SN(32, 2)); + RzILOpPure *op_OR_54 = LOGOR(Ru, op_LSHIFT_53); + RzILOpEffect *op_ASSIGN_55 = SETL("addr", op_OR_54); + + // seq(min = ((st64) ((ut64) ((ut32) ((Rss >> i * 0x20) & 0xfffffff ...; + RzILOpEffect *seq_then_56 = SEQN(2, op_ASSIGN_50, op_ASSIGN_55); + + // if ((((ut64) min) > ((ut64) ((ut32) ((Rss >> i * 0x20) & 0xffffffff))))) {seq(min = ((st64) ((ut64) ((ut32) ((Rss >> i * 0x20) & 0xfffffff ...} else {{}}; + RzILOpPure *op_MUL_34 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_RSHIFT_35 = SHIFTRA(DUP(Rss), op_MUL_34); + RzILOpPure *op_AND_37 = LOGAND(op_RSHIFT_35, SN(64, 0xffffffff)); + RzILOpPure *op_GT_41 = UGT(CAST(64, IL_FALSE, VARL("min")), CAST(64, IL_FALSE, CAST(32, IL_FALSE, op_AND_37))); + RzILOpEffect *branch_57 = BRANCH(op_GT_41, seq_then_56, EMPTY()); + + // seq(h_tmp145; if ((((ut64) min) > ((ut64) ((ut32) ((Rss >> i * 0 ...; + RzILOpEffect *seq_58 = branch_57; + + // seq(seq(h_tmp145; if ((((ut64) min) > ((ut64) ((ut32) ((Rss >> i ...; + RzILOpEffect *seq_59 = SEQN(2, seq_58, seq_31); + + // while ((i < 0x2)) { seq(seq(h_tmp145; if ((((ut64) min) > ((ut64) ((ut32) ((Rss >> i ... 
}; + RzILOpPure *op_LT_27 = SLT(VARL("i"), SN(32, 2)); + RzILOpEffect *for_60 = REPEAT(op_LT_27, seq_59); + + // seq(i = 0x0; while ((i < 0x2)) { seq(seq(h_tmp145; if ((((ut64) ...; + RzILOpEffect *seq_61 = SEQN(2, op_ASSIGN_25, for_60); + + // Rxx = ((Rxx & (~(0xffffffff << 0x0))) | ((min & 0xffffffff) << 0x0)); + RzILOpPure *op_LSHIFT_66 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0)); + RzILOpPure *op_NOT_67 = LOGNOT(op_LSHIFT_66); + RzILOpPure *op_AND_68 = LOGAND(READ_REG(pkt, Rxx_op, false), op_NOT_67); + RzILOpPure *op_AND_70 = LOGAND(VARL("min"), SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_74 = SHIFTL0(op_AND_70, SN(32, 0)); + RzILOpPure *op_OR_75 = LOGOR(op_AND_68, op_LSHIFT_74); + RzILOpEffect *op_ASSIGN_76 = WRITE_REG(bundle, Rxx_op, op_OR_75); + + // Rxx = ((Rxx & (~(0xffffffff << 0x20))) | ((((st64) addr) & 0xffffffff) << 0x20)); + RzILOpPure *op_LSHIFT_82 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0x20)); + RzILOpPure *op_NOT_83 = LOGNOT(op_LSHIFT_82); + RzILOpPure *op_AND_84 = LOGAND(READ_REG(pkt, Rxx_op, false), op_NOT_83); + RzILOpPure *op_AND_87 = LOGAND(CAST(64, MSB(VARL("addr")), VARL("addr")), SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_91 = SHIFTL0(op_AND_87, SN(32, 0x20)); + RzILOpPure *op_OR_92 = LOGOR(op_AND_84, op_LSHIFT_91); + RzILOpEffect *op_ASSIGN_93 = WRITE_REG(bundle, Rxx_op, op_OR_92); + + RzILOpEffect *instruction_sequence = SEQN(5, op_ASSIGN_13, op_ASSIGN_23, seq_61, op_ASSIGN_76, op_ASSIGN_93); + return instruction_sequence; +} + +// Rxx = vrminw(Rss,Ru) +RzILOpEffect *hex_il_op_a4_vrminw(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: st32 i; + // Declare: st64 min; + // Declare: st32 addr; + const HexOp *Rxx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Ru_op = ISA2REG(hi, 'u', false); + RzILOpPure *Ru = READ_REG(pkt, Ru_op, false); + + // min = ((st64) ((st32) ((Rxx >> 
0x0) & 0xffffffff))); + RzILOpPure *op_RSHIFT_7 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_9 = LOGAND(op_RSHIFT_7, SN(64, 0xffffffff)); + RzILOpEffect *op_ASSIGN_12 = SETL("min", CAST(64, MSB(CAST(32, MSB(op_AND_9), DUP(op_AND_9))), CAST(32, MSB(DUP(op_AND_9)), DUP(op_AND_9)))); + + // addr = ((st32) ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff)))); + RzILOpPure *op_RSHIFT_16 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_18 = LOGAND(op_RSHIFT_16, SN(64, 0xffffffff)); + RzILOpEffect *op_ASSIGN_22 = SETL("addr", CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_18), DUP(op_AND_18))), CAST(32, MSB(DUP(op_AND_18)), DUP(op_AND_18)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_18)), DUP(op_AND_18))), CAST(32, MSB(DUP(op_AND_18)), DUP(op_AND_18))))); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_24 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_27 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp146 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_29 = SETL("h_tmp146", VARL("i")); + + // seq(h_tmp146 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_30 = SEQN(2, op_ASSIGN_hybrid_tmp_29, op_INC_27); + + // min = ((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff))); + RzILOpPure *op_MUL_41 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_RSHIFT_42 = SHIFTRA(Rss, op_MUL_41); + RzILOpPure *op_AND_44 = LOGAND(op_RSHIFT_42, SN(64, 0xffffffff)); + RzILOpEffect *op_ASSIGN_47 = SETL("min", CAST(64, MSB(CAST(32, MSB(op_AND_44), DUP(op_AND_44))), CAST(32, MSB(DUP(op_AND_44)), DUP(op_AND_44)))); + + // addr = (Ru | (i << 0x2)); + RzILOpPure *op_LSHIFT_50 = SHIFTL0(VARL("i"), SN(32, 2)); + RzILOpPure *op_OR_51 = LOGOR(Ru, op_LSHIFT_50); + RzILOpEffect *op_ASSIGN_52 = SETL("addr", op_OR_51); + + // seq(min = ((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff))); ad ...; + RzILOpEffect *seq_then_53 = SEQN(2, op_ASSIGN_47, op_ASSIGN_52); + + // if ((min > ((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff))))) {seq(min = ((st64) ((st32) ((Rss 
>> i * 0x20) & 0xffffffff))); ad ...} else {{}}; + RzILOpPure *op_MUL_33 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_RSHIFT_34 = SHIFTRA(DUP(Rss), op_MUL_33); + RzILOpPure *op_AND_36 = LOGAND(op_RSHIFT_34, SN(64, 0xffffffff)); + RzILOpPure *op_GT_39 = SGT(VARL("min"), CAST(64, MSB(CAST(32, MSB(op_AND_36), DUP(op_AND_36))), CAST(32, MSB(DUP(op_AND_36)), DUP(op_AND_36)))); + RzILOpEffect *branch_54 = BRANCH(op_GT_39, seq_then_53, EMPTY()); + + // seq(h_tmp146; if ((min > ((st64) ((st32) ((Rss >> i * 0x20) & 0x ...; + RzILOpEffect *seq_55 = branch_54; + + // seq(seq(h_tmp146; if ((min > ((st64) ((st32) ((Rss >> i * 0x20) ...; + RzILOpEffect *seq_56 = SEQN(2, seq_55, seq_30); + + // while ((i < 0x2)) { seq(seq(h_tmp146; if ((min > ((st64) ((st32) ((Rss >> i * 0x20) ... }; + RzILOpPure *op_LT_26 = SLT(VARL("i"), SN(32, 2)); + RzILOpEffect *for_57 = REPEAT(op_LT_26, seq_56); + + // seq(i = 0x0; while ((i < 0x2)) { seq(seq(h_tmp146; if ((min > (( ...; + RzILOpEffect *seq_58 = SEQN(2, op_ASSIGN_24, for_57); + + // Rxx = ((Rxx & (~(0xffffffff << 0x0))) | ((min & 0xffffffff) << 0x0)); + RzILOpPure *op_LSHIFT_63 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0)); + RzILOpPure *op_NOT_64 = LOGNOT(op_LSHIFT_63); + RzILOpPure *op_AND_65 = LOGAND(READ_REG(pkt, Rxx_op, false), op_NOT_64); + RzILOpPure *op_AND_67 = LOGAND(VARL("min"), SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_71 = SHIFTL0(op_AND_67, SN(32, 0)); + RzILOpPure *op_OR_72 = LOGOR(op_AND_65, op_LSHIFT_71); + RzILOpEffect *op_ASSIGN_73 = WRITE_REG(bundle, Rxx_op, op_OR_72); + + // Rxx = ((Rxx & (~(0xffffffff << 0x20))) | ((((st64) addr) & 0xffffffff) << 0x20)); + RzILOpPure *op_LSHIFT_79 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0x20)); + RzILOpPure *op_NOT_80 = LOGNOT(op_LSHIFT_79); + RzILOpPure *op_AND_81 = LOGAND(READ_REG(pkt, Rxx_op, false), op_NOT_80); + RzILOpPure *op_AND_84 = LOGAND(CAST(64, MSB(VARL("addr")), VARL("addr")), SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_88 = SHIFTL0(op_AND_84, SN(32, 0x20)); + RzILOpPure 
*op_OR_89 = LOGOR(op_AND_81, op_LSHIFT_88); + RzILOpEffect *op_ASSIGN_90 = WRITE_REG(bundle, Rxx_op, op_OR_89); + + RzILOpEffect *instruction_sequence = SEQN(5, op_ASSIGN_12, op_ASSIGN_22, seq_58, op_ASSIGN_73, op_ASSIGN_90); + return instruction_sequence; +} + +#include \ No newline at end of file diff --git a/librz/arch/isa/hexagon/il_ops/hexagon_il_A5_ops.c b/librz/arch/isa/hexagon/il_ops/hexagon_il_A5_ops.c new file mode 100644 index 00000000000..4b125b33137 --- /dev/null +++ b/librz/arch/isa/hexagon/il_ops/hexagon_il_A5_ops.c @@ -0,0 +1,258 @@ +// SPDX-FileCopyrightText: 2021 Rot127 +// SPDX-License-Identifier: LGPL-3.0-only + +// LLVM commit: b6f51787f6c8e77143f0aef6b58ddc7c55741d5c +// LLVM commit date: 2023-11-15 07:10:59 -0800 (ISO 8601 format) +// Date of code generation: 2024-03-16 06:22:39-05:00 +//======================================== +// The following code is generated. +// Do not edit. Repository of code generator: +// https://github.com/rizinorg/rz-hexagon + +#include +#include "../hexagon_il.h" +#include +#include + +// Rxx,Pe = vacsh(Rss,Rtt) +RzILOpEffect *hex_il_op_a5_acs(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: st32 i; + // Declare: st32 xv; + // Declare: st32 sv; + // Declare: st32 tv; + const HexOp *Rxx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + const HexOp *Pe_op = ISA2REG(hi, 'e', false); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_5 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_8 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp147 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_10 = SETL("h_tmp147", VARL("i")); + + // seq(h_tmp147 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_11 = SEQN(2, op_ASSIGN_hybrid_tmp_10, op_INC_8); + + // xv = ((st32) ((st16) ((Rxx >> i * 
0x10) & ((st64) 0xffff)))); + RzILOpPure *op_MUL_14 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_15 = SHIFTRA(READ_REG(pkt, Rxx_op, false), op_MUL_14); + RzILOpPure *op_AND_18 = LOGAND(op_RSHIFT_15, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpEffect *op_ASSIGN_21 = SETL("xv", CAST(32, MSB(CAST(16, MSB(op_AND_18), DUP(op_AND_18))), CAST(16, MSB(DUP(op_AND_18)), DUP(op_AND_18)))); + + // sv = ((st32) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff)))); + RzILOpPure *op_MUL_24 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_25 = SHIFTRA(Rss, op_MUL_24); + RzILOpPure *op_AND_28 = LOGAND(op_RSHIFT_25, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpEffect *op_ASSIGN_31 = SETL("sv", CAST(32, MSB(CAST(16, MSB(op_AND_28), DUP(op_AND_28))), CAST(16, MSB(DUP(op_AND_28)), DUP(op_AND_28)))); + + // tv = ((st32) ((st16) ((Rtt >> i * 0x10) & ((st64) 0xffff)))); + RzILOpPure *op_MUL_34 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_35 = SHIFTRA(Rtt, op_MUL_34); + RzILOpPure *op_AND_38 = LOGAND(op_RSHIFT_35, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpEffect *op_ASSIGN_41 = SETL("tv", CAST(32, MSB(CAST(16, MSB(op_AND_38), DUP(op_AND_38))), CAST(16, MSB(DUP(op_AND_38)), DUP(op_AND_38)))); + + // xv = xv + tv; + RzILOpPure *op_ADD_42 = ADD(VARL("xv"), VARL("tv")); + RzILOpEffect *op_ASSIGN_43 = SETL("xv", op_ADD_42); + + // sv = sv - tv; + RzILOpPure *op_SUB_44 = SUB(VARL("sv"), VARL("tv")); + RzILOpEffect *op_ASSIGN_45 = SETL("sv", op_SUB_44); + + // Pe = ((st8) ((((ut64) ((st32) Pe)) & (~(0x1 << i * 0x2))) | (((xv > sv) ? 
0x1 : 0x0) << i * 0x2))); + RzILOpPure *op_MUL_49 = MUL(VARL("i"), SN(32, 2)); + RzILOpPure *op_LSHIFT_50 = SHIFTL0(UN(64, 1), op_MUL_49); + RzILOpPure *op_NOT_51 = LOGNOT(op_LSHIFT_50); + RzILOpPure *op_AND_54 = LOGAND(CAST(64, IL_FALSE, CAST(32, MSB(READ_REG(pkt, Pe_op, true)), READ_REG(pkt, Pe_op, true))), op_NOT_51); + RzILOpPure *op_GT_55 = SGT(VARL("xv"), VARL("sv")); + RzILOpPure *ite_cast_ut64_56 = ITE(op_GT_55, UN(64, 1), UN(64, 0)); + RzILOpPure *op_MUL_58 = MUL(VARL("i"), SN(32, 2)); + RzILOpPure *op_LSHIFT_59 = SHIFTL0(ite_cast_ut64_56, op_MUL_58); + RzILOpPure *op_OR_60 = LOGOR(op_AND_54, op_LSHIFT_59); + RzILOpEffect *op_ASSIGN_62 = WRITE_REG(bundle, Pe_op, CAST(8, IL_FALSE, op_OR_60)); + + // Pe = ((st8) ((((ut64) ((st32) Pe)) & (~(0x1 << i * 0x2 + 0x1))) | (((xv > sv) ? 0x1 : 0x0) << i * 0x2 + 0x1))); + RzILOpPure *op_MUL_66 = MUL(VARL("i"), SN(32, 2)); + RzILOpPure *op_ADD_68 = ADD(op_MUL_66, SN(32, 1)); + RzILOpPure *op_LSHIFT_69 = SHIFTL0(UN(64, 1), op_ADD_68); + RzILOpPure *op_NOT_70 = LOGNOT(op_LSHIFT_69); + RzILOpPure *op_AND_73 = LOGAND(CAST(64, IL_FALSE, CAST(32, MSB(READ_REG(pkt, Pe_op, true)), READ_REG(pkt, Pe_op, true))), op_NOT_70); + RzILOpPure *op_GT_74 = SGT(VARL("xv"), VARL("sv")); + RzILOpPure *ite_cast_ut64_75 = ITE(op_GT_74, UN(64, 1), UN(64, 0)); + RzILOpPure *op_MUL_77 = MUL(VARL("i"), SN(32, 2)); + RzILOpPure *op_ADD_79 = ADD(op_MUL_77, SN(32, 1)); + RzILOpPure *op_LSHIFT_80 = SHIFTL0(ite_cast_ut64_75, op_ADD_79); + RzILOpPure *op_OR_81 = LOGOR(op_AND_73, op_LSHIFT_80); + RzILOpEffect *op_ASSIGN_83 = WRITE_REG(bundle, Pe_op, CAST(8, IL_FALSE, op_OR_81)); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_109 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((xv > sv) ? xv : sv)), 0x0, 0x10) == ((st64) ((xv > sv) ? 
xv : sv)))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((xv > sv) ? xv : sv) < 0x0) ? (-(0x1 << 0xf)) : (0x1 << 0xf) - ((st64) 0x1))); + RzILOpPure *op_GT_94 = SGT(VARL("xv"), VARL("sv")); + RzILOpPure *cond_95 = ITE(op_GT_94, VARL("xv"), VARL("sv")); + RzILOpPure *op_GT_101 = SGT(VARL("xv"), VARL("sv")); + RzILOpPure *cond_102 = ITE(op_GT_101, VARL("xv"), VARL("sv")); + RzILOpPure *op_EQ_104 = EQ(SEXTRACT64(CAST(64, IL_FALSE, cond_95), SN(32, 0), SN(32, 16)), CAST(64, MSB(cond_102), DUP(cond_102))); + RzILOpPure *op_GT_110 = SGT(VARL("xv"), VARL("sv")); + RzILOpPure *cond_111 = ITE(op_GT_110, VARL("xv"), VARL("sv")); + RzILOpPure *op_LT_113 = SLT(cond_111, SN(32, 0)); + RzILOpPure *op_LSHIFT_118 = SHIFTL0(SN(64, 1), SN(32, 15)); + RzILOpPure *op_NEG_119 = NEG(op_LSHIFT_118); + RzILOpPure *op_LSHIFT_124 = SHIFTL0(SN(64, 1), SN(32, 15)); + RzILOpPure *op_SUB_127 = SUB(op_LSHIFT_124, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_128 = ITE(op_LT_113, op_NEG_119, op_SUB_127); + RzILOpEffect *gcc_expr_129 = BRANCH(op_EQ_104, EMPTY(), set_usr_field_call_109); + + // h_tmp148 = HYB(gcc_expr_if ((sextract64(((ut64) ((xv > sv) ? xv : sv)), 0x0, 0x10) == ((st64) ((xv > sv) ? xv : sv)))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((xv > sv) ? xv : sv) < 0x0) ? (-(0x1 << 0xf)) : (0x1 << 0xf) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_131 = SETL("h_tmp148", cond_128); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((xv > sv) ? xv : sv)), ...; + RzILOpEffect *seq_132 = SEQN(2, gcc_expr_129, op_ASSIGN_hybrid_tmp_131); + + // Rxx = ((st64) (((ut64) (Rxx & (~(0xffff << i * 0x10)))) | (((ut64) (((sextract64(((ut64) ((xv > sv) ? xv : sv)), 0x0, 0x10) == ((st64) ((xv > sv) ? xv : sv))) ? ((st64) ((xv > sv) ? 
xv : sv)) : h_tmp148) & ((st64) 0xffff))) << i * 0x10))); + RzILOpPure *op_MUL_87 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_88 = SHIFTL0(SN(64, 0xffff), op_MUL_87); + RzILOpPure *op_NOT_89 = LOGNOT(op_LSHIFT_88); + RzILOpPure *op_AND_90 = LOGAND(READ_REG(pkt, Rxx_op, false), op_NOT_89); + RzILOpPure *op_GT_105 = SGT(VARL("xv"), VARL("sv")); + RzILOpPure *cond_106 = ITE(op_GT_105, VARL("xv"), VARL("sv")); + RzILOpPure *cond_134 = ITE(DUP(op_EQ_104), CAST(64, MSB(cond_106), DUP(cond_106)), VARL("h_tmp148")); + RzILOpPure *op_AND_137 = LOGAND(cond_134, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_140 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_141 = SHIFTL0(CAST(64, IL_FALSE, op_AND_137), op_MUL_140); + RzILOpPure *op_OR_143 = LOGOR(CAST(64, IL_FALSE, op_AND_90), op_LSHIFT_141); + RzILOpEffect *op_ASSIGN_145 = WRITE_REG(bundle, Rxx_op, CAST(64, IL_FALSE, op_OR_143)); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((xv > sv) ? xv : s ...; + RzILOpEffect *seq_146 = SEQN(2, seq_132, op_ASSIGN_145); + + // seq(h_tmp147; xv = ((st32) ((st16) ((Rxx >> i * 0x10) & ((st64) ...; + RzILOpEffect *seq_148 = SEQN(8, op_ASSIGN_21, op_ASSIGN_31, op_ASSIGN_41, op_ASSIGN_43, op_ASSIGN_45, op_ASSIGN_62, op_ASSIGN_83, seq_146); + + // seq(seq(h_tmp147; xv = ((st32) ((st16) ((Rxx >> i * 0x10) & ((st ...; + RzILOpEffect *seq_149 = SEQN(2, seq_148, seq_11); + + // while ((i < 0x4)) { seq(seq(h_tmp147; xv = ((st32) ((st16) ((Rxx >> i * 0x10) & ((st ... 
}; + RzILOpPure *op_LT_7 = SLT(VARL("i"), SN(32, 4)); + RzILOpEffect *for_150 = REPEAT(op_LT_7, seq_149); + + // seq(i = 0x0; while ((i < 0x4)) { seq(seq(h_tmp147; xv = ((st32) ...; + RzILOpEffect *seq_151 = SEQN(2, op_ASSIGN_5, for_150); + + RzILOpEffect *instruction_sequence = seq_151; + return instruction_sequence; +} + +// Rd = vaddhub(Rss,Rtt):sat +RzILOpEffect *hex_il_op_a5_vaddhubs(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: st32 i; + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_2 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_5 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp149 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_7 = SETL("h_tmp149", VARL("i")); + + // seq(h_tmp149 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_8 = SEQN(2, op_ASSIGN_hybrid_tmp_7, op_INC_5); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_82 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((extract64(((ut64) ((st32) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff)))) + ((st32) ((st16) ((Rtt >> i * 0x10) & ((st64) 0xffff))))), 0x0, 0x8) == ((ut64) ((st32) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff)))) + ((st32) ((st16) ((Rtt >> i * 0x10) & ((st64) 0xffff))))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st32) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff)))) + ((st32) ((st16) ((Rtt >> i * 0x10) & ((st64) 0xffff)))) < 0x0) ? 
((st64) 0x0) : (0x1 << 0x8) - ((st64) 0x1))); + RzILOpPure *op_MUL_22 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_23 = SHIFTRA(Rss, op_MUL_22); + RzILOpPure *op_AND_26 = LOGAND(op_RSHIFT_23, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_30 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_31 = SHIFTRA(Rtt, op_MUL_30); + RzILOpPure *op_AND_34 = LOGAND(op_RSHIFT_31, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_ADD_38 = ADD(CAST(32, MSB(CAST(16, MSB(op_AND_26), DUP(op_AND_26))), CAST(16, MSB(DUP(op_AND_26)), DUP(op_AND_26))), CAST(32, MSB(CAST(16, MSB(op_AND_34), DUP(op_AND_34))), CAST(16, MSB(DUP(op_AND_34)), DUP(op_AND_34)))); + RzILOpPure *op_MUL_45 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_46 = SHIFTRA(DUP(Rss), op_MUL_45); + RzILOpPure *op_AND_49 = LOGAND(op_RSHIFT_46, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_52 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_53 = SHIFTRA(DUP(Rtt), op_MUL_52); + RzILOpPure *op_AND_56 = LOGAND(op_RSHIFT_53, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_ADD_60 = ADD(CAST(32, MSB(CAST(16, MSB(op_AND_49), DUP(op_AND_49))), CAST(16, MSB(DUP(op_AND_49)), DUP(op_AND_49))), CAST(32, MSB(CAST(16, MSB(op_AND_56), DUP(op_AND_56))), CAST(16, MSB(DUP(op_AND_56)), DUP(op_AND_56)))); + RzILOpPure *op_EQ_62 = EQ(EXTRACT64(CAST(64, IL_FALSE, op_ADD_38), SN(32, 0), SN(32, 8)), CAST(64, IL_FALSE, op_ADD_60)); + RzILOpPure *op_MUL_84 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_85 = SHIFTRA(DUP(Rss), op_MUL_84); + RzILOpPure *op_AND_88 = LOGAND(op_RSHIFT_85, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_91 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_92 = SHIFTRA(DUP(Rtt), op_MUL_91); + RzILOpPure *op_AND_95 = LOGAND(op_RSHIFT_92, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_ADD_99 = ADD(CAST(32, MSB(CAST(16, MSB(op_AND_88), DUP(op_AND_88))), CAST(16, 
MSB(DUP(op_AND_88)), DUP(op_AND_88))), CAST(32, MSB(CAST(16, MSB(op_AND_95), DUP(op_AND_95))), CAST(16, MSB(DUP(op_AND_95)), DUP(op_AND_95)))); + RzILOpPure *op_LT_101 = SLT(op_ADD_99, SN(32, 0)); + RzILOpPure *op_LSHIFT_105 = SHIFTL0(SN(64, 1), SN(32, 8)); + RzILOpPure *op_SUB_108 = SUB(op_LSHIFT_105, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_110 = ITE(op_LT_101, CAST(64, MSB(SN(32, 0)), SN(32, 0)), op_SUB_108); + RzILOpEffect *gcc_expr_111 = BRANCH(op_EQ_62, EMPTY(), set_usr_field_call_82); + + // h_tmp150 = HYB(gcc_expr_if ((extract64(((ut64) ((st32) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff)))) + ((st32) ((st16) ((Rtt >> i * 0x10) & ((st64) 0xffff))))), 0x0, 0x8) == ((ut64) ((st32) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff)))) + ((st32) ((st16) ((Rtt >> i * 0x10) & ((st64) 0xffff))))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st32) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff)))) + ((st32) ((st16) ((Rtt >> i * 0x10) & ((st64) 0xffff)))) < 0x0) ? ((st64) 0x0) : (0x1 << 0x8) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_113 = SETL("h_tmp150", cond_110); + + // seq(HYB(gcc_expr_if ((extract64(((ut64) ((st32) ((st16) ((Rss >> ...; + RzILOpEffect *seq_114 = SEQN(2, gcc_expr_111, op_ASSIGN_hybrid_tmp_113); + + // Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xff << i * 0x8)))) | (((ut64) (((extract64(((ut64) ((st32) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff)))) + ((st32) ((st16) ((Rtt >> i * 0x10) & ((st64) 0xffff))))), 0x0, 0x8) == ((ut64) ((st32) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff)))) + ((st32) ((st16) ((Rtt >> i * 0x10) & ((st64) 0xffff)))))) ? 
((st64) ((st32) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff)))) + ((st32) ((st16) ((Rtt >> i * 0x10) & ((st64) 0xffff))))) : h_tmp150) & 0xff)) << i * 0x8))); + RzILOpPure *op_MUL_12 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_LSHIFT_13 = SHIFTL0(SN(64, 0xff), op_MUL_12); + RzILOpPure *op_NOT_14 = LOGNOT(op_LSHIFT_13); + RzILOpPure *op_AND_16 = LOGAND(CAST(64, MSB(READ_REG(pkt, Rd_op, true)), READ_REG(pkt, Rd_op, true)), op_NOT_14); + RzILOpPure *op_MUL_64 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_65 = SHIFTRA(DUP(Rss), op_MUL_64); + RzILOpPure *op_AND_68 = LOGAND(op_RSHIFT_65, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_71 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_72 = SHIFTRA(DUP(Rtt), op_MUL_71); + RzILOpPure *op_AND_75 = LOGAND(op_RSHIFT_72, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_ADD_79 = ADD(CAST(32, MSB(CAST(16, MSB(op_AND_68), DUP(op_AND_68))), CAST(16, MSB(DUP(op_AND_68)), DUP(op_AND_68))), CAST(32, MSB(CAST(16, MSB(op_AND_75), DUP(op_AND_75))), CAST(16, MSB(DUP(op_AND_75)), DUP(op_AND_75)))); + RzILOpPure *cond_116 = ITE(DUP(op_EQ_62), CAST(64, MSB(op_ADD_79), DUP(op_ADD_79)), VARL("h_tmp150")); + RzILOpPure *op_AND_118 = LOGAND(cond_116, SN(64, 0xff)); + RzILOpPure *op_MUL_121 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_LSHIFT_122 = SHIFTL0(CAST(64, IL_FALSE, op_AND_118), op_MUL_121); + RzILOpPure *op_OR_124 = LOGOR(CAST(64, IL_FALSE, op_AND_16), op_LSHIFT_122); + RzILOpEffect *op_ASSIGN_126 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, op_OR_124)); + + // seq(seq(HYB(gcc_expr_if ((extract64(((ut64) ((st32) ((st16) ((Rs ...; + RzILOpEffect *seq_127 = SEQN(2, seq_114, op_ASSIGN_126); + + // seq(h_tmp149; seq(seq(HYB(gcc_expr_if ((extract64(((ut64) ((st32 ...; + RzILOpEffect *seq_129 = seq_127; + + // seq(seq(h_tmp149; seq(seq(HYB(gcc_expr_if ((extract64(((ut64) (( ...; + RzILOpEffect *seq_130 = SEQN(2, seq_129, seq_8); + + // while ((i < 0x4)) { seq(seq(h_tmp149; 
seq(seq(HYB(gcc_expr_if ((extract64(((ut64) (( ... }; + RzILOpPure *op_LT_4 = SLT(VARL("i"), SN(32, 4)); + RzILOpEffect *for_131 = REPEAT(op_LT_4, seq_130); + + // seq(i = 0x0; while ((i < 0x4)) { seq(seq(h_tmp149; seq(seq(HYB(g ...; + RzILOpEffect *seq_132 = SEQN(2, op_ASSIGN_2, for_131); + + RzILOpEffect *instruction_sequence = seq_132; + return instruction_sequence; +} + +#include \ No newline at end of file diff --git a/librz/arch/isa/hexagon/il_ops/hexagon_il_A6_ops.c b/librz/arch/isa/hexagon/il_ops/hexagon_il_A6_ops.c new file mode 100644 index 00000000000..93b14c9cc79 --- /dev/null +++ b/librz/arch/isa/hexagon/il_ops/hexagon_il_A6_ops.c @@ -0,0 +1,164 @@ +// SPDX-FileCopyrightText: 2021 Rot127 +// SPDX-License-Identifier: LGPL-3.0-only + +// LLVM commit: b6f51787f6c8e77143f0aef6b58ddc7c55741d5c +// LLVM commit date: 2023-11-15 07:10:59 -0800 (ISO 8601 format) +// Date of code generation: 2024-03-16 06:22:39-05:00 +//======================================== +// The following code is generated. +// Do not edit. 
// Repository of code generator:
// https://github.com/rizinorg/rz-hexagon

#include
#include "../hexagon_il.h"
#include
#include
// NOTE(review): the three bare `#include` lines above lost their `<...>` operands
// during text extraction — presumably the rz_il op-builder / hexagon headers; confirm
// against the generated file in the repository.

// Pd = !any8(vcmpb.eq(Rss,Rtt))
// Lifts A6_vcmpbeq_notany to RzIL: Pd is first cleared, then set to 0xff inside a
// REPEAT loop (i = 0..7) as soon as any byte of Rss equals the corresponding byte
// of Rtt, and finally bitwise-inverted — so Pd ends up 0xff only when NO byte pair
// is equal. Code is generated ("Do not edit"); IL node numbering is generator-fixed.
RzILOpEffect *hex_il_op_a6_vcmpbeq_notany(HexInsnPktBundle *bundle) {
	const HexInsn *hi = bundle->insn;
	HexPkt *pkt = bundle->pkt;
	// READ
	// Declare: st32 i;
	const HexOp *Pd_op = ISA2REG(hi, 'd', false);
	const HexOp *Rss_op = ISA2REG(hi, 's', false);
	RzILOpPure *Rss = READ_REG(pkt, Rss_op, false);
	const HexOp *Rtt_op = ISA2REG(hi, 't', false);
	RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false);

	// Pd = ((st8) 0x0);
	RzILOpEffect *op_ASSIGN_4 = WRITE_REG(bundle, Pd_op, CAST(8, MSB(SN(32, 0)), SN(32, 0)));

	// i = 0x0;
	RzILOpEffect *op_ASSIGN_6 = SETL("i", SN(32, 0));

	// HYB(++i);
	RzILOpEffect *op_INC_9 = SETL("i", INC(VARL("i"), 32));

	// h_tmp151 = HYB(++i);
	RzILOpEffect *op_ASSIGN_hybrid_tmp_11 = SETL("h_tmp151", VARL("i"));

	// seq(h_tmp151 = HYB(++i); HYB(++i));
	RzILOpEffect *seq_12 = SEQN(2, op_ASSIGN_hybrid_tmp_11, op_INC_9);

	// Pd = ((st8) 0xff);
	RzILOpEffect *op_ASSIGN_32 = WRITE_REG(bundle, Pd_op, CAST(8, MSB(SN(32, 0xff)), SN(32, 0xff)));

	// seq(Pd = ((st8) 0xff));
	RzILOpEffect *seq_then_33 = op_ASSIGN_32;

	// if ((((st8) ((Rss >> i * 0x8) & ((st64) 0xff))) == ((st8) ((Rtt >> i * 0x8) & ((st64) 0xff))))) {seq(Pd = ((st8) 0xff))} else {{}};
	RzILOpPure *op_MUL_15 = MUL(VARL("i"), SN(32, 8));
	RzILOpPure *op_RSHIFT_16 = SHIFTRA(Rss, op_MUL_15);
	RzILOpPure *op_AND_19 = LOGAND(op_RSHIFT_16, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff)));
	RzILOpPure *op_MUL_23 = MUL(VARL("i"), SN(32, 8));
	RzILOpPure *op_RSHIFT_24 = SHIFTRA(Rtt, op_MUL_23);
	RzILOpPure *op_AND_27 = LOGAND(op_RSHIFT_24, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff)));
	RzILOpPure *op_EQ_29 = EQ(CAST(8, MSB(op_AND_19), DUP(op_AND_19)), CAST(8, MSB(op_AND_27), DUP(op_AND_27)));
	RzILOpEffect *branch_34 = BRANCH(op_EQ_29, seq_then_33, EMPTY());

	// seq(h_tmp151; if ((((st8) ((Rss >> i * 0x8) & ((st64) 0xff))) == ...;
	RzILOpEffect *seq_35 = branch_34;

	// seq(seq(h_tmp151; if ((((st8) ((Rss >> i * 0x8) & ((st64) 0xff)) ...;
	RzILOpEffect *seq_36 = SEQN(2, seq_35, seq_12);

	// while ((i < 0x8)) { seq(seq(h_tmp151; if ((((st8) ((Rss >> i * 0x8) & ((st64) 0xff)) ... };
	RzILOpPure *op_LT_8 = SLT(VARL("i"), SN(32, 8));
	RzILOpEffect *for_37 = REPEAT(op_LT_8, seq_36);

	// seq(i = 0x0; while ((i < 0x8)) { seq(seq(h_tmp151; if ((((st8) ( ...;
	RzILOpEffect *seq_38 = SEQN(2, op_ASSIGN_6, for_37);

	// Pd = ((st8) (~((st32) Pd)));
	// Final inversion implements the "notany" part of the instruction.
	RzILOpPure *op_NOT_40 = LOGNOT(CAST(32, MSB(READ_REG(pkt, Pd_op, true)), READ_REG(pkt, Pd_op, true)));
	RzILOpEffect *op_ASSIGN_42 = WRITE_REG(bundle, Pd_op, CAST(8, MSB(op_NOT_40), DUP(op_NOT_40)));

	RzILOpEffect *instruction_sequence = SEQN(3, op_ASSIGN_4, seq_38, op_ASSIGN_42);
	return instruction_sequence;
}

// Rdd,Pe = vminub(Rtt,Rss)
// Lifts A6_vminub_RdP: for each of the 8 unsigned bytes (i = 0..7), bit i of
// predicate Pe records (Rtt.byte[i] > Rss.byte[i]), and byte i of Rdd receives
// the unsigned minimum of the two bytes (UGT/ULT comparisons below).
RzILOpEffect *hex_il_op_a6_vminub_rdp(HexInsnPktBundle *bundle) {
	const HexInsn *hi = bundle->insn;
	HexPkt *pkt = bundle->pkt;
	// READ
	// Declare: st32 i;
	const HexOp *Pe_op = ISA2REG(hi, 'e', false);
	const HexOp *Rtt_op = ISA2REG(hi, 't', false);
	RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false);
	const HexOp *Rss_op = ISA2REG(hi, 's', false);
	RzILOpPure *Rss = READ_REG(pkt, Rss_op, false);
	const HexOp *Rdd_op = ISA2REG(hi, 'd', false);

	// i = 0x0;
	RzILOpEffect *op_ASSIGN_2 = SETL("i", SN(32, 0));

	// HYB(++i);
	RzILOpEffect *op_INC_5 = SETL("i", INC(VARL("i"), 32));

	// h_tmp152 = HYB(++i);
	RzILOpEffect *op_ASSIGN_hybrid_tmp_7 = SETL("h_tmp152", VARL("i"));

	// seq(h_tmp152 = HYB(++i); HYB(++i));
	RzILOpEffect *seq_8 = SEQN(2, op_ASSIGN_hybrid_tmp_7, op_INC_5);

	// Pe = ((st8) ((((ut64) ((st32) Pe)) & (~(0x1 << i))) | (((((ut8) ((Rtt >> i * 0x8) & ((st64) 0xff))) > ((ut8) ((Rss >> i * 0x8) & ((st64) 0xff)))) ? 0x1 : 0x0) << i)));
	RzILOpPure *op_LSHIFT_11 = SHIFTL0(UN(64, 1), VARL("i"));
	RzILOpPure *op_NOT_12 = LOGNOT(op_LSHIFT_11);
	RzILOpPure *op_AND_15 = LOGAND(CAST(64, IL_FALSE, CAST(32, MSB(READ_REG(pkt, Pe_op, true)), READ_REG(pkt, Pe_op, true))), op_NOT_12);
	RzILOpPure *op_MUL_18 = MUL(VARL("i"), SN(32, 8));
	RzILOpPure *op_RSHIFT_19 = SHIFTRA(Rtt, op_MUL_18);
	RzILOpPure *op_AND_22 = LOGAND(op_RSHIFT_19, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff)));
	RzILOpPure *op_MUL_26 = MUL(VARL("i"), SN(32, 8));
	RzILOpPure *op_RSHIFT_27 = SHIFTRA(Rss, op_MUL_26);
	RzILOpPure *op_AND_30 = LOGAND(op_RSHIFT_27, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff)));
	RzILOpPure *op_GT_32 = UGT(CAST(8, IL_FALSE, op_AND_22), CAST(8, IL_FALSE, op_AND_30));
	RzILOpPure *ite_cast_ut64_33 = ITE(op_GT_32, UN(64, 1), UN(64, 0));
	RzILOpPure *op_LSHIFT_34 = SHIFTL0(ite_cast_ut64_33, VARL("i"));
	RzILOpPure *op_OR_35 = LOGOR(op_AND_15, op_LSHIFT_34);
	RzILOpEffect *op_ASSIGN_37 = WRITE_REG(bundle, Pe_op, CAST(8, IL_FALSE, op_OR_35));

	// Rdd = ((st64) (((ut64) (Rdd & (~(0xff << i * 0x8)))) | (((ut64) (((st64) ((st32) ((((ut8) ((Rtt >> i * 0x8) & ((st64) 0xff))) < ((ut8) ((Rss >> i * 0x8) & ((st64) 0xff)))) ? ((ut8) ((Rtt >> i * 0x8) & ((st64) 0xff))) : ((ut8) ((Rss >> i * 0x8) & ((st64) 0xff)))))) & 0xff)) << i * 0x8)));
	RzILOpPure *op_MUL_42 = MUL(VARL("i"), SN(32, 8));
	RzILOpPure *op_LSHIFT_43 = SHIFTL0(SN(64, 0xff), op_MUL_42);
	RzILOpPure *op_NOT_44 = LOGNOT(op_LSHIFT_43);
	RzILOpPure *op_AND_45 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_44);
	RzILOpPure *op_MUL_47 = MUL(VARL("i"), SN(32, 8));
	RzILOpPure *op_RSHIFT_48 = SHIFTRA(DUP(Rtt), op_MUL_47);
	RzILOpPure *op_AND_51 = LOGAND(op_RSHIFT_48, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff)));
	RzILOpPure *op_MUL_54 = MUL(VARL("i"), SN(32, 8));
	RzILOpPure *op_RSHIFT_55 = SHIFTRA(DUP(Rss), op_MUL_54);
	RzILOpPure *op_AND_58 = LOGAND(op_RSHIFT_55, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff)));
	RzILOpPure *op_LT_60 = ULT(CAST(8, IL_FALSE, op_AND_51), CAST(8, IL_FALSE, op_AND_58));
	RzILOpPure *op_MUL_62 = MUL(VARL("i"), SN(32, 8));
	RzILOpPure *op_RSHIFT_63 = SHIFTRA(DUP(Rtt), op_MUL_62);
	RzILOpPure *op_AND_66 = LOGAND(op_RSHIFT_63, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff)));
	RzILOpPure *op_MUL_69 = MUL(VARL("i"), SN(32, 8));
	RzILOpPure *op_RSHIFT_70 = SHIFTRA(DUP(Rss), op_MUL_69);
	RzILOpPure *op_AND_73 = LOGAND(op_RSHIFT_70, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff)));
	RzILOpPure *cond_75 = ITE(op_LT_60, CAST(8, IL_FALSE, op_AND_66), CAST(8, IL_FALSE, op_AND_73));
	RzILOpPure *op_AND_79 = LOGAND(CAST(64, MSB(CAST(32, IL_FALSE, cond_75)), CAST(32, IL_FALSE, DUP(cond_75))), SN(64, 0xff));
	RzILOpPure *op_MUL_82 = MUL(VARL("i"), SN(32, 8));
	RzILOpPure *op_LSHIFT_83 = SHIFTL0(CAST(64, IL_FALSE, op_AND_79), op_MUL_82);
	RzILOpPure *op_OR_85 = LOGOR(CAST(64, IL_FALSE, op_AND_45), op_LSHIFT_83);
	RzILOpEffect *op_ASSIGN_87 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_85));

	// seq(h_tmp152; Pe = ((st8) ((((ut64) ((st32) Pe)) & (~(0x1 << i)) ...;
	RzILOpEffect *seq_89 = SEQN(2, op_ASSIGN_37, op_ASSIGN_87);

	// seq(seq(h_tmp152; Pe = ((st8) ((((ut64) ((st32) Pe)) & (~(0x1 << ...;
	RzILOpEffect *seq_90 = SEQN(2, seq_89, seq_8);

	// while ((i < 0x8)) { seq(seq(h_tmp152; Pe = ((st8) ((((ut64) ((st32) Pe)) & (~(0x1 << ... };
	RzILOpPure *op_LT_4 = SLT(VARL("i"), SN(32, 8));
	RzILOpEffect *for_91 = REPEAT(op_LT_4, seq_90);

	// seq(i = 0x0; while ((i < 0x8)) { seq(seq(h_tmp152; Pe = ((st8) ( ...;
	RzILOpEffect *seq_92 = SEQN(2, op_ASSIGN_2, for_91);

	RzILOpEffect *instruction_sequence = seq_92;
	return instruction_sequence;
}

// NOTE(review): include operand stripped in extraction (generated files end with the
// op-builder "end" header); the generated file also lacks a trailing newline.
#include
\ No newline at end of file
diff --git a/librz/arch/isa/hexagon/il_ops/hexagon_il_A7_ops.c b/librz/arch/isa/hexagon/il_ops/hexagon_il_A7_ops.c
new file mode 100644
index 00000000000..4433429e16d
--- /dev/null
+++ b/librz/arch/isa/hexagon/il_ops/hexagon_il_A7_ops.c
@@ -0,0 +1,157 @@
// SPDX-FileCopyrightText: 2021 Rot127
// SPDX-License-Identifier: LGPL-3.0-only

// LLVM commit: b6f51787f6c8e77143f0aef6b58ddc7c55741d5c
// LLVM commit date: 2023-11-15 07:10:59 -0800 (ISO 8601 format)
// Date of code generation: 2024-03-16 06:22:39-05:00
//========================================
// The following code is generated.
// Do not edit.
// Repository of code generator:
// https://github.com/rizinorg/rz-hexagon

#include
#include "../hexagon_il.h"
#include
#include
// NOTE(review): bare `#include` operands lost in extraction — confirm against the
// generated file in the repository.

// Rd = clip(Rs,Ii)
// Lifts A7_clip: clamps signed Rs into [-(1 << u), (1 << u) - 1], where u is the
// instruction's immediate. maxv/minv are computed as IL locals, then Rd receives
// min(maxv, max(Rs, minv)) via nested ITEs (SGT/SLT = signed comparisons).
RzILOpEffect *hex_il_op_a7_clip(HexInsnPktBundle *bundle) {
	const HexInsn *hi = bundle->insn;
	HexPkt *pkt = bundle->pkt;
	// READ
	RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u'));
	// Declare: st32 maxv;
	// Declare: st32 minv;
	const HexOp *Rd_op = ISA2REG(hi, 'd', false);
	const HexOp *Rs_op = ISA2REG(hi, 's', false);
	RzILOpPure *Rs = READ_REG(pkt, Rs_op, false);

	// u = u;
	RzILOpEffect *imm_assign_1 = SETL("u", u);

	// maxv = (0x1 << u) - 0x1;
	RzILOpPure *op_LSHIFT_3 = SHIFTL0(SN(32, 1), VARL("u"));
	RzILOpPure *op_SUB_5 = SUB(op_LSHIFT_3, SN(32, 1));
	RzILOpEffect *op_ASSIGN_7 = SETL("maxv", op_SUB_5);

	// minv = (-(0x1 << u));
	RzILOpPure *op_LSHIFT_9 = SHIFTL0(SN(32, 1), VARL("u"));
	RzILOpPure *op_NEG_10 = NEG(op_LSHIFT_9);
	RzILOpEffect *op_ASSIGN_12 = SETL("minv", op_NEG_10);

	// Rd = ((maxv < ((Rs > minv) ? Rs : minv)) ? maxv : ((Rs > minv) ? Rs : minv));
	RzILOpPure *op_GT_15 = SGT(Rs, VARL("minv"));
	RzILOpPure *cond_16 = ITE(op_GT_15, DUP(Rs), VARL("minv"));
	RzILOpPure *op_LT_17 = SLT(VARL("maxv"), cond_16);
	RzILOpPure *op_GT_18 = SGT(DUP(Rs), VARL("minv"));
	RzILOpPure *cond_19 = ITE(op_GT_18, DUP(Rs), VARL("minv"));
	RzILOpPure *cond_20 = ITE(op_LT_17, VARL("maxv"), cond_19);
	RzILOpEffect *op_ASSIGN_21 = WRITE_REG(bundle, Rd_op, cond_20);

	RzILOpEffect *instruction_sequence = SEQN(4, imm_assign_1, op_ASSIGN_7, op_ASSIGN_12, op_ASSIGN_21);
	return instruction_sequence;
}

// Rdd = cround(Rss,Ii)
// Not lifted yet: generator emits the NOT_IMPLEMENTED stub.
RzILOpEffect *hex_il_op_a7_croundd_ri(HexInsnPktBundle *bundle) {
	NOT_IMPLEMENTED;
}

// Rdd = cround(Rss,Rt)
// Not lifted yet: generator emits the NOT_IMPLEMENTED stub.
RzILOpEffect *hex_il_op_a7_croundd_rr(HexInsnPktBundle *bundle) {
	NOT_IMPLEMENTED;
}

// Rdd = vclip(Rss,Ii)
// Lifts A7_vclip: applies the same clip(x, u) clamp as hex_il_op_a7_clip to each
// 32-bit half of Rss (low word at shift 0x0, high word at shift 0x20) and packs
// the two clamped words back into Rdd.
RzILOpEffect *hex_il_op_a7_vclip(HexInsnPktBundle *bundle) {
	const HexInsn *hi = bundle->insn;
	HexPkt *pkt = bundle->pkt;
	// READ
	// Declare: st32 tmp;
	RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u'));
	// Declare: st32 maxv;
	// Declare: st32 minv;
	const HexOp *Rss_op = ISA2REG(hi, 's', false);
	RzILOpPure *Rss = READ_REG(pkt, Rss_op, false);
	const HexOp *Rdd_op = ISA2REG(hi, 'd', false);

	// u = u;
	RzILOpEffect *imm_assign_2 = SETL("u", u);

	// maxv = (0x1 << u) - 0x1;
	RzILOpPure *op_LSHIFT_4 = SHIFTL0(SN(32, 1), VARL("u"));
	RzILOpPure *op_SUB_6 = SUB(op_LSHIFT_4, SN(32, 1));
	RzILOpEffect *op_ASSIGN_8 = SETL("maxv", op_SUB_6);

	// minv = (-(0x1 << u));
	RzILOpPure *op_LSHIFT_10 = SHIFTL0(SN(32, 1), VARL("u"));
	RzILOpPure *op_NEG_11 = NEG(op_LSHIFT_10);
	RzILOpEffect *op_ASSIGN_13 = SETL("minv", op_NEG_11);

	// tmp = ((st32) ((((st64) maxv) < ((((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))) > ((st64) minv)) ? ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))) : ((st64) minv))) ? ((st64) maxv) : ((((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))) > ((st64) minv)) ? ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))) : ((st64) minv))));
	RzILOpPure *op_RSHIFT_18 = SHIFTRA(Rss, SN(32, 0));
	RzILOpPure *op_AND_20 = LOGAND(op_RSHIFT_18, SN(64, 0xffffffff));
	RzILOpPure *op_GT_24 = SGT(CAST(64, MSB(CAST(32, MSB(op_AND_20), DUP(op_AND_20))), CAST(32, MSB(DUP(op_AND_20)), DUP(op_AND_20))), CAST(64, MSB(VARL("minv")), VARL("minv")));
	RzILOpPure *op_RSHIFT_28 = SHIFTRA(DUP(Rss), SN(32, 0));
	RzILOpPure *op_AND_30 = LOGAND(op_RSHIFT_28, SN(64, 0xffffffff));
	RzILOpPure *cond_34 = ITE(op_GT_24, CAST(64, MSB(CAST(32, MSB(op_AND_30), DUP(op_AND_30))), CAST(32, MSB(DUP(op_AND_30)), DUP(op_AND_30))), CAST(64, MSB(VARL("minv")), VARL("minv")));
	RzILOpPure *op_LT_36 = SLT(CAST(64, MSB(VARL("maxv")), VARL("maxv")), cond_34);
	RzILOpPure *op_RSHIFT_40 = SHIFTRA(DUP(Rss), SN(32, 0));
	RzILOpPure *op_AND_42 = LOGAND(op_RSHIFT_40, SN(64, 0xffffffff));
	RzILOpPure *op_GT_46 = SGT(CAST(64, MSB(CAST(32, MSB(op_AND_42), DUP(op_AND_42))), CAST(32, MSB(DUP(op_AND_42)), DUP(op_AND_42))), CAST(64, MSB(VARL("minv")), VARL("minv")));
	RzILOpPure *op_RSHIFT_50 = SHIFTRA(DUP(Rss), SN(32, 0));
	RzILOpPure *op_AND_52 = LOGAND(op_RSHIFT_50, SN(64, 0xffffffff));
	RzILOpPure *cond_56 = ITE(op_GT_46, CAST(64, MSB(CAST(32, MSB(op_AND_52), DUP(op_AND_52))), CAST(32, MSB(DUP(op_AND_52)), DUP(op_AND_52))), CAST(64, MSB(VARL("minv")), VARL("minv")));
	RzILOpPure *cond_58 = ITE(op_LT_36, CAST(64, MSB(VARL("maxv")), VARL("maxv")), cond_56);
	RzILOpEffect *op_ASSIGN_60 = SETL("tmp", CAST(32, MSB(cond_58), DUP(cond_58)));

	// Rdd = ((Rdd & (~(0xffffffff << 0x0))) | ((((st64) tmp) & 0xffffffff) << 0x0));
	RzILOpPure *op_LSHIFT_67 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0));
	RzILOpPure *op_NOT_68 = LOGNOT(op_LSHIFT_67);
	RzILOpPure *op_AND_69 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_68);
	RzILOpPure *op_AND_72 = LOGAND(CAST(64, MSB(VARL("tmp")), VARL("tmp")), SN(64, 0xffffffff));
	RzILOpPure *op_LSHIFT_76 = SHIFTL0(op_AND_72, SN(32, 0));
	RzILOpPure *op_OR_77 = LOGOR(op_AND_69, op_LSHIFT_76);
	RzILOpEffect *op_ASSIGN_78 = WRITE_REG(bundle, Rdd_op, op_OR_77);

	// maxv = (0x1 << u) - 0x1;
	// Recomputed for the high word — generator re-emits the locals per lane.
	RzILOpPure *op_LSHIFT_81 = SHIFTL0(SN(32, 1), VARL("u"));
	RzILOpPure *op_SUB_83 = SUB(op_LSHIFT_81, SN(32, 1));
	RzILOpEffect *op_ASSIGN_84 = SETL("maxv", op_SUB_83);

	// minv = (-(0x1 << u));
	RzILOpPure *op_LSHIFT_86 = SHIFTL0(SN(32, 1), VARL("u"));
	RzILOpPure *op_NEG_87 = NEG(op_LSHIFT_86);
	RzILOpEffect *op_ASSIGN_88 = SETL("minv", op_NEG_87);

	// tmp = ((st32) ((((st64) maxv) < ((((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))) > ((st64) minv)) ? ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))) : ((st64) minv))) ? ((st64) maxv) : ((((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))) > ((st64) minv)) ? ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))) : ((st64) minv))));
	RzILOpPure *op_RSHIFT_92 = SHIFTRA(DUP(Rss), SN(32, 0x20));
	RzILOpPure *op_AND_94 = LOGAND(op_RSHIFT_92, SN(64, 0xffffffff));
	RzILOpPure *op_GT_98 = SGT(CAST(64, MSB(CAST(32, MSB(op_AND_94), DUP(op_AND_94))), CAST(32, MSB(DUP(op_AND_94)), DUP(op_AND_94))), CAST(64, MSB(VARL("minv")), VARL("minv")));
	RzILOpPure *op_RSHIFT_102 = SHIFTRA(DUP(Rss), SN(32, 0x20));
	RzILOpPure *op_AND_104 = LOGAND(op_RSHIFT_102, SN(64, 0xffffffff));
	RzILOpPure *cond_108 = ITE(op_GT_98, CAST(64, MSB(CAST(32, MSB(op_AND_104), DUP(op_AND_104))), CAST(32, MSB(DUP(op_AND_104)), DUP(op_AND_104))), CAST(64, MSB(VARL("minv")), VARL("minv")));
	RzILOpPure *op_LT_110 = SLT(CAST(64, MSB(VARL("maxv")), VARL("maxv")), cond_108);
	RzILOpPure *op_RSHIFT_114 = SHIFTRA(DUP(Rss), SN(32, 0x20));
	RzILOpPure *op_AND_116 = LOGAND(op_RSHIFT_114, SN(64, 0xffffffff));
	RzILOpPure *op_GT_120 = SGT(CAST(64, MSB(CAST(32, MSB(op_AND_116), DUP(op_AND_116))), CAST(32, MSB(DUP(op_AND_116)), DUP(op_AND_116))), CAST(64, MSB(VARL("minv")), VARL("minv")));
	RzILOpPure *op_RSHIFT_124 = SHIFTRA(DUP(Rss), SN(32, 0x20));
	RzILOpPure *op_AND_126 = LOGAND(op_RSHIFT_124, SN(64, 0xffffffff));
	RzILOpPure *cond_130 = ITE(op_GT_120, CAST(64, MSB(CAST(32, MSB(op_AND_126), DUP(op_AND_126))), CAST(32, MSB(DUP(op_AND_126)), DUP(op_AND_126))), CAST(64, MSB(VARL("minv")), VARL("minv")));
	RzILOpPure *cond_132 = ITE(op_LT_110, CAST(64, MSB(VARL("maxv")), VARL("maxv")), cond_130);
	RzILOpEffect *op_ASSIGN_134 = SETL("tmp", CAST(32, MSB(cond_132), DUP(cond_132)));

	// Rdd = ((Rdd & (~(0xffffffff << 0x20))) | ((((st64) tmp) & 0xffffffff) << 0x20));
	RzILOpPure *op_LSHIFT_140 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0x20));
	RzILOpPure *op_NOT_141 = LOGNOT(op_LSHIFT_140);
	RzILOpPure *op_AND_142 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_141);
	RzILOpPure *op_AND_145 = LOGAND(CAST(64, MSB(VARL("tmp")), VARL("tmp")), SN(64, 0xffffffff));
	RzILOpPure *op_LSHIFT_149 = SHIFTL0(op_AND_145, SN(32, 0x20));
	RzILOpPure *op_OR_150 = LOGOR(op_AND_142, op_LSHIFT_149);
	RzILOpEffect *op_ASSIGN_151 = WRITE_REG(bundle, Rdd_op, op_OR_150);

	RzILOpEffect *instruction_sequence = SEQN(9, imm_assign_2, op_ASSIGN_8, op_ASSIGN_13, op_ASSIGN_60, op_ASSIGN_78, op_ASSIGN_84, op_ASSIGN_88, op_ASSIGN_134, op_ASSIGN_151);
	return instruction_sequence;
}

// NOTE(review): include operand stripped in extraction; generated file lacks a
// trailing newline.
#include
\ No newline at end of file
diff --git a/librz/arch/isa/hexagon/il_ops/hexagon_il_C2_ops.c b/librz/arch/isa/hexagon/il_ops/hexagon_il_C2_ops.c
new file mode 100644
index 00000000000..adf581eb54f
--- /dev/null
+++ b/librz/arch/isa/hexagon/il_ops/hexagon_il_C2_ops.c
@@ -0,0 +1,1012 @@
// SPDX-FileCopyrightText: 2021 Rot127
// SPDX-License-Identifier: LGPL-3.0-only

// LLVM commit: b6f51787f6c8e77143f0aef6b58ddc7c55741d5c
// LLVM commit date: 2023-11-15 07:10:59 -0800 (ISO 8601 format)
// Date of code generation: 2024-03-16 06:22:39-05:00
//========================================
// The following code is generated.
// Do not edit.
// Repository of code generator:
// https://github.com/rizinorg/rz-hexagon

#include
#include "../hexagon_il.h"
#include
#include
// NOTE(review): bare `#include` operands lost in extraction — confirm against the
// generated file in the repository.

// Pd = all8(Ps)
// Pd = 0xff iff every bit of predicate Ps is set (Ps == 0xff), else 0.
RzILOpEffect *hex_il_op_c2_all8(HexInsnPktBundle *bundle) {
	const HexInsn *hi = bundle->insn;
	HexPkt *pkt = bundle->pkt;
	// READ
	const HexOp *Pd_op = ISA2REG(hi, 'd', false);
	const HexOp *Ps_op = ISA2REG(hi, 's', false);
	RzILOpPure *Ps = READ_REG(pkt, Ps_op, false);

	// Pd = ((st8) ((((st32) Ps) == 0xff) ? 0xff : 0x0));
	RzILOpPure *op_EQ_4 = EQ(CAST(32, MSB(Ps), DUP(Ps)), SN(32, 0xff));
	RzILOpPure *cond_7 = ITE(op_EQ_4, SN(32, 0xff), SN(32, 0));
	RzILOpEffect *op_ASSIGN_9 = WRITE_REG(bundle, Pd_op, CAST(8, MSB(cond_7), DUP(cond_7)));

	RzILOpEffect *instruction_sequence = op_ASSIGN_9;
	return instruction_sequence;
}

// Pd = and(Pt,Ps)
// Bitwise AND of the two 8-bit predicates.
RzILOpEffect *hex_il_op_c2_and(HexInsnPktBundle *bundle) {
	const HexInsn *hi = bundle->insn;
	HexPkt *pkt = bundle->pkt;
	// READ
	const HexOp *Pd_op = ISA2REG(hi, 'd', false);
	const HexOp *Ps_op = ISA2REG(hi, 's', false);
	RzILOpPure *Ps = READ_REG(pkt, Ps_op, false);
	const HexOp *Pt_op = ISA2REG(hi, 't', false);
	RzILOpPure *Pt = READ_REG(pkt, Pt_op, false);

	// Pd = ((st8) (((st32) Ps) & ((st32) Pt)));
	RzILOpPure *op_AND_5 = LOGAND(CAST(32, MSB(Ps), DUP(Ps)), CAST(32, MSB(Pt), DUP(Pt)));
	RzILOpEffect *op_ASSIGN_7 = WRITE_REG(bundle, Pd_op, CAST(8, MSB(op_AND_5), DUP(op_AND_5)));

	RzILOpEffect *instruction_sequence = op_ASSIGN_7;
	return instruction_sequence;
}

// Pd = and(Pt,!Ps)
// Bitwise AND of Pt with the complement of Ps.
RzILOpEffect *hex_il_op_c2_andn(HexInsnPktBundle *bundle) {
	const HexInsn *hi = bundle->insn;
	HexPkt *pkt = bundle->pkt;
	// READ
	const HexOp *Pd_op = ISA2REG(hi, 'd', false);
	const HexOp *Pt_op = ISA2REG(hi, 't', false);
	RzILOpPure *Pt = READ_REG(pkt, Pt_op, false);
	const HexOp *Ps_op = ISA2REG(hi, 's', false);
	RzILOpPure *Ps = READ_REG(pkt, Ps_op, false);

	// Pd = ((st8) (((st32) Pt) & (~((st32) Ps))));
	RzILOpPure *op_NOT_4 = LOGNOT(CAST(32, MSB(Ps), DUP(Ps)));
	RzILOpPure *op_AND_6 = LOGAND(CAST(32, MSB(Pt), DUP(Pt)), op_NOT_4);
	RzILOpEffect *op_ASSIGN_8 = WRITE_REG(bundle, Pd_op, CAST(8, MSB(op_AND_6), DUP(op_AND_6)));

	RzILOpEffect *instruction_sequence = op_ASSIGN_8;
	return instruction_sequence;
}

// Pd = any8(Ps)
// Pd = 0xff iff any bit of Ps is set (Ps != 0), else 0.
RzILOpEffect *hex_il_op_c2_any8(HexInsnPktBundle *bundle) {
	const HexInsn *hi = bundle->insn;
	HexPkt *pkt = bundle->pkt;
	// READ
	const HexOp *Pd_op = ISA2REG(hi, 'd', false);
	const HexOp *Ps_op = ISA2REG(hi, 's', false);
	RzILOpPure *Ps = READ_REG(pkt, Ps_op, false);

	// Pd = ((st8) (Ps ? 0xff : 0x0));
	RzILOpPure *cond_4 = ITE(NON_ZERO(Ps), SN(32, 0xff), SN(32, 0));
	RzILOpEffect *op_ASSIGN_6 = WRITE_REG(bundle, Pd_op, CAST(8, MSB(cond_4), DUP(cond_4)));

	RzILOpEffect *instruction_sequence = op_ASSIGN_6;
	return instruction_sequence;
}

// Pd = bitsclr(Rs,Rt)
// Pd = 0xff iff all bits selected by mask Rt are clear in Rs ((Rs & Rt) == 0).
RzILOpEffect *hex_il_op_c2_bitsclr(HexInsnPktBundle *bundle) {
	const HexInsn *hi = bundle->insn;
	HexPkt *pkt = bundle->pkt;
	// READ
	const HexOp *Pd_op = ISA2REG(hi, 'd', false);
	const HexOp *Rs_op = ISA2REG(hi, 's', false);
	RzILOpPure *Rs = READ_REG(pkt, Rs_op, false);
	const HexOp *Rt_op = ISA2REG(hi, 't', false);
	RzILOpPure *Rt = READ_REG(pkt, Rt_op, false);

	// Pd = ((st8) (((Rs & Rt) == 0x0) ? 0xff : 0x0));
	RzILOpPure *op_AND_3 = LOGAND(Rs, Rt);
	RzILOpPure *op_EQ_5 = EQ(op_AND_3, SN(32, 0));
	RzILOpPure *cond_8 = ITE(op_EQ_5, SN(32, 0xff), SN(32, 0));
	RzILOpEffect *op_ASSIGN_10 = WRITE_REG(bundle, Pd_op, CAST(8, MSB(cond_8), DUP(cond_8)));

	RzILOpEffect *instruction_sequence = op_ASSIGN_10;
	return instruction_sequence;
}

// Pd = bitsclr(Rs,Ii)
// Immediate-mask variant of bitsclr; comparison is done unsigned.
RzILOpEffect *hex_il_op_c2_bitsclri(HexInsnPktBundle *bundle) {
	const HexInsn *hi = bundle->insn;
	HexPkt *pkt = bundle->pkt;
	// READ
	const HexOp *Pd_op = ISA2REG(hi, 'd', false);
	const HexOp *Rs_op = ISA2REG(hi, 's', false);
	RzILOpPure *Rs = READ_REG(pkt, Rs_op, false);
	RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u'));

	// u = u;
	RzILOpEffect *imm_assign_2 = SETL("u", u);

	// Pd = ((st8) (((((ut32) Rs) & u) == ((ut32) 0x0)) ? 0xff : 0x0));
	RzILOpPure *op_AND_5 = LOGAND(CAST(32, IL_FALSE, Rs), VARL("u"));
	RzILOpPure *op_EQ_8 = EQ(op_AND_5, CAST(32, IL_FALSE, SN(32, 0)));
	RzILOpPure *cond_11 = ITE(op_EQ_8, SN(32, 0xff), SN(32, 0));
	RzILOpEffect *op_ASSIGN_13 = WRITE_REG(bundle, Pd_op, CAST(8, MSB(cond_11), DUP(cond_11)));

	RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_2, op_ASSIGN_13);
	return instruction_sequence;
}

// Pd = bitsset(Rs,Rt)
// Pd = 0xff iff all bits selected by mask Rt are set in Rs ((Rs & Rt) == Rt).
RzILOpEffect *hex_il_op_c2_bitsset(HexInsnPktBundle *bundle) {
	const HexInsn *hi = bundle->insn;
	HexPkt *pkt = bundle->pkt;
	// READ
	const HexOp *Pd_op = ISA2REG(hi, 'd', false);
	const HexOp *Rs_op = ISA2REG(hi, 's', false);
	RzILOpPure *Rs = READ_REG(pkt, Rs_op, false);
	const HexOp *Rt_op = ISA2REG(hi, 't', false);
	RzILOpPure *Rt = READ_REG(pkt, Rt_op, false);

	// Pd = ((st8) (((Rs & Rt) == Rt) ? 0xff : 0x0));
	RzILOpPure *op_AND_3 = LOGAND(Rs, Rt);
	RzILOpPure *op_EQ_4 = EQ(op_AND_3, DUP(Rt));
	RzILOpPure *cond_7 = ITE(op_EQ_4, SN(32, 0xff), SN(32, 0));
	RzILOpEffect *op_ASSIGN_9 = WRITE_REG(bundle, Pd_op, CAST(8, MSB(cond_7), DUP(cond_7)));

	RzILOpEffect *instruction_sequence = op_ASSIGN_9;
	return instruction_sequence;
}

// if (!Pu) Rdd = combine(Rs,Rt)
// Conditional combine: when bit 0 of Pu is clear, Rt goes into the low word of
// Rdd (shift 0x0) and Rs into the high word (shift 0x20); otherwise a NOP.
RzILOpEffect *hex_il_op_c2_ccombinewf(HexInsnPktBundle *bundle) {
	const HexInsn *hi = bundle->insn;
	HexPkt *pkt = bundle->pkt;
	// READ
	const HexOp *Pu_op = ISA2REG(hi, 'u', false);
	RzILOpPure *Pu = READ_REG(pkt, Pu_op, false);
	const HexOp *Rdd_op = ISA2REG(hi, 'd', false);
	const HexOp *Rt_op = ISA2REG(hi, 't', false);
	RzILOpPure *Rt = READ_REG(pkt, Rt_op, false);
	const HexOp *Rs_op = ISA2REG(hi, 's', false);
	RzILOpPure *Rs = READ_REG(pkt, Rs_op, false);

	// Rdd = ((Rdd & (~(0xffffffff << 0x0))) | ((((st64) Rt) & 0xffffffff) << 0x0));
	RzILOpPure *op_LSHIFT_10 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0));
	RzILOpPure *op_NOT_11 = LOGNOT(op_LSHIFT_10);
	RzILOpPure *op_AND_12 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_11);
	RzILOpPure *op_AND_16 = LOGAND(CAST(64, MSB(Rt), DUP(Rt)), SN(64, 0xffffffff));
	RzILOpPure *op_LSHIFT_20 = SHIFTL0(op_AND_16, SN(32, 0));
	RzILOpPure *op_OR_21 = LOGOR(op_AND_12, op_LSHIFT_20);
	RzILOpEffect *op_ASSIGN_22 = WRITE_REG(bundle, Rdd_op, op_OR_21);

	// Rdd = ((Rdd & (~(0xffffffff << 0x20))) | ((((st64) Rs) & 0xffffffff) << 0x20));
	RzILOpPure *op_LSHIFT_28 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0x20));
	RzILOpPure *op_NOT_29 = LOGNOT(op_LSHIFT_28);
	RzILOpPure *op_AND_30 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_29);
	RzILOpPure *op_AND_34 = LOGAND(CAST(64, MSB(Rs), DUP(Rs)), SN(64, 0xffffffff));
	RzILOpPure *op_LSHIFT_38 = SHIFTL0(op_AND_34, SN(32, 0x20));
	RzILOpPure *op_OR_39 = LOGOR(op_AND_30, op_LSHIFT_38);
	RzILOpEffect *op_ASSIGN_40 = WRITE_REG(bundle, Rdd_op, op_OR_39);

	// nop;
	RzILOpEffect *nop_42 = NOP();

	// seq(Rdd = ((Rdd & (~(0xffffffff << 0x0))) | ((((st64) Rt) & 0xff ...;
	RzILOpEffect *seq_then_43 = SEQN(2, op_ASSIGN_22, op_ASSIGN_40);

	// seq(nop);
	RzILOpEffect *seq_else_44 = nop_42;

	// if (! (((st32) Pu) & 0x1)) {seq(Rdd = ((Rdd & (~(0xffffffff << 0x0))) | ((((st64) Rt) & 0xff ...} else {seq(nop)};
	RzILOpPure *op_AND_3 = LOGAND(CAST(32, MSB(Pu), DUP(Pu)), SN(32, 1));
	RzILOpPure *op_INV_4 = INV(NON_ZERO(op_AND_3));
	RzILOpEffect *branch_45 = BRANCH(op_INV_4, seq_then_43, seq_else_44);

	RzILOpEffect *instruction_sequence = branch_45;
	return instruction_sequence;
}

// if (!Pu.new) Rdd = combine(Rs,Rt)
// Same as ccombinewf but predicated on the .new (same-packet) value of Pu.
RzILOpEffect *hex_il_op_c2_ccombinewnewf(HexInsnPktBundle *bundle) {
	const HexInsn *hi = bundle->insn;
	HexPkt *pkt = bundle->pkt;
	// READ
	const HexOp *Pu_new_op = ISA2REG(hi, 'u', true);
	RzILOpPure *Pu_new = READ_REG(pkt, Pu_new_op, true);
	const HexOp *Rdd_op = ISA2REG(hi, 'd', false);
	const HexOp *Rt_op = ISA2REG(hi, 't', false);
	RzILOpPure *Rt = READ_REG(pkt, Rt_op, false);
	const HexOp *Rs_op = ISA2REG(hi, 's', false);
	RzILOpPure *Rs = READ_REG(pkt, Rs_op, false);

	// Rdd = ((Rdd & (~(0xffffffff << 0x0))) | ((((st64) Rt) & 0xffffffff) << 0x0));
	RzILOpPure *op_LSHIFT_10 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0));
	RzILOpPure *op_NOT_11 = LOGNOT(op_LSHIFT_10);
	RzILOpPure *op_AND_12 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_11);
	RzILOpPure *op_AND_16 = LOGAND(CAST(64, MSB(Rt), DUP(Rt)), SN(64, 0xffffffff));
	RzILOpPure *op_LSHIFT_20 = SHIFTL0(op_AND_16, SN(32, 0));
	RzILOpPure *op_OR_21 = LOGOR(op_AND_12, op_LSHIFT_20);
	RzILOpEffect *op_ASSIGN_22 = WRITE_REG(bundle, Rdd_op, op_OR_21);

	// Rdd = ((Rdd & (~(0xffffffff << 0x20))) | ((((st64) Rs) & 0xffffffff) << 0x20));
	RzILOpPure *op_LSHIFT_28 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0x20));
	RzILOpPure *op_NOT_29 = LOGNOT(op_LSHIFT_28);
	RzILOpPure *op_AND_30 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_29);
	RzILOpPure *op_AND_34 = LOGAND(CAST(64, MSB(Rs), DUP(Rs)), SN(64, 0xffffffff));
	RzILOpPure *op_LSHIFT_38 = SHIFTL0(op_AND_34, SN(32, 0x20));
	RzILOpPure *op_OR_39 = LOGOR(op_AND_30, op_LSHIFT_38);
	RzILOpEffect *op_ASSIGN_40 = WRITE_REG(bundle, Rdd_op, op_OR_39);

	// nop;
	RzILOpEffect *nop_42 = NOP();

	// seq(Rdd = ((Rdd & (~(0xffffffff << 0x0))) | ((((st64) Rt) & 0xff ...;
	RzILOpEffect *seq_then_43 = SEQN(2, op_ASSIGN_22, op_ASSIGN_40);

	// seq(nop);
	RzILOpEffect *seq_else_44 = nop_42;

	// if (! (((st32) Pu_new) & 0x1)) {seq(Rdd = ((Rdd & (~(0xffffffff << 0x0))) | ((((st64) Rt) & 0xff ...} else {seq(nop)};
	RzILOpPure *op_AND_3 = LOGAND(CAST(32, MSB(Pu_new), DUP(Pu_new)), SN(32, 1));
	RzILOpPure *op_INV_4 = INV(NON_ZERO(op_AND_3));
	RzILOpEffect *branch_45 = BRANCH(op_INV_4, seq_then_43, seq_else_44);

	RzILOpEffect *instruction_sequence = branch_45;
	return instruction_sequence;
}

// if (Pu.new) Rdd = combine(Rs,Rt)
// True-sense variant of ccombinewnewf (no INV on the predicate bit).
RzILOpEffect *hex_il_op_c2_ccombinewnewt(HexInsnPktBundle *bundle) {
	const HexInsn *hi = bundle->insn;
	HexPkt *pkt = bundle->pkt;
	// READ
	const HexOp *Pu_new_op = ISA2REG(hi, 'u', true);
	RzILOpPure *Pu_new = READ_REG(pkt, Pu_new_op, true);
	const HexOp *Rdd_op = ISA2REG(hi, 'd', false);
	const HexOp *Rt_op = ISA2REG(hi, 't', false);
	RzILOpPure *Rt = READ_REG(pkt, Rt_op, false);
	const HexOp *Rs_op = ISA2REG(hi, 's', false);
	RzILOpPure *Rs = READ_REG(pkt, Rs_op, false);

	// Rdd = ((Rdd & (~(0xffffffff << 0x0))) | ((((st64) Rt) & 0xffffffff) << 0x0));
	RzILOpPure *op_LSHIFT_9 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0));
	RzILOpPure *op_NOT_10 = LOGNOT(op_LSHIFT_9);
	RzILOpPure *op_AND_11 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_10);
	RzILOpPure *op_AND_15 = LOGAND(CAST(64, MSB(Rt), DUP(Rt)), SN(64, 0xffffffff));
	RzILOpPure *op_LSHIFT_19 = SHIFTL0(op_AND_15, SN(32, 0));
	RzILOpPure *op_OR_20 = LOGOR(op_AND_11, op_LSHIFT_19);
	RzILOpEffect *op_ASSIGN_21 = WRITE_REG(bundle, Rdd_op, op_OR_20);

	// Rdd = ((Rdd & (~(0xffffffff << 0x20))) | ((((st64) Rs) & 0xffffffff) << 0x20));
	RzILOpPure *op_LSHIFT_27 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0x20));
	RzILOpPure *op_NOT_28 = LOGNOT(op_LSHIFT_27);
	RzILOpPure *op_AND_29 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_28);
	RzILOpPure *op_AND_33 = LOGAND(CAST(64, MSB(Rs), DUP(Rs)), SN(64, 0xffffffff));
	RzILOpPure *op_LSHIFT_37 = SHIFTL0(op_AND_33, SN(32, 0x20));
	RzILOpPure *op_OR_38 = LOGOR(op_AND_29, op_LSHIFT_37);
	RzILOpEffect *op_ASSIGN_39 = WRITE_REG(bundle, Rdd_op, op_OR_38);

	// nop;
	RzILOpEffect *nop_41 = NOP();

	// seq(Rdd = ((Rdd & (~(0xffffffff << 0x0))) | ((((st64) Rt) & 0xff ...;
	RzILOpEffect *seq_then_42 = SEQN(2, op_ASSIGN_21, op_ASSIGN_39);

	// seq(nop);
	RzILOpEffect *seq_else_43 = nop_41;

	// if ((((st32) Pu_new) & 0x1)) {seq(Rdd = ((Rdd & (~(0xffffffff << 0x0))) | ((((st64) Rt) & 0xff ...} else {seq(nop)};
	RzILOpPure *op_AND_3 = LOGAND(CAST(32, MSB(Pu_new), DUP(Pu_new)), SN(32, 1));
	RzILOpEffect *branch_44 = BRANCH(NON_ZERO(op_AND_3), seq_then_42, seq_else_43);

	RzILOpEffect *instruction_sequence = branch_44;
	return instruction_sequence;
}

// if (Pu) Rdd = combine(Rs,Rt)
// True-sense variant of ccombinewf.
RzILOpEffect *hex_il_op_c2_ccombinewt(HexInsnPktBundle *bundle) {
	const HexInsn *hi = bundle->insn;
	HexPkt *pkt = bundle->pkt;
	// READ
	const HexOp *Pu_op = ISA2REG(hi, 'u', false);
	RzILOpPure *Pu = READ_REG(pkt, Pu_op, false);
	const HexOp *Rdd_op = ISA2REG(hi, 'd', false);
	const HexOp *Rt_op = ISA2REG(hi, 't', false);
	RzILOpPure *Rt = READ_REG(pkt, Rt_op, false);
	const HexOp *Rs_op = ISA2REG(hi, 's', false);
	RzILOpPure *Rs = READ_REG(pkt, Rs_op, false);

	// Rdd = ((Rdd & (~(0xffffffff << 0x0))) | ((((st64) Rt) & 0xffffffff) << 0x0));
	RzILOpPure *op_LSHIFT_9 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0));
	RzILOpPure *op_NOT_10 = LOGNOT(op_LSHIFT_9);
	RzILOpPure *op_AND_11 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_10);
	RzILOpPure *op_AND_15 = LOGAND(CAST(64, MSB(Rt), DUP(Rt)), SN(64, 0xffffffff));
	RzILOpPure *op_LSHIFT_19 = SHIFTL0(op_AND_15, SN(32, 0));
	RzILOpPure *op_OR_20 = LOGOR(op_AND_11, op_LSHIFT_19);
	RzILOpEffect *op_ASSIGN_21 = WRITE_REG(bundle, Rdd_op, op_OR_20);

	// Rdd = ((Rdd & (~(0xffffffff << 0x20))) | ((((st64) Rs) & 0xffffffff) << 0x20));
	RzILOpPure *op_LSHIFT_27 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0x20));
	RzILOpPure *op_NOT_28 = LOGNOT(op_LSHIFT_27);
	RzILOpPure *op_AND_29 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_28);
	RzILOpPure *op_AND_33 = LOGAND(CAST(64, MSB(Rs), DUP(Rs)), SN(64, 0xffffffff));
	RzILOpPure *op_LSHIFT_37 = SHIFTL0(op_AND_33, SN(32, 0x20));
	RzILOpPure *op_OR_38 = LOGOR(op_AND_29, op_LSHIFT_37);
	RzILOpEffect *op_ASSIGN_39 = WRITE_REG(bundle, Rdd_op, op_OR_38);

	// nop;
	RzILOpEffect *nop_41 = NOP();

	// seq(Rdd = ((Rdd & (~(0xffffffff << 0x0))) | ((((st64) Rt) & 0xff ...;
	RzILOpEffect *seq_then_42 = SEQN(2, op_ASSIGN_21, op_ASSIGN_39);

	// seq(nop);
	RzILOpEffect *seq_else_43 = nop_41;

	// if ((((st32) Pu) & 0x1)) {seq(Rdd = ((Rdd & (~(0xffffffff << 0x0))) | ((((st64) Rt) & 0xff ...} else {seq(nop)};
	RzILOpPure *op_AND_3 = LOGAND(CAST(32, MSB(Pu), DUP(Pu)), SN(32, 1));
	RzILOpEffect *branch_44 = BRANCH(NON_ZERO(op_AND_3), seq_then_42, seq_else_43);

	RzILOpEffect *instruction_sequence = branch_44;
	return instruction_sequence;
}

// if (!Pu) Rd = Ii
// Conditional move-immediate: Rd = s when bit 0 of Pu is clear, else NOP.
RzILOpEffect *hex_il_op_c2_cmoveif(HexInsnPktBundle *bundle) {
	const HexInsn *hi = bundle->insn;
	HexPkt *pkt = bundle->pkt;
	// READ
	RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's'));
	const HexOp *Pu_op = ISA2REG(hi, 'u', false);
	RzILOpPure *Pu = READ_REG(pkt, Pu_op, false);
	const HexOp *Rd_op = ISA2REG(hi, 'd', false);

	// s = s;
	RzILOpEffect *imm_assign_0 = SETL("s", s);

	// Rd = s;
	RzILOpEffect *op_ASSIGN_8 = WRITE_REG(bundle, Rd_op, VARL("s"));

	// nop;
	RzILOpEffect *nop_9 = NOP();

	// seq(Rd = s);
	RzILOpEffect *seq_then_10 = op_ASSIGN_8;

	// seq(nop);
	RzILOpEffect *seq_else_11 = nop_9;

	// if (! (((st32) Pu) & 0x1)) {seq(Rd = s)} else {seq(nop)};
	RzILOpPure *op_AND_5 = LOGAND(CAST(32, MSB(Pu), DUP(Pu)), SN(32, 1));
	RzILOpPure *op_INV_6 = INV(NON_ZERO(op_AND_5));
	RzILOpEffect *branch_12 = BRANCH(op_INV_6, seq_then_10, seq_else_11);

	RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_0, branch_12);
	return instruction_sequence;
}

// if (Pu) Rd = Ii
// True-sense variant of cmoveif.
RzILOpEffect *hex_il_op_c2_cmoveit(HexInsnPktBundle *bundle) {
	const HexInsn *hi = bundle->insn;
	HexPkt *pkt = bundle->pkt;
	// READ
	RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's'));
	const HexOp *Pu_op = ISA2REG(hi, 'u', false);
	RzILOpPure *Pu = READ_REG(pkt, Pu_op, false);
	const HexOp *Rd_op = ISA2REG(hi, 'd', false);

	// s = s;
	RzILOpEffect *imm_assign_0 = SETL("s", s);

	// Rd = s;
	RzILOpEffect *op_ASSIGN_7 = WRITE_REG(bundle, Rd_op, VARL("s"));

	// nop;
	RzILOpEffect *nop_8 = NOP();

	// seq(Rd = s);
	RzILOpEffect *seq_then_9 = op_ASSIGN_7;

	// seq(nop);
	RzILOpEffect *seq_else_10 = nop_8;

	// if ((((st32) Pu) & 0x1)) {seq(Rd = s)} else {seq(nop)};
	RzILOpPure *op_AND_5 = LOGAND(CAST(32, MSB(Pu), DUP(Pu)), SN(32, 1));
	RzILOpEffect *branch_11 = BRANCH(NON_ZERO(op_AND_5), seq_then_9, seq_else_10);

	RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_0, branch_11);
	return instruction_sequence;
}

// if (!Pu.new) Rd = Ii
// .new-predicated variant of cmoveif.
RzILOpEffect *hex_il_op_c2_cmovenewif(HexInsnPktBundle *bundle) {
	const HexInsn *hi = bundle->insn;
	HexPkt *pkt = bundle->pkt;
	// READ
	RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's'));
	const HexOp *Pu_new_op = ISA2REG(hi, 'u', true);
	RzILOpPure *Pu_new = READ_REG(pkt, Pu_new_op, true);
	const HexOp *Rd_op = ISA2REG(hi, 'd', false);

	// s = s;
	RzILOpEffect *imm_assign_0 = SETL("s", s);

	// Rd = s;
	RzILOpEffect *op_ASSIGN_8 = WRITE_REG(bundle, Rd_op, VARL("s"));

	// nop;
	RzILOpEffect *nop_9 = NOP();

	// seq(Rd = s);
	RzILOpEffect *seq_then_10 = op_ASSIGN_8;

	// seq(nop);
	RzILOpEffect *seq_else_11 = nop_9;

	// if (! (((st32) Pu_new) & 0x1)) {seq(Rd = s)} else {seq(nop)};
	RzILOpPure *op_AND_5 = LOGAND(CAST(32, MSB(Pu_new), DUP(Pu_new)), SN(32, 1));
	RzILOpPure *op_INV_6 = INV(NON_ZERO(op_AND_5));
	RzILOpEffect *branch_12 = BRANCH(op_INV_6, seq_then_10, seq_else_11);

	RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_0, branch_12);
	return instruction_sequence;
}

// if (Pu.new) Rd = Ii
// .new-predicated variant of cmoveit.
RzILOpEffect *hex_il_op_c2_cmovenewit(HexInsnPktBundle *bundle) {
	const HexInsn *hi = bundle->insn;
	HexPkt *pkt = bundle->pkt;
	// READ
	RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's'));
	const HexOp *Pu_new_op = ISA2REG(hi, 'u', true);
	RzILOpPure *Pu_new = READ_REG(pkt, Pu_new_op, true);
	const HexOp *Rd_op = ISA2REG(hi, 'd', false);

	// s = s;
	RzILOpEffect *imm_assign_0 = SETL("s", s);

	// Rd = s;
	RzILOpEffect *op_ASSIGN_7 = WRITE_REG(bundle, Rd_op, VARL("s"));

	// nop;
	RzILOpEffect *nop_8 = NOP();

	// seq(Rd = s);
	RzILOpEffect *seq_then_9 = op_ASSIGN_7;

	// seq(nop);
	RzILOpEffect *seq_else_10 = nop_8;

	// if ((((st32) Pu_new) & 0x1)) {seq(Rd = s)} else {seq(nop)};
	RzILOpPure *op_AND_5 = LOGAND(CAST(32, MSB(Pu_new), DUP(Pu_new)), SN(32, 1));
	RzILOpEffect *branch_11 = BRANCH(NON_ZERO(op_AND_5), seq_then_9, seq_else_10);

	RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_0, branch_11);
	return instruction_sequence;
}

// Pd = cmp.eq(Rs,Rt)
// NOTE(review): this definition continues beyond the end of this chunk; the
// visible fragment is reproduced unchanged.
RzILOpEffect *hex_il_op_c2_cmpeq(HexInsnPktBundle *bundle) {
	const HexInsn *hi = bundle->insn;
	HexPkt *pkt = bundle->pkt;
	// READ
	const HexOp *Pd_op = ISA2REG(hi, 'd', false);
	const HexOp *Rs_op = ISA2REG(hi, 's', false);
	RzILOpPure *Rs = READ_REG(pkt, Rs_op, false);
	const HexOp *Rt_op = ISA2REG(hi, 't', false);
	RzILOpPure *Rt = READ_REG(pkt, Rt_op, false);

	// Pd = ((st8) ((Rs == Rt) ?
0xff : 0x0)); + RzILOpPure *op_EQ_3 = EQ(Rs, Rt); + RzILOpPure *cond_6 = ITE(op_EQ_3, SN(32, 0xff), SN(32, 0)); + RzILOpEffect *op_ASSIGN_8 = WRITE_REG(bundle, Pd_op, CAST(8, MSB(cond_6), DUP(cond_6))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_8; + return instruction_sequence; +} + +// Pd = cmp.eq(Rs,Ii) +RzILOpEffect *hex_il_op_c2_cmpeqi(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + const HexOp *Pd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // s = s; + RzILOpEffect *imm_assign_0 = SETL("s", s); + + // Pd = ((st8) ((Rs == s) ? 0xff : 0x0)); + RzILOpPure *op_EQ_4 = EQ(Rs, VARL("s")); + RzILOpPure *cond_7 = ITE(op_EQ_4, SN(32, 0xff), SN(32, 0)); + RzILOpEffect *op_ASSIGN_9 = WRITE_REG(bundle, Pd_op, CAST(8, MSB(cond_7), DUP(cond_7))); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_0, op_ASSIGN_9); + return instruction_sequence; +} + +// Pd = cmp.eq(Rss,Rtt) +RzILOpEffect *hex_il_op_c2_cmpeqp(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Pd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // Pd = ((st8) ((Rss == Rtt) ? 
0xff : 0x0)); + RzILOpPure *op_EQ_3 = EQ(Rss, Rtt); + RzILOpPure *cond_6 = ITE(op_EQ_3, SN(32, 0xff), SN(32, 0)); + RzILOpEffect *op_ASSIGN_8 = WRITE_REG(bundle, Pd_op, CAST(8, MSB(cond_6), DUP(cond_6))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_8; + return instruction_sequence; +} + +// Pd = cmp.gt(Rs,Rt) +RzILOpEffect *hex_il_op_c2_cmpgt(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Pd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Pd = ((st8) ((Rs > Rt) ? 0xff : 0x0)); + RzILOpPure *op_GT_3 = SGT(Rs, Rt); + RzILOpPure *cond_6 = ITE(op_GT_3, SN(32, 0xff), SN(32, 0)); + RzILOpEffect *op_ASSIGN_8 = WRITE_REG(bundle, Pd_op, CAST(8, MSB(cond_6), DUP(cond_6))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_8; + return instruction_sequence; +} + +// Pd = cmp.gt(Rs,Ii) +RzILOpEffect *hex_il_op_c2_cmpgti(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + const HexOp *Pd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // s = s; + RzILOpEffect *imm_assign_0 = SETL("s", s); + + // Pd = ((st8) ((Rs > s) ? 
0xff : 0x0)); + RzILOpPure *op_GT_4 = SGT(Rs, VARL("s")); + RzILOpPure *cond_7 = ITE(op_GT_4, SN(32, 0xff), SN(32, 0)); + RzILOpEffect *op_ASSIGN_9 = WRITE_REG(bundle, Pd_op, CAST(8, MSB(cond_7), DUP(cond_7))); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_0, op_ASSIGN_9); + return instruction_sequence; +} + +// Pd = cmp.gt(Rss,Rtt) +RzILOpEffect *hex_il_op_c2_cmpgtp(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Pd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // Pd = ((st8) ((Rss > Rtt) ? 0xff : 0x0)); + RzILOpPure *op_GT_3 = SGT(Rss, Rtt); + RzILOpPure *cond_6 = ITE(op_GT_3, SN(32, 0xff), SN(32, 0)); + RzILOpEffect *op_ASSIGN_8 = WRITE_REG(bundle, Pd_op, CAST(8, MSB(cond_6), DUP(cond_6))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_8; + return instruction_sequence; +} + +// Pd = cmp.gtu(Rs,Rt) +RzILOpEffect *hex_il_op_c2_cmpgtu(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Pd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Pd = ((st8) ((((ut32) Rs) > ((ut32) Rt)) ? 
0xff : 0x0)); + RzILOpPure *op_GT_5 = UGT(CAST(32, IL_FALSE, Rs), CAST(32, IL_FALSE, Rt)); + RzILOpPure *cond_8 = ITE(op_GT_5, SN(32, 0xff), SN(32, 0)); + RzILOpEffect *op_ASSIGN_10 = WRITE_REG(bundle, Pd_op, CAST(8, MSB(cond_8), DUP(cond_8))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_10; + return instruction_sequence; +} + +// Pd = cmp.gtu(Rs,Ii) +RzILOpEffect *hex_il_op_c2_cmpgtui(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Pd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // Pd = ((st8) ((((ut32) Rs) > u) ? 0xff : 0x0)); + RzILOpPure *op_GT_5 = UGT(CAST(32, IL_FALSE, Rs), VARL("u")); + RzILOpPure *cond_8 = ITE(op_GT_5, SN(32, 0xff), SN(32, 0)); + RzILOpEffect *op_ASSIGN_10 = WRITE_REG(bundle, Pd_op, CAST(8, MSB(cond_8), DUP(cond_8))); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_0, op_ASSIGN_10); + return instruction_sequence; +} + +// Pd = cmp.gtu(Rss,Rtt) +RzILOpEffect *hex_il_op_c2_cmpgtup(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Pd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // Pd = ((st8) ((((ut64) Rss) > ((ut64) Rtt)) ? 
0xff : 0x0)); + RzILOpPure *op_GT_5 = UGT(CAST(64, IL_FALSE, Rss), CAST(64, IL_FALSE, Rtt)); + RzILOpPure *cond_8 = ITE(op_GT_5, SN(32, 0xff), SN(32, 0)); + RzILOpEffect *op_ASSIGN_10 = WRITE_REG(bundle, Pd_op, CAST(8, MSB(cond_8), DUP(cond_8))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_10; + return instruction_sequence; +} + +// Rdd = mask(Pt) +RzILOpEffect *hex_il_op_c2_mask(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: st32 i; + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Pt_op = ISA2REG(hi, 't', false); + RzILOpPure *Pt = READ_REG(pkt, Pt_op, false); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_2 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_5 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp153 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_7 = SETL("h_tmp153", VARL("i")); + + // seq(h_tmp153 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_8 = SEQN(2, op_ASSIGN_hybrid_tmp_7, op_INC_5); + + // Rdd = ((st64) (((ut64) (Rdd & (~(0xff << i * 0x8)))) | (((ut64) (((st64) ((((st32) (Pt >> i)) & 0x1) ? 
0xff : 0x0)) & 0xff)) << i * 0x8))); + RzILOpPure *op_MUL_12 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_LSHIFT_13 = SHIFTL0(SN(64, 0xff), op_MUL_12); + RzILOpPure *op_NOT_14 = LOGNOT(op_LSHIFT_13); + RzILOpPure *op_AND_15 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_14); + RzILOpPure *op_RSHIFT_17 = SHIFTRA(Pt, VARL("i")); + RzILOpPure *op_AND_20 = LOGAND(CAST(32, MSB(op_RSHIFT_17), DUP(op_RSHIFT_17)), SN(32, 1)); + RzILOpPure *cond_23 = ITE(NON_ZERO(op_AND_20), SN(32, 0xff), SN(32, 0)); + RzILOpPure *op_AND_26 = LOGAND(CAST(64, MSB(cond_23), DUP(cond_23)), SN(64, 0xff)); + RzILOpPure *op_MUL_29 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_LSHIFT_30 = SHIFTL0(CAST(64, IL_FALSE, op_AND_26), op_MUL_29); + RzILOpPure *op_OR_32 = LOGOR(CAST(64, IL_FALSE, op_AND_15), op_LSHIFT_30); + RzILOpEffect *op_ASSIGN_34 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_32)); + + // seq(h_tmp153; Rdd = ((st64) (((ut64) (Rdd & (~(0xff << i * 0x8)) ...; + RzILOpEffect *seq_36 = op_ASSIGN_34; + + // seq(seq(h_tmp153; Rdd = ((st64) (((ut64) (Rdd & (~(0xff << i * 0 ...; + RzILOpEffect *seq_37 = SEQN(2, seq_36, seq_8); + + // while ((i < 0x8)) { seq(seq(h_tmp153; Rdd = ((st64) (((ut64) (Rdd & (~(0xff << i * 0 ... 
}; + RzILOpPure *op_LT_4 = SLT(VARL("i"), SN(32, 8)); + RzILOpEffect *for_38 = REPEAT(op_LT_4, seq_37); + + // seq(i = 0x0; while ((i < 0x8)) { seq(seq(h_tmp153; Rdd = ((st64) ...; + RzILOpEffect *seq_39 = SEQN(2, op_ASSIGN_2, for_38); + + RzILOpEffect *instruction_sequence = seq_39; + return instruction_sequence; +} + +// Rd = mux(Pu,Rs,Rt) +RzILOpEffect *hex_il_op_c2_mux(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Pu_op = ISA2REG(hi, 'u', false); + RzILOpPure *Pu = READ_REG(pkt, Pu_op, false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rd = ((((st32) Pu) & 0x1) ? Rs : Rt); + RzILOpPure *op_AND_4 = LOGAND(CAST(32, MSB(Pu), DUP(Pu)), SN(32, 1)); + RzILOpPure *cond_7 = ITE(NON_ZERO(op_AND_4), Rs, Rt); + RzILOpEffect *op_ASSIGN_8 = WRITE_REG(bundle, Rd_op, cond_7); + + RzILOpEffect *instruction_sequence = op_ASSIGN_8; + return instruction_sequence; +} + +// Rd = mux(Pu,Ii,II) +RzILOpEffect *hex_il_op_c2_muxii(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Pu_op = ISA2REG(hi, 'u', false); + RzILOpPure *Pu = READ_REG(pkt, Pu_op, false); + RzILOpPure *S = SN(32, (st32)ISA2IMM(hi, 'S')); + + // s = s; + RzILOpEffect *imm_assign_0 = SETL("s", s); + + // S = S; + RzILOpEffect *imm_assign_7 = SETL("S", S); + + // Rd = ((((st32) Pu) & 0x1) ? 
s : S); + RzILOpPure *op_AND_6 = LOGAND(CAST(32, MSB(Pu), DUP(Pu)), SN(32, 1)); + RzILOpPure *cond_9 = ITE(NON_ZERO(op_AND_6), VARL("s"), VARL("S")); + RzILOpEffect *op_ASSIGN_10 = WRITE_REG(bundle, Rd_op, cond_9); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, imm_assign_7, op_ASSIGN_10); + return instruction_sequence; +} + +// Rd = mux(Pu,Rs,Ii) +RzILOpEffect *hex_il_op_c2_muxir(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Pu_op = ISA2REG(hi, 'u', false); + RzILOpPure *Pu = READ_REG(pkt, Pu_op, false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // s = s; + RzILOpEffect *imm_assign_0 = SETL("s", s); + + // Rd = ((((st32) Pu) & 0x1) ? Rs : s); + RzILOpPure *op_AND_6 = LOGAND(CAST(32, MSB(Pu), DUP(Pu)), SN(32, 1)); + RzILOpPure *cond_8 = ITE(NON_ZERO(op_AND_6), Rs, VARL("s")); + RzILOpEffect *op_ASSIGN_9 = WRITE_REG(bundle, Rd_op, cond_8); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_0, op_ASSIGN_9); + return instruction_sequence; +} + +// Rd = mux(Pu,Ii,Rs) +RzILOpEffect *hex_il_op_c2_muxri(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Pu_op = ISA2REG(hi, 'u', false); + RzILOpPure *Pu = READ_REG(pkt, Pu_op, false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // s = s; + RzILOpEffect *imm_assign_0 = SETL("s", s); + + // Rd = ((((st32) Pu) & 0x1) ? 
s : Rs); + RzILOpPure *op_AND_6 = LOGAND(CAST(32, MSB(Pu), DUP(Pu)), SN(32, 1)); + RzILOpPure *cond_8 = ITE(NON_ZERO(op_AND_6), VARL("s"), Rs); + RzILOpEffect *op_ASSIGN_9 = WRITE_REG(bundle, Rd_op, cond_8); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_0, op_ASSIGN_9); + return instruction_sequence; +} + +// Pd = not(Ps) +RzILOpEffect *hex_il_op_c2_not(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Pd_op = ISA2REG(hi, 'd', false); + const HexOp *Ps_op = ISA2REG(hi, 's', false); + RzILOpPure *Ps = READ_REG(pkt, Ps_op, false); + + // Pd = ((st8) (~((st32) Ps))); + RzILOpPure *op_NOT_3 = LOGNOT(CAST(32, MSB(Ps), DUP(Ps))); + RzILOpEffect *op_ASSIGN_5 = WRITE_REG(bundle, Pd_op, CAST(8, MSB(op_NOT_3), DUP(op_NOT_3))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_5; + return instruction_sequence; +} + +// Pd = or(Pt,Ps) +RzILOpEffect *hex_il_op_c2_or(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Pd_op = ISA2REG(hi, 'd', false); + const HexOp *Ps_op = ISA2REG(hi, 's', false); + RzILOpPure *Ps = READ_REG(pkt, Ps_op, false); + const HexOp *Pt_op = ISA2REG(hi, 't', false); + RzILOpPure *Pt = READ_REG(pkt, Pt_op, false); + + // Pd = ((st8) (((st32) Ps) | ((st32) Pt))); + RzILOpPure *op_OR_5 = LOGOR(CAST(32, MSB(Ps), DUP(Ps)), CAST(32, MSB(Pt), DUP(Pt))); + RzILOpEffect *op_ASSIGN_7 = WRITE_REG(bundle, Pd_op, CAST(8, MSB(op_OR_5), DUP(op_OR_5))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_7; + return instruction_sequence; +} + +// Pd = or(Pt,!Ps) +RzILOpEffect *hex_il_op_c2_orn(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Pd_op = ISA2REG(hi, 'd', false); + const HexOp *Pt_op = ISA2REG(hi, 't', false); + RzILOpPure *Pt = READ_REG(pkt, Pt_op, false); + const HexOp *Ps_op = ISA2REG(hi, 's', false); + RzILOpPure *Ps = READ_REG(pkt, Ps_op, 
false); + + // Pd = ((st8) (((st32) Pt) | (~((st32) Ps)))); + RzILOpPure *op_NOT_4 = LOGNOT(CAST(32, MSB(Ps), DUP(Ps))); + RzILOpPure *op_OR_6 = LOGOR(CAST(32, MSB(Pt), DUP(Pt)), op_NOT_4); + RzILOpEffect *op_ASSIGN_8 = WRITE_REG(bundle, Pd_op, CAST(8, MSB(op_OR_6), DUP(op_OR_6))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_8; + return instruction_sequence; +} + +// Rd = Ps +RzILOpEffect *hex_il_op_c2_tfrpr(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Ps_op = ISA2REG(hi, 's', false); + RzILOpPure *Ps = READ_REG(pkt, Ps_op, false); + + // Rd = ((st32) extract64(((ut64) Ps), 0x0, 0x8)); + RzILOpEffect *op_ASSIGN_11 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, EXTRACT64(CAST(64, IL_FALSE, Ps), SN(32, 0), SN(32, 8)))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_11; + return instruction_sequence; +} + +// Pd = Rs +RzILOpEffect *hex_il_op_c2_tfrrp(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Pd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // Pd = ((st8) ((ut8) ((Rs >> 0x0) & 0xff))); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rs, SN(32, 0)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(32, 0xff)); + RzILOpEffect *op_ASSIGN_10 = WRITE_REG(bundle, Pd_op, CAST(8, IL_FALSE, CAST(8, IL_FALSE, op_AND_7))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_10; + return instruction_sequence; +} + +// Rd = vitpack(Ps,Pt) +RzILOpEffect *hex_il_op_c2_vitpack(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Ps_op = ISA2REG(hi, 's', false); + RzILOpPure *Ps = READ_REG(pkt, Ps_op, false); + const HexOp *Pt_op = ISA2REG(hi, 't', false); + RzILOpPure *Pt = READ_REG(pkt, Pt_op, false); 
+ + // Rd = ((((st32) Ps) & 0x55) | (((st32) Pt) & 0xaa)); + RzILOpPure *op_AND_4 = LOGAND(CAST(32, MSB(Ps), DUP(Ps)), SN(32, 0x55)); + RzILOpPure *op_AND_8 = LOGAND(CAST(32, MSB(Pt), DUP(Pt)), SN(32, 0xaa)); + RzILOpPure *op_OR_9 = LOGOR(op_AND_4, op_AND_8); + RzILOpEffect *op_ASSIGN_10 = WRITE_REG(bundle, Rd_op, op_OR_9); + + RzILOpEffect *instruction_sequence = op_ASSIGN_10; + return instruction_sequence; +} + +// Rdd = vmux(Pu,Rss,Rtt) +RzILOpEffect *hex_il_op_c2_vmux(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: st32 i; + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Pu_op = ISA2REG(hi, 'u', false); + RzILOpPure *Pu = READ_REG(pkt, Pu_op, false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_2 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_5 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp154 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_7 = SETL("h_tmp154", VARL("i")); + + // seq(h_tmp154 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_8 = SEQN(2, op_ASSIGN_hybrid_tmp_7, op_INC_5); + + // Rdd = ((st64) (((ut64) (Rdd & (~(0xff << i * 0x8)))) | (((ut64) (((st64) ((st32) ((((st32) (Pu >> i)) & 0x1) ? 
((st8) ((Rss >> i * 0x8) & ((st64) 0xff))) : ((st8) ((Rtt >> i * 0x8) & ((st64) 0xff)))))) & 0xff)) << i * 0x8))); + RzILOpPure *op_MUL_12 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_LSHIFT_13 = SHIFTL0(SN(64, 0xff), op_MUL_12); + RzILOpPure *op_NOT_14 = LOGNOT(op_LSHIFT_13); + RzILOpPure *op_AND_15 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_14); + RzILOpPure *op_RSHIFT_17 = SHIFTRA(Pu, VARL("i")); + RzILOpPure *op_AND_20 = LOGAND(CAST(32, MSB(op_RSHIFT_17), DUP(op_RSHIFT_17)), SN(32, 1)); + RzILOpPure *op_MUL_23 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_RSHIFT_24 = SHIFTRA(Rss, op_MUL_23); + RzILOpPure *op_AND_27 = LOGAND(op_RSHIFT_24, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_MUL_31 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_RSHIFT_32 = SHIFTRA(Rtt, op_MUL_31); + RzILOpPure *op_AND_35 = LOGAND(op_RSHIFT_32, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *cond_37 = ITE(NON_ZERO(op_AND_20), CAST(8, MSB(op_AND_27), DUP(op_AND_27)), CAST(8, MSB(op_AND_35), DUP(op_AND_35))); + RzILOpPure *op_AND_41 = LOGAND(CAST(64, MSB(CAST(32, MSB(cond_37), DUP(cond_37))), CAST(32, MSB(DUP(cond_37)), DUP(cond_37))), SN(64, 0xff)); + RzILOpPure *op_MUL_44 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_LSHIFT_45 = SHIFTL0(CAST(64, IL_FALSE, op_AND_41), op_MUL_44); + RzILOpPure *op_OR_47 = LOGOR(CAST(64, IL_FALSE, op_AND_15), op_LSHIFT_45); + RzILOpEffect *op_ASSIGN_49 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_47)); + + // seq(h_tmp154; Rdd = ((st64) (((ut64) (Rdd & (~(0xff << i * 0x8)) ...; + RzILOpEffect *seq_51 = op_ASSIGN_49; + + // seq(seq(h_tmp154; Rdd = ((st64) (((ut64) (Rdd & (~(0xff << i * 0 ...; + RzILOpEffect *seq_52 = SEQN(2, seq_51, seq_8); + + // while ((i < 0x8)) { seq(seq(h_tmp154; Rdd = ((st64) (((ut64) (Rdd & (~(0xff << i * 0 ... 
}; + RzILOpPure *op_LT_4 = SLT(VARL("i"), SN(32, 8)); + RzILOpEffect *for_53 = REPEAT(op_LT_4, seq_52); + + // seq(i = 0x0; while ((i < 0x8)) { seq(seq(h_tmp154; Rdd = ((st64) ...; + RzILOpEffect *seq_54 = SEQN(2, op_ASSIGN_2, for_53); + + RzILOpEffect *instruction_sequence = seq_54; + return instruction_sequence; +} + +// Pd = xor(Ps,Pt) +RzILOpEffect *hex_il_op_c2_xor(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Pd_op = ISA2REG(hi, 'd', false); + const HexOp *Ps_op = ISA2REG(hi, 's', false); + RzILOpPure *Ps = READ_REG(pkt, Ps_op, false); + const HexOp *Pt_op = ISA2REG(hi, 't', false); + RzILOpPure *Pt = READ_REG(pkt, Pt_op, false); + + // Pd = ((st8) (((st32) Ps) ^ ((st32) Pt))); + RzILOpPure *op_XOR_5 = LOGXOR(CAST(32, MSB(Ps), DUP(Ps)), CAST(32, MSB(Pt), DUP(Pt))); + RzILOpEffect *op_ASSIGN_7 = WRITE_REG(bundle, Pd_op, CAST(8, MSB(op_XOR_5), DUP(op_XOR_5))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_7; + return instruction_sequence; +} + +#include \ No newline at end of file diff --git a/librz/arch/isa/hexagon/il_ops/hexagon_il_C4_ops.c b/librz/arch/isa/hexagon/il_ops/hexagon_il_C4_ops.c new file mode 100644 index 00000000000..4b4becc9330 --- /dev/null +++ b/librz/arch/isa/hexagon/il_ops/hexagon_il_C4_ops.c @@ -0,0 +1,562 @@ +// SPDX-FileCopyrightText: 2021 Rot127 +// SPDX-License-Identifier: LGPL-3.0-only + +// LLVM commit: b6f51787f6c8e77143f0aef6b58ddc7c55741d5c +// LLVM commit date: 2023-11-15 07:10:59 -0800 (ISO 8601 format) +// Date of code generation: 2024-03-16 06:22:39-05:00 +//======================================== +// The following code is generated. +// Do not edit. 
Repository of code generator: +// https://github.com/rizinorg/rz-hexagon + +#include +#include "../hexagon_il.h" +#include +#include + +// Rd = add(pc,Ii) +RzILOpEffect *hex_il_op_c4_addipc(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + RzILOpPure *pc = U32(pkt->pkt_addr); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + + // u = u; + RzILOpEffect *imm_assign_2 = SETL("u", u); + + // Rd = ((st32) pc + u); + RzILOpPure *op_ADD_4 = ADD(pc, VARL("u")); + RzILOpEffect *op_ASSIGN_6 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, op_ADD_4)); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_2, op_ASSIGN_6); + return instruction_sequence; +} + +// Pd = and(Ps,and(Pt,Pu)) +RzILOpEffect *hex_il_op_c4_and_and(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Pd_op = ISA2REG(hi, 'd', false); + const HexOp *Ps_op = ISA2REG(hi, 's', false); + RzILOpPure *Ps = READ_REG(pkt, Ps_op, false); + const HexOp *Pt_op = ISA2REG(hi, 't', false); + RzILOpPure *Pt = READ_REG(pkt, Pt_op, false); + const HexOp *Pu_op = ISA2REG(hi, 'u', false); + RzILOpPure *Pu = READ_REG(pkt, Pu_op, false); + + // Pd = ((st8) ((((st32) Ps) & ((st32) Pt)) & ((st32) Pu))); + RzILOpPure *op_AND_5 = LOGAND(CAST(32, MSB(Ps), DUP(Ps)), CAST(32, MSB(Pt), DUP(Pt))); + RzILOpPure *op_AND_8 = LOGAND(op_AND_5, CAST(32, MSB(Pu), DUP(Pu))); + RzILOpEffect *op_ASSIGN_10 = WRITE_REG(bundle, Pd_op, CAST(8, MSB(op_AND_8), DUP(op_AND_8))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_10; + return instruction_sequence; +} + +// Pd = and(Ps,and(Pt,!Pu)) +RzILOpEffect *hex_il_op_c4_and_andn(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Pd_op = ISA2REG(hi, 'd', false); + const HexOp *Ps_op = ISA2REG(hi, 's', false); + RzILOpPure *Ps = READ_REG(pkt, Ps_op, false); + const 
HexOp *Pt_op = ISA2REG(hi, 't', false); + RzILOpPure *Pt = READ_REG(pkt, Pt_op, false); + const HexOp *Pu_op = ISA2REG(hi, 'u', false); + RzILOpPure *Pu = READ_REG(pkt, Pu_op, false); + + // Pd = ((st8) ((((st32) Ps) & ((st32) Pt)) & (~((st32) Pu)))); + RzILOpPure *op_AND_5 = LOGAND(CAST(32, MSB(Ps), DUP(Ps)), CAST(32, MSB(Pt), DUP(Pt))); + RzILOpPure *op_NOT_8 = LOGNOT(CAST(32, MSB(Pu), DUP(Pu))); + RzILOpPure *op_AND_9 = LOGAND(op_AND_5, op_NOT_8); + RzILOpEffect *op_ASSIGN_11 = WRITE_REG(bundle, Pd_op, CAST(8, MSB(op_AND_9), DUP(op_AND_9))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_11; + return instruction_sequence; +} + +// Pd = and(Ps,or(Pt,Pu)) +RzILOpEffect *hex_il_op_c4_and_or(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Pd_op = ISA2REG(hi, 'd', false); + const HexOp *Ps_op = ISA2REG(hi, 's', false); + RzILOpPure *Ps = READ_REG(pkt, Ps_op, false); + const HexOp *Pt_op = ISA2REG(hi, 't', false); + RzILOpPure *Pt = READ_REG(pkt, Pt_op, false); + const HexOp *Pu_op = ISA2REG(hi, 'u', false); + RzILOpPure *Pu = READ_REG(pkt, Pu_op, false); + + // Pd = ((st8) (((st32) Ps) & (((st32) Pt) | ((st32) Pu)))); + RzILOpPure *op_OR_6 = LOGOR(CAST(32, MSB(Pt), DUP(Pt)), CAST(32, MSB(Pu), DUP(Pu))); + RzILOpPure *op_AND_8 = LOGAND(CAST(32, MSB(Ps), DUP(Ps)), op_OR_6); + RzILOpEffect *op_ASSIGN_10 = WRITE_REG(bundle, Pd_op, CAST(8, MSB(op_AND_8), DUP(op_AND_8))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_10; + return instruction_sequence; +} + +// Pd = and(Ps,or(Pt,!Pu)) +RzILOpEffect *hex_il_op_c4_and_orn(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Pd_op = ISA2REG(hi, 'd', false); + const HexOp *Ps_op = ISA2REG(hi, 's', false); + RzILOpPure *Ps = READ_REG(pkt, Ps_op, false); + const HexOp *Pt_op = ISA2REG(hi, 't', false); + RzILOpPure *Pt = READ_REG(pkt, Pt_op, false); + const HexOp *Pu_op = ISA2REG(hi, 'u', 
false); + RzILOpPure *Pu = READ_REG(pkt, Pu_op, false); + + // Pd = ((st8) (((st32) Ps) & (((st32) Pt) | (~((st32) Pu))))); + RzILOpPure *op_NOT_5 = LOGNOT(CAST(32, MSB(Pu), DUP(Pu))); + RzILOpPure *op_OR_7 = LOGOR(CAST(32, MSB(Pt), DUP(Pt)), op_NOT_5); + RzILOpPure *op_AND_9 = LOGAND(CAST(32, MSB(Ps), DUP(Ps)), op_OR_7); + RzILOpEffect *op_ASSIGN_11 = WRITE_REG(bundle, Pd_op, CAST(8, MSB(op_AND_9), DUP(op_AND_9))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_11; + return instruction_sequence; +} + +// Pd = !cmp.gt(Rs,Rt) +RzILOpEffect *hex_il_op_c4_cmplte(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Pd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Pd = ((st8) ((Rs <= Rt) ? 0xff : 0x0)); + RzILOpPure *op_LE_3 = SLE(Rs, Rt); + RzILOpPure *cond_6 = ITE(op_LE_3, SN(32, 0xff), SN(32, 0)); + RzILOpEffect *op_ASSIGN_8 = WRITE_REG(bundle, Pd_op, CAST(8, MSB(cond_6), DUP(cond_6))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_8; + return instruction_sequence; +} + +// Pd = !cmp.gt(Rs,Ii) +RzILOpEffect *hex_il_op_c4_cmpltei(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + const HexOp *Pd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // s = s; + RzILOpEffect *imm_assign_0 = SETL("s", s); + + // Pd = ((st8) ((Rs <= s) ? 
0xff : 0x0)); + RzILOpPure *op_LE_4 = SLE(Rs, VARL("s")); + RzILOpPure *cond_7 = ITE(op_LE_4, SN(32, 0xff), SN(32, 0)); + RzILOpEffect *op_ASSIGN_9 = WRITE_REG(bundle, Pd_op, CAST(8, MSB(cond_7), DUP(cond_7))); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_0, op_ASSIGN_9); + return instruction_sequence; +} + +// Pd = !cmp.gtu(Rs,Rt) +RzILOpEffect *hex_il_op_c4_cmplteu(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Pd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Pd = ((st8) ((((ut32) Rs) <= ((ut32) Rt)) ? 0xff : 0x0)); + RzILOpPure *op_LE_5 = ULE(CAST(32, IL_FALSE, Rs), CAST(32, IL_FALSE, Rt)); + RzILOpPure *cond_8 = ITE(op_LE_5, SN(32, 0xff), SN(32, 0)); + RzILOpEffect *op_ASSIGN_10 = WRITE_REG(bundle, Pd_op, CAST(8, MSB(cond_8), DUP(cond_8))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_10; + return instruction_sequence; +} + +// Pd = !cmp.gtu(Rs,Ii) +RzILOpEffect *hex_il_op_c4_cmplteui(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Pd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // Pd = ((st8) ((((ut32) Rs) <= u) ? 
0xff : 0x0)); + RzILOpPure *op_LE_5 = ULE(CAST(32, IL_FALSE, Rs), VARL("u")); + RzILOpPure *cond_8 = ITE(op_LE_5, SN(32, 0xff), SN(32, 0)); + RzILOpEffect *op_ASSIGN_10 = WRITE_REG(bundle, Pd_op, CAST(8, MSB(cond_8), DUP(cond_8))); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_0, op_ASSIGN_10); + return instruction_sequence; +} + +// Pd = !cmp.eq(Rs,Rt) +RzILOpEffect *hex_il_op_c4_cmpneq(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Pd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Pd = ((st8) ((Rs != Rt) ? 0xff : 0x0)); + RzILOpPure *op_NE_3 = INV(EQ(Rs, Rt)); + RzILOpPure *cond_6 = ITE(op_NE_3, SN(32, 0xff), SN(32, 0)); + RzILOpEffect *op_ASSIGN_8 = WRITE_REG(bundle, Pd_op, CAST(8, MSB(cond_6), DUP(cond_6))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_8; + return instruction_sequence; +} + +// Pd = !cmp.eq(Rs,Ii) +RzILOpEffect *hex_il_op_c4_cmpneqi(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + const HexOp *Pd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // s = s; + RzILOpEffect *imm_assign_0 = SETL("s", s); + + // Pd = ((st8) ((Rs != s) ? 
0xff : 0x0)); + RzILOpPure *op_NE_4 = INV(EQ(Rs, VARL("s"))); + RzILOpPure *cond_7 = ITE(op_NE_4, SN(32, 0xff), SN(32, 0)); + RzILOpEffect *op_ASSIGN_9 = WRITE_REG(bundle, Pd_op, CAST(8, MSB(cond_7), DUP(cond_7))); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_0, op_ASSIGN_9); + return instruction_sequence; +} + +// Pd = fastcorner9(Ps,Pt) +RzILOpEffect *hex_il_op_c4_fastcorner9(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 tmp; + // Declare: ut32 i; + const HexOp *Ps_op = ISA2REG(hi, 's', false); + RzILOpPure *Ps = READ_REG(pkt, Ps_op, false); + const HexOp *Pt_op = ISA2REG(hi, 't', false); + RzILOpPure *Pt = READ_REG(pkt, Pt_op, false); + const HexOp *Pd_op = ISA2REG(hi, 'd', false); + + // tmp = ((ut32) 0x0); + RzILOpEffect *op_ASSIGN_2 = SETL("tmp", CAST(32, IL_FALSE, SN(32, 0))); + + // tmp = ((ut32) (((ut64) (((st64) tmp) & (~(0xffff << 0x0)))) | (((ut64) ((((st32) (Ps << 0x8)) | ((st32) Pt)) & 0xffff)) << 0x0))); + RzILOpPure *op_LSHIFT_9 = SHIFTL0(SN(64, 0xffff), SN(32, 0)); + RzILOpPure *op_NOT_10 = LOGNOT(op_LSHIFT_9); + RzILOpPure *op_AND_12 = LOGAND(CAST(64, IL_FALSE, VARL("tmp")), op_NOT_10); + RzILOpPure *op_LSHIFT_15 = SHIFTL0(Ps, SN(32, 8)); + RzILOpPure *op_OR_19 = LOGOR(CAST(32, MSB(op_LSHIFT_15), DUP(op_LSHIFT_15)), CAST(32, MSB(Pt), DUP(Pt))); + RzILOpPure *op_AND_21 = LOGAND(op_OR_19, SN(32, 0xffff)); + RzILOpPure *op_LSHIFT_26 = SHIFTL0(CAST(64, IL_FALSE, op_AND_21), SN(32, 0)); + RzILOpPure *op_OR_28 = LOGOR(CAST(64, IL_FALSE, op_AND_12), op_LSHIFT_26); + RzILOpEffect *op_ASSIGN_30 = SETL("tmp", CAST(32, IL_FALSE, op_OR_28)); + + // tmp = ((ut32) (((ut64) (((st64) tmp) & (~(0xffff << 0x10)))) | (((ut64) ((((st32) (Ps << 0x8)) | ((st32) Pt)) & 0xffff)) << 0x10))); + RzILOpPure *op_LSHIFT_36 = SHIFTL0(SN(64, 0xffff), SN(32, 16)); + RzILOpPure *op_NOT_37 = LOGNOT(op_LSHIFT_36); + RzILOpPure *op_AND_39 = LOGAND(CAST(64, IL_FALSE, VARL("tmp")), op_NOT_37); + 
RzILOpPure *op_LSHIFT_41 = SHIFTL0(DUP(Ps), SN(32, 8)); + RzILOpPure *op_OR_44 = LOGOR(CAST(32, MSB(op_LSHIFT_41), DUP(op_LSHIFT_41)), CAST(32, MSB(DUP(Pt)), DUP(Pt))); + RzILOpPure *op_AND_46 = LOGAND(op_OR_44, SN(32, 0xffff)); + RzILOpPure *op_LSHIFT_51 = SHIFTL0(CAST(64, IL_FALSE, op_AND_46), SN(32, 16)); + RzILOpPure *op_OR_53 = LOGOR(CAST(64, IL_FALSE, op_AND_39), op_LSHIFT_51); + RzILOpEffect *op_ASSIGN_55 = SETL("tmp", CAST(32, IL_FALSE, op_OR_53)); + + // i = ((ut32) 0x1); + RzILOpEffect *op_ASSIGN_59 = SETL("i", CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(++i); + RzILOpEffect *op_INC_63 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp155 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_65 = SETL("h_tmp155", VARL("i")); + + // seq(h_tmp155 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_66 = SEQN(2, op_ASSIGN_hybrid_tmp_65, op_INC_63); + + // tmp = (tmp & (tmp >> 0x1)); + RzILOpPure *op_RSHIFT_68 = SHIFTR0(VARL("tmp"), SN(32, 1)); + RzILOpPure *op_AND_69 = LOGAND(VARL("tmp"), op_RSHIFT_68); + RzILOpEffect *op_ASSIGN_AND_70 = SETL("tmp", op_AND_69); + + // seq(h_tmp155; tmp = (tmp & (tmp >> 0x1))); + RzILOpEffect *seq_71 = op_ASSIGN_AND_70; + + // seq(seq(h_tmp155; tmp = (tmp & (tmp >> 0x1))); seq(h_tmp155 = HY ...; + RzILOpEffect *seq_72 = SEQN(2, seq_71, seq_66); + + // while ((i < ((ut32) 0x9))) { seq(seq(h_tmp155; tmp = (tmp & (tmp >> 0x1))); seq(h_tmp155 = HY ... }; + RzILOpPure *op_LT_62 = ULT(VARL("i"), CAST(32, IL_FALSE, SN(32, 9))); + RzILOpEffect *for_73 = REPEAT(op_LT_62, seq_72); + + // seq(i = ((ut32) 0x1); while ((i < ((ut32) 0x9))) { seq(seq(h_tmp ...; + RzILOpEffect *seq_74 = SEQN(2, op_ASSIGN_59, for_73); + + // Pd = ((st8) ((tmp != ((ut32) 0x0)) ? 
0xff : 0x0)); + RzILOpPure *op_NE_78 = INV(EQ(VARL("tmp"), CAST(32, IL_FALSE, SN(32, 0)))); + RzILOpPure *cond_81 = ITE(op_NE_78, SN(32, 0xff), SN(32, 0)); + RzILOpEffect *op_ASSIGN_83 = WRITE_REG(bundle, Pd_op, CAST(8, MSB(cond_81), DUP(cond_81))); + + RzILOpEffect *instruction_sequence = SEQN(5, op_ASSIGN_2, op_ASSIGN_30, op_ASSIGN_55, seq_74, op_ASSIGN_83); + return instruction_sequence; +} + +// Pd = !fastcorner9(Ps,Pt) +RzILOpEffect *hex_il_op_c4_fastcorner9_not(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 tmp; + // Declare: ut32 i; + const HexOp *Ps_op = ISA2REG(hi, 's', false); + RzILOpPure *Ps = READ_REG(pkt, Ps_op, false); + const HexOp *Pt_op = ISA2REG(hi, 't', false); + RzILOpPure *Pt = READ_REG(pkt, Pt_op, false); + const HexOp *Pd_op = ISA2REG(hi, 'd', false); + + // tmp = ((ut32) 0x0); + RzILOpEffect *op_ASSIGN_2 = SETL("tmp", CAST(32, IL_FALSE, SN(32, 0))); + + // tmp = ((ut32) (((ut64) (((st64) tmp) & (~(0xffff << 0x0)))) | (((ut64) ((((st32) (Ps << 0x8)) | ((st32) Pt)) & 0xffff)) << 0x0))); + RzILOpPure *op_LSHIFT_9 = SHIFTL0(SN(64, 0xffff), SN(32, 0)); + RzILOpPure *op_NOT_10 = LOGNOT(op_LSHIFT_9); + RzILOpPure *op_AND_12 = LOGAND(CAST(64, IL_FALSE, VARL("tmp")), op_NOT_10); + RzILOpPure *op_LSHIFT_15 = SHIFTL0(Ps, SN(32, 8)); + RzILOpPure *op_OR_19 = LOGOR(CAST(32, MSB(op_LSHIFT_15), DUP(op_LSHIFT_15)), CAST(32, MSB(Pt), DUP(Pt))); + RzILOpPure *op_AND_21 = LOGAND(op_OR_19, SN(32, 0xffff)); + RzILOpPure *op_LSHIFT_26 = SHIFTL0(CAST(64, IL_FALSE, op_AND_21), SN(32, 0)); + RzILOpPure *op_OR_28 = LOGOR(CAST(64, IL_FALSE, op_AND_12), op_LSHIFT_26); + RzILOpEffect *op_ASSIGN_30 = SETL("tmp", CAST(32, IL_FALSE, op_OR_28)); + + // tmp = ((ut32) (((ut64) (((st64) tmp) & (~(0xffff << 0x10)))) | (((ut64) ((((st32) (Ps << 0x8)) | ((st32) Pt)) & 0xffff)) << 0x10))); + RzILOpPure *op_LSHIFT_36 = SHIFTL0(SN(64, 0xffff), SN(32, 16)); + RzILOpPure *op_NOT_37 = LOGNOT(op_LSHIFT_36); + 
RzILOpPure *op_AND_39 = LOGAND(CAST(64, IL_FALSE, VARL("tmp")), op_NOT_37); + RzILOpPure *op_LSHIFT_41 = SHIFTL0(DUP(Ps), SN(32, 8)); + RzILOpPure *op_OR_44 = LOGOR(CAST(32, MSB(op_LSHIFT_41), DUP(op_LSHIFT_41)), CAST(32, MSB(DUP(Pt)), DUP(Pt))); + RzILOpPure *op_AND_46 = LOGAND(op_OR_44, SN(32, 0xffff)); + RzILOpPure *op_LSHIFT_51 = SHIFTL0(CAST(64, IL_FALSE, op_AND_46), SN(32, 16)); + RzILOpPure *op_OR_53 = LOGOR(CAST(64, IL_FALSE, op_AND_39), op_LSHIFT_51); + RzILOpEffect *op_ASSIGN_55 = SETL("tmp", CAST(32, IL_FALSE, op_OR_53)); + + // i = ((ut32) 0x1); + RzILOpEffect *op_ASSIGN_59 = SETL("i", CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(++i); + RzILOpEffect *op_INC_63 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp156 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_65 = SETL("h_tmp156", VARL("i")); + + // seq(h_tmp156 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_66 = SEQN(2, op_ASSIGN_hybrid_tmp_65, op_INC_63); + + // tmp = (tmp & (tmp >> 0x1)); + RzILOpPure *op_RSHIFT_68 = SHIFTR0(VARL("tmp"), SN(32, 1)); + RzILOpPure *op_AND_69 = LOGAND(VARL("tmp"), op_RSHIFT_68); + RzILOpEffect *op_ASSIGN_AND_70 = SETL("tmp", op_AND_69); + + // seq(h_tmp156; tmp = (tmp & (tmp >> 0x1))); + RzILOpEffect *seq_71 = op_ASSIGN_AND_70; + + // seq(seq(h_tmp156; tmp = (tmp & (tmp >> 0x1))); seq(h_tmp156 = HY ...; + RzILOpEffect *seq_72 = SEQN(2, seq_71, seq_66); + + // while ((i < ((ut32) 0x9))) { seq(seq(h_tmp156; tmp = (tmp & (tmp >> 0x1))); seq(h_tmp156 = HY ... }; + RzILOpPure *op_LT_62 = ULT(VARL("i"), CAST(32, IL_FALSE, SN(32, 9))); + RzILOpEffect *for_73 = REPEAT(op_LT_62, seq_72); + + // seq(i = ((ut32) 0x1); while ((i < ((ut32) 0x9))) { seq(seq(h_tmp ...; + RzILOpEffect *seq_74 = SEQN(2, op_ASSIGN_59, for_73); + + // Pd = ((st8) ((tmp == ((ut32) 0x0)) ? 
0xff : 0x0)); + RzILOpPure *op_EQ_78 = EQ(VARL("tmp"), CAST(32, IL_FALSE, SN(32, 0))); + RzILOpPure *cond_81 = ITE(op_EQ_78, SN(32, 0xff), SN(32, 0)); + RzILOpEffect *op_ASSIGN_83 = WRITE_REG(bundle, Pd_op, CAST(8, MSB(cond_81), DUP(cond_81))); + + RzILOpEffect *instruction_sequence = SEQN(5, op_ASSIGN_2, op_ASSIGN_30, op_ASSIGN_55, seq_74, op_ASSIGN_83); + return instruction_sequence; +} + +// Pd = !bitsclr(Rs,Rt) +RzILOpEffect *hex_il_op_c4_nbitsclr(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Pd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Pd = ((st8) (((Rs & Rt) != 0x0) ? 0xff : 0x0)); + RzILOpPure *op_AND_3 = LOGAND(Rs, Rt); + RzILOpPure *op_NE_5 = INV(EQ(op_AND_3, SN(32, 0))); + RzILOpPure *cond_8 = ITE(op_NE_5, SN(32, 0xff), SN(32, 0)); + RzILOpEffect *op_ASSIGN_10 = WRITE_REG(bundle, Pd_op, CAST(8, MSB(cond_8), DUP(cond_8))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_10; + return instruction_sequence; +} + +// Pd = !bitsclr(Rs,Ii) +RzILOpEffect *hex_il_op_c4_nbitsclri(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Pd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + + // u = u; + RzILOpEffect *imm_assign_2 = SETL("u", u); + + // Pd = ((st8) (((((ut32) Rs) & u) != ((ut32) 0x0)) ? 
0xff : 0x0)); + RzILOpPure *op_AND_5 = LOGAND(CAST(32, IL_FALSE, Rs), VARL("u")); + RzILOpPure *op_NE_8 = INV(EQ(op_AND_5, CAST(32, IL_FALSE, SN(32, 0)))); + RzILOpPure *cond_11 = ITE(op_NE_8, SN(32, 0xff), SN(32, 0)); + RzILOpEffect *op_ASSIGN_13 = WRITE_REG(bundle, Pd_op, CAST(8, MSB(cond_11), DUP(cond_11))); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_2, op_ASSIGN_13); + return instruction_sequence; +} + +// Pd = !bitsset(Rs,Rt) +RzILOpEffect *hex_il_op_c4_nbitsset(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Pd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Pd = ((st8) (((Rs & Rt) != Rt) ? 0xff : 0x0)); + RzILOpPure *op_AND_3 = LOGAND(Rs, Rt); + RzILOpPure *op_NE_4 = INV(EQ(op_AND_3, DUP(Rt))); + RzILOpPure *cond_7 = ITE(op_NE_4, SN(32, 0xff), SN(32, 0)); + RzILOpEffect *op_ASSIGN_9 = WRITE_REG(bundle, Pd_op, CAST(8, MSB(cond_7), DUP(cond_7))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_9; + return instruction_sequence; +} + +// Pd = or(Ps,and(Pt,Pu)) +RzILOpEffect *hex_il_op_c4_or_and(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Pd_op = ISA2REG(hi, 'd', false); + const HexOp *Ps_op = ISA2REG(hi, 's', false); + RzILOpPure *Ps = READ_REG(pkt, Ps_op, false); + const HexOp *Pt_op = ISA2REG(hi, 't', false); + RzILOpPure *Pt = READ_REG(pkt, Pt_op, false); + const HexOp *Pu_op = ISA2REG(hi, 'u', false); + RzILOpPure *Pu = READ_REG(pkt, Pu_op, false); + + // Pd = ((st8) (((st32) Ps) | (((st32) Pt) & ((st32) Pu)))); + RzILOpPure *op_AND_6 = LOGAND(CAST(32, MSB(Pt), DUP(Pt)), CAST(32, MSB(Pu), DUP(Pu))); + RzILOpPure *op_OR_8 = LOGOR(CAST(32, MSB(Ps), DUP(Ps)), op_AND_6); + RzILOpEffect *op_ASSIGN_10 = WRITE_REG(bundle, 
Pd_op, CAST(8, MSB(op_OR_8), DUP(op_OR_8))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_10; + return instruction_sequence; +} + +// Pd = or(Ps,and(Pt,!Pu)) +RzILOpEffect *hex_il_op_c4_or_andn(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Pd_op = ISA2REG(hi, 'd', false); + const HexOp *Ps_op = ISA2REG(hi, 's', false); + RzILOpPure *Ps = READ_REG(pkt, Ps_op, false); + const HexOp *Pt_op = ISA2REG(hi, 't', false); + RzILOpPure *Pt = READ_REG(pkt, Pt_op, false); + const HexOp *Pu_op = ISA2REG(hi, 'u', false); + RzILOpPure *Pu = READ_REG(pkt, Pu_op, false); + + // Pd = ((st8) (((st32) Ps) | (((st32) Pt) & (~((st32) Pu))))); + RzILOpPure *op_NOT_5 = LOGNOT(CAST(32, MSB(Pu), DUP(Pu))); + RzILOpPure *op_AND_7 = LOGAND(CAST(32, MSB(Pt), DUP(Pt)), op_NOT_5); + RzILOpPure *op_OR_9 = LOGOR(CAST(32, MSB(Ps), DUP(Ps)), op_AND_7); + RzILOpEffect *op_ASSIGN_11 = WRITE_REG(bundle, Pd_op, CAST(8, MSB(op_OR_9), DUP(op_OR_9))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_11; + return instruction_sequence; +} + +// Pd = or(Ps,or(Pt,Pu)) +RzILOpEffect *hex_il_op_c4_or_or(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Pd_op = ISA2REG(hi, 'd', false); + const HexOp *Ps_op = ISA2REG(hi, 's', false); + RzILOpPure *Ps = READ_REG(pkt, Ps_op, false); + const HexOp *Pt_op = ISA2REG(hi, 't', false); + RzILOpPure *Pt = READ_REG(pkt, Pt_op, false); + const HexOp *Pu_op = ISA2REG(hi, 'u', false); + RzILOpPure *Pu = READ_REG(pkt, Pu_op, false); + + // Pd = ((st8) ((((st32) Ps) | ((st32) Pt)) | ((st32) Pu))); + RzILOpPure *op_OR_5 = LOGOR(CAST(32, MSB(Ps), DUP(Ps)), CAST(32, MSB(Pt), DUP(Pt))); + RzILOpPure *op_OR_8 = LOGOR(op_OR_5, CAST(32, MSB(Pu), DUP(Pu))); + RzILOpEffect *op_ASSIGN_10 = WRITE_REG(bundle, Pd_op, CAST(8, MSB(op_OR_8), DUP(op_OR_8))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_10; + return instruction_sequence; +} + 
+// Pd = or(Ps,or(Pt,!Pu)) +RzILOpEffect *hex_il_op_c4_or_orn(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Pd_op = ISA2REG(hi, 'd', false); + const HexOp *Ps_op = ISA2REG(hi, 's', false); + RzILOpPure *Ps = READ_REG(pkt, Ps_op, false); + const HexOp *Pt_op = ISA2REG(hi, 't', false); + RzILOpPure *Pt = READ_REG(pkt, Pt_op, false); + const HexOp *Pu_op = ISA2REG(hi, 'u', false); + RzILOpPure *Pu = READ_REG(pkt, Pu_op, false); + + // Pd = ((st8) ((((st32) Ps) | ((st32) Pt)) | (~((st32) Pu)))); + RzILOpPure *op_OR_5 = LOGOR(CAST(32, MSB(Ps), DUP(Ps)), CAST(32, MSB(Pt), DUP(Pt))); + RzILOpPure *op_NOT_8 = LOGNOT(CAST(32, MSB(Pu), DUP(Pu))); + RzILOpPure *op_OR_9 = LOGOR(op_OR_5, op_NOT_8); + RzILOpEffect *op_ASSIGN_11 = WRITE_REG(bundle, Pd_op, CAST(8, MSB(op_OR_9), DUP(op_OR_9))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_11; + return instruction_sequence; +} + +#include \ No newline at end of file diff --git a/librz/arch/isa/hexagon/il_ops/hexagon_il_F2_ops.c b/librz/arch/isa/hexagon/il_ops/hexagon_il_F2_ops.c new file mode 100644 index 00000000000..ab2b3db7ef8 --- /dev/null +++ b/librz/arch/isa/hexagon/il_ops/hexagon_il_F2_ops.c @@ -0,0 +1,754 @@ +// SPDX-FileCopyrightText: 2021 Rot127 +// SPDX-License-Identifier: LGPL-3.0-only + +// LLVM commit: b6f51787f6c8e77143f0aef6b58ddc7c55741d5c +// LLVM commit date: 2023-11-15 07:10:59 -0800 (ISO 8601 format) +// Date of code generation: 2024-03-16 06:22:39-05:00 +//======================================== +// The following code is generated. +// Do not edit. 
Repository of code generator: +// https://github.com/rizinorg/rz-hexagon + +#include +#include "../hexagon_il.h" +#include +#include + +// Rdd = convert_d2df(Rss) +RzILOpEffect *hex_il_op_f2_conv_d2df(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + + // Rdd = ((st64) fUNDOUBLE(HEX_SINT_TO_D(HEX_GET_INSN_RMODE(hi), Rss))); + RzILOpEffect *op_ASSIGN_6 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, F2BV(HEX_SINT_TO_D(HEX_GET_INSN_RMODE(hi), Rss)))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_6; + return instruction_sequence; +} + +// Rd = convert_d2sf(Rss) +RzILOpEffect *hex_il_op_f2_conv_d2sf(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Rdd = convert_df2d(Rss) +RzILOpEffect *hex_il_op_f2_conv_df2d(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + + // Rdd = ((st64) HEX_D_TO_SINT(HEX_GET_INSN_RMODE(hi), DOUBLE(RZ_FLOAT_IEEE754_BIN_64, ((ut64) Rss)))); + RzILOpEffect *op_ASSIGN_7 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, HEX_D_TO_SINT(HEX_GET_INSN_RMODE(hi), BV2F(RZ_FLOAT_IEEE754_BIN_64, CAST(64, IL_FALSE, Rss))))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_7; + return instruction_sequence; +} + +// Rdd = convert_df2d(Rss):chop +RzILOpEffect *hex_il_op_f2_conv_df2d_chop(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + + // Rdd = ((st64) HEX_D_TO_SINT(HEX_GET_INSN_RMODE(hi), DOUBLE(RZ_FLOAT_IEEE754_BIN_64, ((ut64) Rss)))); + RzILOpEffect 
*op_ASSIGN_8 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, HEX_D_TO_SINT(HEX_GET_INSN_RMODE(hi), BV2F(RZ_FLOAT_IEEE754_BIN_64, CAST(64, IL_FALSE, Rss))))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_8; + return instruction_sequence; +} + +// Rd = convert_df2sf(Rss) +RzILOpEffect *hex_il_op_f2_conv_df2sf(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Rdd = convert_df2ud(Rss) +RzILOpEffect *hex_il_op_f2_conv_df2ud(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + + // Rdd = ((st64) HEX_D_TO_INT(HEX_GET_INSN_RMODE(hi), DOUBLE(RZ_FLOAT_IEEE754_BIN_64, ((ut64) Rss)))); + RzILOpEffect *op_ASSIGN_7 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, HEX_D_TO_INT(HEX_GET_INSN_RMODE(hi), BV2F(RZ_FLOAT_IEEE754_BIN_64, CAST(64, IL_FALSE, Rss))))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_7; + return instruction_sequence; +} + +// Rdd = convert_df2ud(Rss):chop +RzILOpEffect *hex_il_op_f2_conv_df2ud_chop(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + + // Rdd = ((st64) HEX_D_TO_INT(HEX_GET_INSN_RMODE(hi), DOUBLE(RZ_FLOAT_IEEE754_BIN_64, ((ut64) Rss)))); + RzILOpEffect *op_ASSIGN_8 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, HEX_D_TO_INT(HEX_GET_INSN_RMODE(hi), BV2F(RZ_FLOAT_IEEE754_BIN_64, CAST(64, IL_FALSE, Rss))))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_8; + return instruction_sequence; +} + +// Rd = convert_df2uw(Rss) +RzILOpEffect *hex_il_op_f2_conv_df2uw(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = 
ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + + // Rd = ((st32) ((ut32) HEX_D_TO_INT(HEX_GET_INSN_RMODE(hi), DOUBLE(RZ_FLOAT_IEEE754_BIN_64, ((ut64) Rss))))); + RzILOpEffect *op_ASSIGN_8 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, CAST(32, IL_FALSE, HEX_D_TO_INT(HEX_GET_INSN_RMODE(hi), BV2F(RZ_FLOAT_IEEE754_BIN_64, CAST(64, IL_FALSE, Rss)))))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_8; + return instruction_sequence; +} + +// Rd = convert_df2uw(Rss):chop +RzILOpEffect *hex_il_op_f2_conv_df2uw_chop(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + + // Rd = ((st32) ((ut32) HEX_D_TO_INT(HEX_GET_INSN_RMODE(hi), DOUBLE(RZ_FLOAT_IEEE754_BIN_64, ((ut64) Rss))))); + RzILOpEffect *op_ASSIGN_9 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, CAST(32, IL_FALSE, HEX_D_TO_INT(HEX_GET_INSN_RMODE(hi), BV2F(RZ_FLOAT_IEEE754_BIN_64, CAST(64, IL_FALSE, Rss)))))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_9; + return instruction_sequence; +} + +// Rd = convert_df2w(Rss) +RzILOpEffect *hex_il_op_f2_conv_df2w(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + + // Rd = ((st32) HEX_D_TO_SINT(HEX_GET_INSN_RMODE(hi), DOUBLE(RZ_FLOAT_IEEE754_BIN_64, ((ut64) Rss)))); + RzILOpEffect *op_ASSIGN_7 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, HEX_D_TO_SINT(HEX_GET_INSN_RMODE(hi), BV2F(RZ_FLOAT_IEEE754_BIN_64, CAST(64, IL_FALSE, Rss))))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_7; + return instruction_sequence; +} + +// Rd = convert_df2w(Rss):chop +RzILOpEffect *hex_il_op_f2_conv_df2w_chop(HexInsnPktBundle *bundle) { + const HexInsn *hi = 
bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + + // Rd = ((st32) HEX_D_TO_SINT(HEX_GET_INSN_RMODE(hi), DOUBLE(RZ_FLOAT_IEEE754_BIN_64, ((ut64) Rss)))); + RzILOpEffect *op_ASSIGN_8 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, HEX_D_TO_SINT(HEX_GET_INSN_RMODE(hi), BV2F(RZ_FLOAT_IEEE754_BIN_64, CAST(64, IL_FALSE, Rss))))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_8; + return instruction_sequence; +} + +// Rdd = convert_sf2d(Rs) +RzILOpEffect *hex_il_op_f2_conv_sf2d(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Rdd = convert_sf2d(Rs):chop +RzILOpEffect *hex_il_op_f2_conv_sf2d_chop(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Rdd = convert_sf2df(Rs) +RzILOpEffect *hex_il_op_f2_conv_sf2df(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Rdd = convert_sf2ud(Rs) +RzILOpEffect *hex_il_op_f2_conv_sf2ud(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Rdd = convert_sf2ud(Rs):chop +RzILOpEffect *hex_il_op_f2_conv_sf2ud_chop(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Rd = convert_sf2uw(Rs) +RzILOpEffect *hex_il_op_f2_conv_sf2uw(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Rd = convert_sf2uw(Rs):chop +RzILOpEffect *hex_il_op_f2_conv_sf2uw_chop(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Rd = convert_sf2w(Rs) +RzILOpEffect *hex_il_op_f2_conv_sf2w(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Rd = convert_sf2w(Rs):chop +RzILOpEffect *hex_il_op_f2_conv_sf2w_chop(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Rdd = convert_ud2df(Rss) +RzILOpEffect *hex_il_op_f2_conv_ud2df(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + + // Rdd = 
((st64) fUNDOUBLE(HEX_INT_TO_D(HEX_GET_INSN_RMODE(hi), ((ut64) Rss)))); + RzILOpEffect *op_ASSIGN_7 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, F2BV(HEX_INT_TO_D(HEX_GET_INSN_RMODE(hi), CAST(64, IL_FALSE, Rss))))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_7; + return instruction_sequence; +} + +// Rd = convert_ud2sf(Rss) +RzILOpEffect *hex_il_op_f2_conv_ud2sf(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Rdd = convert_uw2df(Rs) +RzILOpEffect *hex_il_op_f2_conv_uw2df(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // Rdd = ((st64) fUNDOUBLE(HEX_INT_TO_D(HEX_GET_INSN_RMODE(hi), ((ut64) ((ut32) Rs))))); + RzILOpEffect *op_ASSIGN_8 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, F2BV(HEX_INT_TO_D(HEX_GET_INSN_RMODE(hi), CAST(64, IL_FALSE, CAST(32, IL_FALSE, Rs)))))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_8; + return instruction_sequence; +} + +// Rd = convert_uw2sf(Rs) +RzILOpEffect *hex_il_op_f2_conv_uw2sf(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Rdd = convert_w2df(Rs) +RzILOpEffect *hex_il_op_f2_conv_w2df(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // Rdd = ((st64) fUNDOUBLE(HEX_SINT_TO_D(HEX_GET_INSN_RMODE(hi), ((st64) Rs)))); + RzILOpEffect *op_ASSIGN_7 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, F2BV(HEX_SINT_TO_D(HEX_GET_INSN_RMODE(hi), CAST(64, MSB(Rs), DUP(Rs)))))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_7; + return instruction_sequence; +} + +// Rd = convert_w2sf(Rs) +RzILOpEffect *hex_il_op_f2_conv_w2sf(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Rdd = dfadd(Rss,Rtt) 
+RzILOpEffect *hex_il_op_f2_dfadd(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // Rdd = ((st64) fUNDOUBLE(DOUBLE(RZ_FLOAT_IEEE754_BIN_64, ((ut64) Rss)) + DOUBLE(RZ_FLOAT_IEEE754_BIN_64, ((ut64) Rtt)))); + RzILOpPure *op_ADD_7 = FADD(HEX_GET_INSN_RMODE(hi), BV2F(RZ_FLOAT_IEEE754_BIN_64, CAST(64, IL_FALSE, Rss)), BV2F(RZ_FLOAT_IEEE754_BIN_64, CAST(64, IL_FALSE, Rtt))); + RzILOpEffect *op_ASSIGN_10 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, F2BV(op_ADD_7))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_10; + return instruction_sequence; +} + +// Pd = dfclass(Rss,Ii) +RzILOpEffect *hex_il_op_f2_dfclass(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Pd = dfcmp.eq(Rss,Rtt) +RzILOpEffect *hex_il_op_f2_dfcmpeq(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Pd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // Pd = ((st8) ((DOUBLE(RZ_FLOAT_IEEE754_BIN_64, ((ut64) Rss)) == DOUBLE(RZ_FLOAT_IEEE754_BIN_64, ((ut64) Rtt))) ? 
0xff : 0x0)); + RzILOpPure *op_EQ_7 = FEQ(BV2F(RZ_FLOAT_IEEE754_BIN_64, CAST(64, IL_FALSE, Rss)), BV2F(RZ_FLOAT_IEEE754_BIN_64, CAST(64, IL_FALSE, Rtt))); + RzILOpPure *cond_10 = ITE(op_EQ_7, SN(32, 0xff), SN(32, 0)); + RzILOpEffect *op_ASSIGN_12 = WRITE_REG(bundle, Pd_op, CAST(8, MSB(cond_10), DUP(cond_10))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_12; + return instruction_sequence; +} + +// Pd = dfcmp.ge(Rss,Rtt) +RzILOpEffect *hex_il_op_f2_dfcmpge(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Pd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // Pd = ((st8) ((DOUBLE(RZ_FLOAT_IEEE754_BIN_64, ((ut64) Rss)) >= DOUBLE(RZ_FLOAT_IEEE754_BIN_64, ((ut64) Rtt))) ? 0xff : 0x0)); + RzILOpPure *op_GE_7 = FGE(BV2F(RZ_FLOAT_IEEE754_BIN_64, CAST(64, IL_FALSE, Rss)), BV2F(RZ_FLOAT_IEEE754_BIN_64, CAST(64, IL_FALSE, Rtt))); + RzILOpPure *cond_10 = ITE(op_GE_7, SN(32, 0xff), SN(32, 0)); + RzILOpEffect *op_ASSIGN_12 = WRITE_REG(bundle, Pd_op, CAST(8, MSB(cond_10), DUP(cond_10))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_12; + return instruction_sequence; +} + +// Pd = dfcmp.gt(Rss,Rtt) +RzILOpEffect *hex_il_op_f2_dfcmpgt(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Pd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // Pd = ((st8) ((DOUBLE(RZ_FLOAT_IEEE754_BIN_64, ((ut64) Rss)) > DOUBLE(RZ_FLOAT_IEEE754_BIN_64, ((ut64) Rtt))) ? 
0xff : 0x0)); + RzILOpPure *op_GT_7 = FGT(BV2F(RZ_FLOAT_IEEE754_BIN_64, CAST(64, IL_FALSE, Rss)), BV2F(RZ_FLOAT_IEEE754_BIN_64, CAST(64, IL_FALSE, Rtt))); + RzILOpPure *cond_10 = ITE(op_GT_7, SN(32, 0xff), SN(32, 0)); + RzILOpEffect *op_ASSIGN_12 = WRITE_REG(bundle, Pd_op, CAST(8, MSB(cond_10), DUP(cond_10))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_12; + return instruction_sequence; +} + +// Pd = dfcmp.uo(Rss,Rtt) +RzILOpEffect *hex_il_op_f2_dfcmpuo(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Rdd = dfmake(Ii):neg +RzILOpEffect *hex_il_op_f2_dfimm_n(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + + // Rdd = ((st64) (0x3f9 << 0x34)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(UN(64, 0x3f9), SN(32, 0x34)); + RzILOpEffect *op_ASSIGN_7 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_LSHIFT_5)); + + // u = u; + RzILOpEffect *imm_assign_8 = SETL("u", u); + + // Rdd = Rdd + ((st64) (((ut64) u) << 0x2e)); + RzILOpPure *op_LSHIFT_12 = SHIFTL0(CAST(64, IL_FALSE, VARL("u")), SN(32, 0x2e)); + RzILOpPure *op_ADD_14 = ADD(READ_REG(pkt, Rdd_op, true), CAST(64, IL_FALSE, op_LSHIFT_12)); + RzILOpEffect *op_ASSIGN_ADD_15 = WRITE_REG(bundle, Rdd_op, op_ADD_14); + + // Rdd = (Rdd | ((st64) (0x1 << 0x3f))); + RzILOpPure *op_LSHIFT_18 = SHIFTL0(UN(64, 1), SN(32, 0x3f)); + RzILOpPure *op_OR_20 = LOGOR(READ_REG(pkt, Rdd_op, true), CAST(64, IL_FALSE, op_LSHIFT_18)); + RzILOpEffect *op_ASSIGN_OR_21 = WRITE_REG(bundle, Rdd_op, op_OR_20); + + RzILOpEffect *instruction_sequence = SEQN(4, imm_assign_8, op_ASSIGN_7, op_ASSIGN_ADD_15, op_ASSIGN_OR_21); + return instruction_sequence; +} + +// Rdd = dfmake(Ii):pos +RzILOpEffect *hex_il_op_f2_dfimm_p(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + RzILOpPure *u = UN(32, 
(ut32)ISA2IMM(hi, 'u')); + + // Rdd = ((st64) (0x3f9 << 0x34)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(UN(64, 0x3f9), SN(32, 0x34)); + RzILOpEffect *op_ASSIGN_7 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_LSHIFT_5)); + + // u = u; + RzILOpEffect *imm_assign_8 = SETL("u", u); + + // Rdd = Rdd + ((st64) (((ut64) u) << 0x2e)); + RzILOpPure *op_LSHIFT_12 = SHIFTL0(CAST(64, IL_FALSE, VARL("u")), SN(32, 0x2e)); + RzILOpPure *op_ADD_14 = ADD(READ_REG(pkt, Rdd_op, true), CAST(64, IL_FALSE, op_LSHIFT_12)); + RzILOpEffect *op_ASSIGN_ADD_15 = WRITE_REG(bundle, Rdd_op, op_ADD_14); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_8, op_ASSIGN_7, op_ASSIGN_ADD_15); + return instruction_sequence; +} + +// Rdd = dfmax(Rss,Rtt) +RzILOpEffect *hex_il_op_f2_dfmax(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Rdd = dfmin(Rss,Rtt) +RzILOpEffect *hex_il_op_f2_dfmin(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Rxx += dfmpyhh(Rss,Rtt) +RzILOpEffect *hex_il_op_f2_dfmpyhh(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Rxx += dfmpylh(Rss,Rtt) +RzILOpEffect *hex_il_op_f2_dfmpylh(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rxx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // Rxx = Rxx + ((st64) (((ut64) ((ut32) ((Rss >> 0x0) & 0xffffffff))) * (((ut64) 0x100000) | extract64(((ut64) ((ut32) ((Rtt >> 0x20) & 0xffffffff))), 0x0, 0x14)) << 0x1)); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rss, SN(32, 0)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_18 = SHIFTRA(Rtt, SN(32, 0x20)); + RzILOpPure *op_AND_20 = LOGAND(op_RSHIFT_18, SN(64, 0xffffffff)); + RzILOpPure *op_OR_28 = LOGOR(CAST(64, IL_FALSE, SN(32, 0x100000)), EXTRACT64(CAST(64, IL_FALSE, CAST(32, 
IL_FALSE, op_AND_20)), SN(32, 0), SN(32, 20))); + RzILOpPure *op_MUL_29 = MUL(CAST(64, IL_FALSE, CAST(32, IL_FALSE, op_AND_7)), op_OR_28); + RzILOpPure *op_LSHIFT_31 = SHIFTL0(op_MUL_29, SN(32, 1)); + RzILOpPure *op_ADD_33 = ADD(READ_REG(pkt, Rxx_op, false), CAST(64, IL_FALSE, op_LSHIFT_31)); + RzILOpEffect *op_ASSIGN_ADD_34 = WRITE_REG(bundle, Rxx_op, op_ADD_33); + + RzILOpEffect *instruction_sequence = op_ASSIGN_ADD_34; + return instruction_sequence; +} + +// Rdd = dfmpyll(Rss,Rtt) +RzILOpEffect *hex_il_op_f2_dfmpyll(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut64 prod; + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + + // prod = ((ut64) ((ut32) ((ut64) ((ut32) ((Rss >> 0x0) & 0xffffffff))))) * ((ut64) ((ut32) ((ut64) ((ut32) ((Rtt >> 0x0) & 0xffffffff))))); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rss, SN(32, 0)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_16 = SHIFTRA(Rtt, SN(32, 0)); + RzILOpPure *op_AND_18 = LOGAND(op_RSHIFT_16, SN(64, 0xffffffff)); + RzILOpPure *op_MUL_23 = MUL(CAST(64, IL_FALSE, CAST(32, IL_FALSE, CAST(64, IL_FALSE, CAST(32, IL_FALSE, op_AND_7)))), CAST(64, IL_FALSE, CAST(32, IL_FALSE, CAST(64, IL_FALSE, CAST(32, IL_FALSE, op_AND_18))))); + RzILOpEffect *op_ASSIGN_24 = SETL("prod", op_MUL_23); + + // Rdd = ((st64) ((prod >> 0x20) << 0x1)); + RzILOpPure *op_RSHIFT_27 = SHIFTR0(VARL("prod"), SN(32, 0x20)); + RzILOpPure *op_LSHIFT_29 = SHIFTL0(op_RSHIFT_27, SN(32, 1)); + RzILOpEffect *op_ASSIGN_31 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_LSHIFT_29)); + + // Rdd = ((st64) ((((ut64) Rdd) & (~(0x1 << 0x0))) | (((ut64) 0x1) << 0x0))); + RzILOpPure *op_LSHIFT_46 = SHIFTL0(UN(64, 1), SN(32, 0)); + RzILOpPure *op_NOT_47 = 
LOGNOT(op_LSHIFT_46); + RzILOpPure *op_AND_49 = LOGAND(CAST(64, IL_FALSE, READ_REG(pkt, Rdd_op, true)), op_NOT_47); + RzILOpPure *op_LSHIFT_53 = SHIFTL0(CAST(64, IL_FALSE, SN(32, 1)), SN(32, 0)); + RzILOpPure *op_OR_54 = LOGOR(op_AND_49, op_LSHIFT_53); + RzILOpEffect *op_ASSIGN_56 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_54)); + + // seq(Rdd = ((st64) ((((ut64) Rdd) & (~(0x1 << 0x0))) | (((ut64) 0 ...; + RzILOpEffect *seq_then_57 = op_ASSIGN_56; + + // if ((((ut64) ((ut32) ((prod >> 0x0) & ((ut64) 0xffffffff)))) != ((ut64) 0x0))) {seq(Rdd = ((st64) ((((ut64) Rdd) & (~(0x1 << 0x0))) | (((ut64) 0 ...} else {{}}; + RzILOpPure *op_RSHIFT_35 = SHIFTR0(VARL("prod"), SN(32, 0)); + RzILOpPure *op_AND_38 = LOGAND(op_RSHIFT_35, CAST(64, IL_FALSE, SN(64, 0xffffffff))); + RzILOpPure *op_NE_43 = INV(EQ(CAST(64, IL_FALSE, CAST(32, IL_FALSE, op_AND_38)), CAST(64, IL_FALSE, SN(32, 0)))); + RzILOpEffect *branch_58 = BRANCH(op_NE_43, seq_then_57, EMPTY()); + + RzILOpEffect *instruction_sequence = SEQN(3, op_ASSIGN_24, op_ASSIGN_31, branch_58); + return instruction_sequence; +} + +// Rdd = dfsub(Rss,Rtt) +RzILOpEffect *hex_il_op_f2_dfsub(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // Rdd = ((st64) fUNDOUBLE(DOUBLE(RZ_FLOAT_IEEE754_BIN_64, ((ut64) Rss)) - DOUBLE(RZ_FLOAT_IEEE754_BIN_64, ((ut64) Rtt)))); + RzILOpPure *op_SUB_7 = FSUB(HEX_GET_INSN_RMODE(hi), BV2F(RZ_FLOAT_IEEE754_BIN_64, CAST(64, IL_FALSE, Rss)), BV2F(RZ_FLOAT_IEEE754_BIN_64, CAST(64, IL_FALSE, Rtt))); + RzILOpEffect *op_ASSIGN_10 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, F2BV(op_SUB_7))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_10; + return instruction_sequence; +} + +// Rd = 
sfadd(Rs,Rt) +RzILOpEffect *hex_il_op_f2_sfadd(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rd = ((st32) fUNFLOAT(FLOAT(RZ_FLOAT_IEEE754_BIN_32, ((ut32) Rs)) + FLOAT(RZ_FLOAT_IEEE754_BIN_32, ((ut32) Rt)))); + RzILOpPure *op_ADD_7 = FADD(HEX_GET_INSN_RMODE(hi), BV2F(RZ_FLOAT_IEEE754_BIN_32, CAST(32, IL_FALSE, Rs)), BV2F(RZ_FLOAT_IEEE754_BIN_32, CAST(32, IL_FALSE, Rt))); + RzILOpEffect *op_ASSIGN_10 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, F2BV(op_ADD_7))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_10; + return instruction_sequence; +} + +// Pd = sfclass(Rs,Ii) +RzILOpEffect *hex_il_op_f2_sfclass(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Pd = sfcmp.eq(Rs,Rt) +RzILOpEffect *hex_il_op_f2_sfcmpeq(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Pd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Pd = ((st8) ((FLOAT(RZ_FLOAT_IEEE754_BIN_32, ((ut32) Rs)) == FLOAT(RZ_FLOAT_IEEE754_BIN_32, ((ut32) Rt))) ? 
0xff : 0x0)); + RzILOpPure *op_EQ_7 = FEQ(BV2F(RZ_FLOAT_IEEE754_BIN_32, CAST(32, IL_FALSE, Rs)), BV2F(RZ_FLOAT_IEEE754_BIN_32, CAST(32, IL_FALSE, Rt))); + RzILOpPure *cond_10 = ITE(op_EQ_7, SN(32, 0xff), SN(32, 0)); + RzILOpEffect *op_ASSIGN_12 = WRITE_REG(bundle, Pd_op, CAST(8, MSB(cond_10), DUP(cond_10))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_12; + return instruction_sequence; +} + +// Pd = sfcmp.ge(Rs,Rt) +RzILOpEffect *hex_il_op_f2_sfcmpge(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Pd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Pd = ((st8) ((FLOAT(RZ_FLOAT_IEEE754_BIN_32, ((ut32) Rs)) >= FLOAT(RZ_FLOAT_IEEE754_BIN_32, ((ut32) Rt))) ? 0xff : 0x0)); + RzILOpPure *op_GE_7 = FGE(BV2F(RZ_FLOAT_IEEE754_BIN_32, CAST(32, IL_FALSE, Rs)), BV2F(RZ_FLOAT_IEEE754_BIN_32, CAST(32, IL_FALSE, Rt))); + RzILOpPure *cond_10 = ITE(op_GE_7, SN(32, 0xff), SN(32, 0)); + RzILOpEffect *op_ASSIGN_12 = WRITE_REG(bundle, Pd_op, CAST(8, MSB(cond_10), DUP(cond_10))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_12; + return instruction_sequence; +} + +// Pd = sfcmp.gt(Rs,Rt) +RzILOpEffect *hex_il_op_f2_sfcmpgt(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Pd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Pd = ((st8) ((FLOAT(RZ_FLOAT_IEEE754_BIN_32, ((ut32) Rs)) > FLOAT(RZ_FLOAT_IEEE754_BIN_32, ((ut32) Rt))) ? 
0xff : 0x0)); + RzILOpPure *op_GT_7 = FGT(BV2F(RZ_FLOAT_IEEE754_BIN_32, CAST(32, IL_FALSE, Rs)), BV2F(RZ_FLOAT_IEEE754_BIN_32, CAST(32, IL_FALSE, Rt))); + RzILOpPure *cond_10 = ITE(op_GT_7, SN(32, 0xff), SN(32, 0)); + RzILOpEffect *op_ASSIGN_12 = WRITE_REG(bundle, Pd_op, CAST(8, MSB(cond_10), DUP(cond_10))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_12; + return instruction_sequence; +} + +// Pd = sfcmp.uo(Rs,Rt) +RzILOpEffect *hex_il_op_f2_sfcmpuo(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Rd = sffixupd(Rs,Rt) +RzILOpEffect *hex_il_op_f2_sffixupd(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Rd = sffixupn(Rs,Rt) +RzILOpEffect *hex_il_op_f2_sffixupn(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Rd = sffixupr(Rs) +RzILOpEffect *hex_il_op_f2_sffixupr(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Rx += sfmpy(Rs,Rt) +RzILOpEffect *hex_il_op_f2_sffma(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Rx += sfmpy(Rs,Rt):lib +RzILOpEffect *hex_il_op_f2_sffma_lib(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Rx += sfmpy(Rs,Rt,Pu):scale +RzILOpEffect *hex_il_op_f2_sffma_sc(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Rx -= sfmpy(Rs,Rt) +RzILOpEffect *hex_il_op_f2_sffms(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Rx -= sfmpy(Rs,Rt):lib +RzILOpEffect *hex_il_op_f2_sffms_lib(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Rd = sfmake(Ii):neg +RzILOpEffect *hex_il_op_f2_sfimm_n(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + + // Rd = (0x79 << 0x17); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(SN(32, 0x79), SN(32, 23)); + RzILOpEffect *op_ASSIGN_6 = WRITE_REG(bundle, Rd_op, op_LSHIFT_5); + + // u = u; + RzILOpEffect *imm_assign_7 = SETL("u", u); + + // Rd = Rd + ((st32) (u << 0x11)); + RzILOpPure *op_LSHIFT_10 = SHIFTL0(VARL("u"), SN(32, 
17)); + RzILOpPure *op_ADD_12 = ADD(READ_REG(pkt, Rd_op, true), CAST(32, IL_FALSE, op_LSHIFT_10)); + RzILOpEffect *op_ASSIGN_ADD_13 = WRITE_REG(bundle, Rd_op, op_ADD_12); + + // Rd = (Rd | (0x1 << 0x1f)); + RzILOpPure *op_LSHIFT_16 = SHIFTL0(SN(32, 1), SN(32, 31)); + RzILOpPure *op_OR_17 = LOGOR(READ_REG(pkt, Rd_op, true), op_LSHIFT_16); + RzILOpEffect *op_ASSIGN_OR_18 = WRITE_REG(bundle, Rd_op, op_OR_17); + + RzILOpEffect *instruction_sequence = SEQN(4, imm_assign_7, op_ASSIGN_6, op_ASSIGN_ADD_13, op_ASSIGN_OR_18); + return instruction_sequence; +} + +// Rd = sfmake(Ii):pos +RzILOpEffect *hex_il_op_f2_sfimm_p(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + + // Rd = (0x79 << 0x17); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(SN(32, 0x79), SN(32, 23)); + RzILOpEffect *op_ASSIGN_6 = WRITE_REG(bundle, Rd_op, op_LSHIFT_5); + + // u = u; + RzILOpEffect *imm_assign_7 = SETL("u", u); + + // Rd = Rd + ((st32) (u << 0x11)); + RzILOpPure *op_LSHIFT_10 = SHIFTL0(VARL("u"), SN(32, 17)); + RzILOpPure *op_ADD_12 = ADD(READ_REG(pkt, Rd_op, true), CAST(32, IL_FALSE, op_LSHIFT_10)); + RzILOpEffect *op_ASSIGN_ADD_13 = WRITE_REG(bundle, Rd_op, op_ADD_12); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_7, op_ASSIGN_6, op_ASSIGN_ADD_13); + return instruction_sequence; +} + +// Rd,Pe = sfinvsqrta(Rs) +RzILOpEffect *hex_il_op_f2_sfinvsqrta(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Rd = sfmax(Rs,Rt) +RzILOpEffect *hex_il_op_f2_sfmax(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Rd = sfmin(Rs,Rt) +RzILOpEffect *hex_il_op_f2_sfmin(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Rd = sfmpy(Rs,Rt) +RzILOpEffect *hex_il_op_f2_sfmpy(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Rd,Pe = sfrecipa(Rs,Rt) +RzILOpEffect *hex_il_op_f2_sfrecipa(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Rd 
= sfsub(Rs,Rt) +RzILOpEffect *hex_il_op_f2_sfsub(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rd = ((st32) fUNFLOAT(FLOAT(RZ_FLOAT_IEEE754_BIN_32, ((ut32) Rs)) - FLOAT(RZ_FLOAT_IEEE754_BIN_32, ((ut32) Rt)))); + RzILOpPure *op_SUB_7 = FSUB(HEX_GET_INSN_RMODE(hi), BV2F(RZ_FLOAT_IEEE754_BIN_32, CAST(32, IL_FALSE, Rs)), BV2F(RZ_FLOAT_IEEE754_BIN_32, CAST(32, IL_FALSE, Rt))); + RzILOpEffect *op_ASSIGN_10 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, F2BV(op_SUB_7))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_10; + return instruction_sequence; +} + +#include \ No newline at end of file diff --git a/librz/arch/isa/hexagon/il_ops/hexagon_il_G4_ops.c b/librz/arch/isa/hexagon/il_ops/hexagon_il_G4_ops.c new file mode 100644 index 00000000000..ab13cca78bb --- /dev/null +++ b/librz/arch/isa/hexagon/il_ops/hexagon_il_G4_ops.c @@ -0,0 +1,37 @@ +// SPDX-FileCopyrightText: 2021 Rot127 +// SPDX-License-Identifier: LGPL-3.0-only + +// LLVM commit: b6f51787f6c8e77143f0aef6b58ddc7c55741d5c +// LLVM commit date: 2023-11-15 07:10:59 -0800 (ISO 8601 format) +// Date of code generation: 2024-03-16 06:22:39-05:00 +//======================================== +// The following code is generated. +// Do not edit. 
Repository of code generator: +// https://github.com/rizinorg/rz-hexagon + +#include +#include "../hexagon_il.h" +#include +#include + +// Rdd = Gss +RzILOpEffect *hex_il_op_g4_tfrgcpp(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Rd = Gs +RzILOpEffect *hex_il_op_g4_tfrgcrr(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Gdd = Rss +RzILOpEffect *hex_il_op_g4_tfrgpcp(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Gd = Rs +RzILOpEffect *hex_il_op_g4_tfrgrcr(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +#include \ No newline at end of file diff --git a/librz/arch/isa/hexagon/il_ops/hexagon_il_IMPORTED_ops.c b/librz/arch/isa/hexagon/il_ops/hexagon_il_IMPORTED_ops.c new file mode 100644 index 00000000000..07468a894a9 --- /dev/null +++ b/librz/arch/isa/hexagon/il_ops/hexagon_il_IMPORTED_ops.c @@ -0,0 +1,52 @@ +// SPDX-FileCopyrightText: 2021 Rot127 +// SPDX-License-Identifier: LGPL-3.0-only + +// LLVM commit: b6f51787f6c8e77143f0aef6b58ddc7c55741d5c +// LLVM commit date: 2023-11-15 07:10:59 -0800 (ISO 8601 format) +// Date of code generation: 2024-03-16 06:22:39-05:00 +//======================================== +// The following code is generated. +// Do not edit. 
Repository of code generator: +// https://github.com/rizinorg/rz-hexagon + +#include +#include "../hexagon_il.h" +#include +#include + +// Rd = Ss +RzILOpEffect *hex_il_op_imported_rd_ss(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Rd = memw_phys(Rs,Rt) +RzILOpEffect *hex_il_op_imported_rd_memw_phys_rs_rt(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Rdd = Sss +RzILOpEffect *hex_il_op_imported_rdd_sss(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Sd = Rs +RzILOpEffect *hex_il_op_imported_sd_rs(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Sdd = Rss +RzILOpEffect *hex_il_op_imported_sdd_rss(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// l2gclean(Rtt) +RzILOpEffect *hex_il_op_imported_l2gclean_rtt(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// l2gcleaninv(Rtt) +RzILOpEffect *hex_il_op_imported_l2gcleaninv_rtt(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +#include \ No newline at end of file diff --git a/librz/arch/isa/hexagon/il_ops/hexagon_il_J2_ops.c b/librz/arch/isa/hexagon/il_ops/hexagon_il_J2_ops.c new file mode 100644 index 00000000000..f4002386869 --- /dev/null +++ b/librz/arch/isa/hexagon/il_ops/hexagon_il_J2_ops.c @@ -0,0 +1,1433 @@ +// SPDX-FileCopyrightText: 2021 Rot127 +// SPDX-License-Identifier: LGPL-3.0-only + +// LLVM commit: b6f51787f6c8e77143f0aef6b58ddc7c55741d5c +// LLVM commit date: 2023-11-15 07:10:59 -0800 (ISO 8601 format) +// Date of code generation: 2024-03-16 06:22:39-05:00 +//======================================== +// The following code is generated. +// Do not edit. 
Repository of code generator: +// https://github.com/rizinorg/rz-hexagon + +#include +#include "../hexagon_il.h" +#include +#include + +// call Ii +RzILOpEffect *hex_il_op_j2_call(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + const HexOp lr_op = ALIAS2OP(HEX_REG_ALIAS_LR, false); + RzILOpPure *pc = U32(pkt->pkt_addr); + + // r = r; + RzILOpEffect *imm_assign_0 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_6 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_7 = SETL("r", op_AND_6); + + // HYB(call_pkt); + RzILOpEffect *c_call_9 = HEX_GET_NPC(pkt); + + // h_tmp157 = HYB(call_pkt); + RzILOpEffect *op_ASSIGN_hybrid_tmp_11 = SETL("h_tmp157", UNSIGNED(32, VARL("ret_val"))); + + // seq(HYB(call_pkt); h_tmp157 = HYB(call_pkt)); + RzILOpEffect *seq_12 = SEQN(2, c_call_9, op_ASSIGN_hybrid_tmp_11); + + // lr = (h_tmp157 & ((ut32) 0xfffffffe)); + RzILOpPure *op_AND_15 = LOGAND(VARL("h_tmp157"), CAST(32, IL_FALSE, SN(32, 0xfffffffe))); + RzILOpEffect *op_ASSIGN_16 = WRITE_REG(bundle, &lr_op, op_AND_15); + + // seq(seq(HYB(call_pkt); h_tmp157 = HYB(call_pkt)); lr = (h_tmp157 ...; + RzILOpEffect *seq_17 = SEQN(2, seq_12, op_ASSIGN_16); + + // jump(pc + ((ut32) r)); + RzILOpPure *op_ADD_20 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *jump_op_ADD_20_21 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", op_ADD_20)); + + RzILOpEffect *instruction_sequence = SEQN(4, imm_assign_0, op_ASSIGN_7, seq_17, jump_op_ADD_20_21); + return instruction_sequence; +} + +// if (!Pu) call Ii +RzILOpEffect *hex_il_op_j2_callf(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + const HexOp *Pu_op = ISA2REG(hi, 'u', false); + RzILOpPure *Pu = READ_REG(pkt, Pu_op, false); + const HexOp lr_op = ALIAS2OP(HEX_REG_ALIAS_LR, false); + RzILOpPure *pc = 
U32(pkt->pkt_addr); + + // r = r; + RzILOpEffect *imm_assign_0 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_6 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_7 = SETL("r", op_AND_6); + + // HYB(call_pkt); + RzILOpEffect *c_call_15 = HEX_GET_NPC(pkt); + + // h_tmp158 = HYB(call_pkt); + RzILOpEffect *op_ASSIGN_hybrid_tmp_17 = SETL("h_tmp158", UNSIGNED(32, VARL("ret_val"))); + + // seq(HYB(call_pkt); h_tmp158 = HYB(call_pkt)); + RzILOpEffect *seq_18 = SEQN(2, c_call_15, op_ASSIGN_hybrid_tmp_17); + + // lr = (h_tmp158 & ((ut32) 0xfffffffe)); + RzILOpPure *op_AND_21 = LOGAND(VARL("h_tmp158"), CAST(32, IL_FALSE, SN(32, 0xfffffffe))); + RzILOpEffect *op_ASSIGN_22 = WRITE_REG(bundle, &lr_op, op_AND_21); + + // seq(seq(HYB(call_pkt); h_tmp158 = HYB(call_pkt)); lr = (h_tmp158 ...; + RzILOpEffect *seq_23 = SEQN(2, seq_18, op_ASSIGN_22); + + // jump(pc + ((ut32) r)); + RzILOpPure *op_ADD_26 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *jump_op_ADD_26_27 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", op_ADD_26)); + + // seq(seq(seq(HYB(call_pkt); h_tmp158 = HYB(call_pkt)); lr = (h_tm ...; + RzILOpEffect *seq_then_30 = SEQN(2, seq_23, jump_op_ADD_26_27); + + // if (! 
(((st32) Pu) & 0x1)) {seq(seq(seq(HYB(call_pkt); h_tmp158 = HYB(call_pkt)); lr = (h_tm ...} else {{}}; + RzILOpPure *op_AND_12 = LOGAND(CAST(32, MSB(Pu), DUP(Pu)), SN(32, 1)); + RzILOpPure *op_INV_13 = INV(NON_ZERO(op_AND_12)); + RzILOpEffect *branch_31 = BRANCH(op_INV_13, seq_then_30, EMPTY()); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_7, branch_31); + return instruction_sequence; +} + +// callr Rs +RzILOpEffect *hex_il_op_j2_callr(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp lr_op = ALIAS2OP(HEX_REG_ALIAS_LR, false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // HYB(call_pkt); + RzILOpEffect *c_call_1 = HEX_GET_NPC(pkt); + + // h_tmp159 = HYB(call_pkt); + RzILOpEffect *op_ASSIGN_hybrid_tmp_3 = SETL("h_tmp159", UNSIGNED(32, VARL("ret_val"))); + + // seq(HYB(call_pkt); h_tmp159 = HYB(call_pkt)); + RzILOpEffect *seq_4 = SEQN(2, c_call_1, op_ASSIGN_hybrid_tmp_3); + + // lr = (h_tmp159 & ((ut32) 0xfffffffe)); + RzILOpPure *op_AND_7 = LOGAND(VARL("h_tmp159"), CAST(32, IL_FALSE, SN(32, 0xfffffffe))); + RzILOpEffect *op_ASSIGN_8 = WRITE_REG(bundle, &lr_op, op_AND_7); + + // seq(seq(HYB(call_pkt); h_tmp159 = HYB(call_pkt)); lr = (h_tmp159 ...; + RzILOpEffect *seq_9 = SEQN(2, seq_4, op_ASSIGN_8); + + // jump(Rs); + RzILOpEffect *jump_Rs_11 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", Rs)); + + RzILOpEffect *instruction_sequence = SEQN(2, seq_9, jump_Rs_11); + return instruction_sequence; +} + +// if (!Pu) callr Rs +RzILOpEffect *hex_il_op_j2_callrf(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Pu_op = ISA2REG(hi, 'u', false); + RzILOpPure *Pu = READ_REG(pkt, Pu_op, false); + const HexOp lr_op = ALIAS2OP(HEX_REG_ALIAS_LR, false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // 
HYB(call_pkt); + RzILOpEffect *c_call_7 = HEX_GET_NPC(pkt); + + // h_tmp160 = HYB(call_pkt); + RzILOpEffect *op_ASSIGN_hybrid_tmp_9 = SETL("h_tmp160", UNSIGNED(32, VARL("ret_val"))); + + // seq(HYB(call_pkt); h_tmp160 = HYB(call_pkt)); + RzILOpEffect *seq_10 = SEQN(2, c_call_7, op_ASSIGN_hybrid_tmp_9); + + // lr = (h_tmp160 & ((ut32) 0xfffffffe)); + RzILOpPure *op_AND_13 = LOGAND(VARL("h_tmp160"), CAST(32, IL_FALSE, SN(32, 0xfffffffe))); + RzILOpEffect *op_ASSIGN_14 = WRITE_REG(bundle, &lr_op, op_AND_13); + + // seq(seq(HYB(call_pkt); h_tmp160 = HYB(call_pkt)); lr = (h_tmp160 ...; + RzILOpEffect *seq_15 = SEQN(2, seq_10, op_ASSIGN_14); + + // jump(Rs); + RzILOpEffect *jump_Rs_17 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", Rs)); + + // seq(seq(seq(HYB(call_pkt); h_tmp160 = HYB(call_pkt)); lr = (h_tm ...; + RzILOpEffect *seq_then_20 = SEQN(2, seq_15, jump_Rs_17); + + // if (! (((st32) Pu) & 0x1)) {seq(seq(seq(HYB(call_pkt); h_tmp160 = HYB(call_pkt)); lr = (h_tm ...} else {{}}; + RzILOpPure *op_AND_4 = LOGAND(CAST(32, MSB(Pu), DUP(Pu)), SN(32, 1)); + RzILOpPure *op_INV_5 = INV(NON_ZERO(op_AND_4)); + RzILOpEffect *branch_21 = BRANCH(op_INV_5, seq_then_20, EMPTY()); + + RzILOpEffect *instruction_sequence = branch_21; + return instruction_sequence; +} + +// callrh Rs +RzILOpEffect *hex_il_op_j2_callrh(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp lr_op = ALIAS2OP(HEX_REG_ALIAS_LR, false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // HYB(call_pkt); + RzILOpEffect *c_call_1 = HEX_GET_NPC(pkt); + + // h_tmp161 = HYB(call_pkt); + RzILOpEffect *op_ASSIGN_hybrid_tmp_3 = SETL("h_tmp161", UNSIGNED(32, VARL("ret_val"))); + + // seq(HYB(call_pkt); h_tmp161 = HYB(call_pkt)); + RzILOpEffect *seq_4 = SEQN(2, c_call_1, op_ASSIGN_hybrid_tmp_3); + + // lr = (h_tmp161 & ((ut32) 0xfffffffe)); + RzILOpPure *op_AND_7 = LOGAND(VARL("h_tmp161"), CAST(32, 
IL_FALSE, SN(32, 0xfffffffe))); + RzILOpEffect *op_ASSIGN_8 = WRITE_REG(bundle, &lr_op, op_AND_7); + + // seq(seq(HYB(call_pkt); h_tmp161 = HYB(call_pkt)); lr = (h_tmp161 ...; + RzILOpEffect *seq_9 = SEQN(2, seq_4, op_ASSIGN_8); + + // jump(Rs); + RzILOpEffect *jump_Rs_11 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", Rs)); + + RzILOpEffect *instruction_sequence = SEQN(2, seq_9, jump_Rs_11); + return instruction_sequence; +} + +// if (Pu) callr Rs +RzILOpEffect *hex_il_op_j2_callrt(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Pu_op = ISA2REG(hi, 'u', false); + RzILOpPure *Pu = READ_REG(pkt, Pu_op, false); + const HexOp lr_op = ALIAS2OP(HEX_REG_ALIAS_LR, false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // HYB(call_pkt); + RzILOpEffect *c_call_6 = HEX_GET_NPC(pkt); + + // h_tmp162 = HYB(call_pkt); + RzILOpEffect *op_ASSIGN_hybrid_tmp_8 = SETL("h_tmp162", UNSIGNED(32, VARL("ret_val"))); + + // seq(HYB(call_pkt); h_tmp162 = HYB(call_pkt)); + RzILOpEffect *seq_9 = SEQN(2, c_call_6, op_ASSIGN_hybrid_tmp_8); + + // lr = (h_tmp162 & ((ut32) 0xfffffffe)); + RzILOpPure *op_AND_12 = LOGAND(VARL("h_tmp162"), CAST(32, IL_FALSE, SN(32, 0xfffffffe))); + RzILOpEffect *op_ASSIGN_13 = WRITE_REG(bundle, &lr_op, op_AND_12); + + // seq(seq(HYB(call_pkt); h_tmp162 = HYB(call_pkt)); lr = (h_tmp162 ...; + RzILOpEffect *seq_14 = SEQN(2, seq_9, op_ASSIGN_13); + + // jump(Rs); + RzILOpEffect *jump_Rs_16 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", Rs)); + + // seq(seq(seq(HYB(call_pkt); h_tmp162 = HYB(call_pkt)); lr = (h_tm ...; + RzILOpEffect *seq_then_19 = SEQN(2, seq_14, jump_Rs_16); + + // if ((((st32) Pu) & 0x1)) {seq(seq(seq(HYB(call_pkt); h_tmp162 = HYB(call_pkt)); lr = (h_tm ...} else {{}}; + RzILOpPure *op_AND_4 = LOGAND(CAST(32, MSB(Pu), DUP(Pu)), SN(32, 1)); + RzILOpEffect *branch_20 = BRANCH(NON_ZERO(op_AND_4), seq_then_19, 
EMPTY()); + + RzILOpEffect *instruction_sequence = branch_20; + return instruction_sequence; +} + +// if (Pu) call Ii +RzILOpEffect *hex_il_op_j2_callt(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + const HexOp *Pu_op = ISA2REG(hi, 'u', false); + RzILOpPure *Pu = READ_REG(pkt, Pu_op, false); + const HexOp lr_op = ALIAS2OP(HEX_REG_ALIAS_LR, false); + RzILOpPure *pc = U32(pkt->pkt_addr); + + // r = r; + RzILOpEffect *imm_assign_0 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_6 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_7 = SETL("r", op_AND_6); + + // HYB(call_pkt); + RzILOpEffect *c_call_14 = HEX_GET_NPC(pkt); + + // h_tmp163 = HYB(call_pkt); + RzILOpEffect *op_ASSIGN_hybrid_tmp_16 = SETL("h_tmp163", UNSIGNED(32, VARL("ret_val"))); + + // seq(HYB(call_pkt); h_tmp163 = HYB(call_pkt)); + RzILOpEffect *seq_17 = SEQN(2, c_call_14, op_ASSIGN_hybrid_tmp_16); + + // lr = (h_tmp163 & ((ut32) 0xfffffffe)); + RzILOpPure *op_AND_20 = LOGAND(VARL("h_tmp163"), CAST(32, IL_FALSE, SN(32, 0xfffffffe))); + RzILOpEffect *op_ASSIGN_21 = WRITE_REG(bundle, &lr_op, op_AND_20); + + // seq(seq(HYB(call_pkt); h_tmp163 = HYB(call_pkt)); lr = (h_tmp163 ...; + RzILOpEffect *seq_22 = SEQN(2, seq_17, op_ASSIGN_21); + + // jump(pc + ((ut32) r)); + RzILOpPure *op_ADD_25 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *jump_op_ADD_25_26 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", op_ADD_25)); + + // seq(seq(seq(HYB(call_pkt); h_tmp163 = HYB(call_pkt)); lr = (h_tm ...; + RzILOpEffect *seq_then_29 = SEQN(2, seq_22, jump_op_ADD_25_26); + + // if ((((st32) Pu) & 0x1)) {seq(seq(seq(HYB(call_pkt); h_tmp163 = HYB(call_pkt)); lr = (h_tm ...} else {{}}; + RzILOpPure *op_AND_12 = LOGAND(CAST(32, MSB(Pu), DUP(Pu)), SN(32, 1)); + RzILOpEffect *branch_30 = BRANCH(NON_ZERO(op_AND_12), seq_then_29, EMPTY()); + + RzILOpEffect *instruction_sequence = SEQN(3, 
imm_assign_0, op_ASSIGN_7, branch_30); + return instruction_sequence; +} + +// jump Ii +RzILOpEffect *hex_il_op_j2_jump(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + RzILOpPure *pc = U32(pkt->pkt_addr); + + // r = r; + RzILOpEffect *imm_assign_0 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_6 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_7 = SETL("r", op_AND_6); + + // jump(pc + ((ut32) r)); + RzILOpPure *op_ADD_10 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *jump_op_ADD_10_11 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", op_ADD_10)); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_7, jump_op_ADD_10_11); + return instruction_sequence; +} + +// if (!Pu) jump:nt Ii +RzILOpEffect *hex_il_op_j2_jumpf(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Pu_op = ISA2REG(hi, 'u', false); + RzILOpPure *Pu = READ_REG(pkt, Pu_op, false); + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + RzILOpPure *pc = U32(pkt->pkt_addr); + + // r = r; + RzILOpEffect *imm_assign_6 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_12 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_13 = SETL("r", op_AND_12); + + // jump(pc + ((ut32) r)); + RzILOpPure *op_ADD_16 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *jump_op_ADD_16_17 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", op_ADD_16)); + + // seq(r; r = (r & -0x4); jump(pc + ((ut32) r))); + RzILOpEffect *seq_then_20 = SEQN(2, op_ASSIGN_13, jump_op_ADD_16_17); + + // if (! 
(((st32) Pu) & 0x1)) {seq(r; r = (r & -0x4); jump(pc + ((ut32) r)))} else {{}}; + RzILOpPure *op_AND_4 = LOGAND(CAST(32, MSB(Pu), DUP(Pu)), SN(32, 1)); + RzILOpPure *op_INV_5 = INV(NON_ZERO(op_AND_4)); + RzILOpEffect *branch_21 = BRANCH(op_INV_5, seq_then_20, EMPTY()); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_6, branch_21); + return instruction_sequence; +} + +// if (!Pu.new) jump:nt Ii +RzILOpEffect *hex_il_op_j2_jumpfnew(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Pu_new_op = ISA2REG(hi, 'u', true); + RzILOpPure *Pu_new = READ_REG(pkt, Pu_new_op, true); + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + RzILOpPure *pc = U32(pkt->pkt_addr); + + // r = r; + RzILOpEffect *imm_assign_6 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_12 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_13 = SETL("r", op_AND_12); + + // jump(pc + ((ut32) r)); + RzILOpPure *op_ADD_16 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *jump_op_ADD_16_17 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", op_ADD_16)); + + // seq(r; r = (r & -0x4); jump(pc + ((ut32) r))); + RzILOpEffect *seq_then_20 = SEQN(2, op_ASSIGN_13, jump_op_ADD_16_17); + + // if (! 
(((st32) Pu_new) & 0x1)) {seq(r; r = (r & -0x4); jump(pc + ((ut32) r)))} else {{}}; + RzILOpPure *op_AND_4 = LOGAND(CAST(32, MSB(Pu_new), DUP(Pu_new)), SN(32, 1)); + RzILOpPure *op_INV_5 = INV(NON_ZERO(op_AND_4)); + RzILOpEffect *branch_21 = BRANCH(op_INV_5, seq_then_20, EMPTY()); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_6, branch_21); + return instruction_sequence; +} + +// if (!Pu.new) jump:t Ii +RzILOpEffect *hex_il_op_j2_jumpfnewpt(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Pu_new_op = ISA2REG(hi, 'u', true); + RzILOpPure *Pu_new = READ_REG(pkt, Pu_new_op, true); + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + RzILOpPure *pc = U32(pkt->pkt_addr); + + // r = r; + RzILOpEffect *imm_assign_6 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_12 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_13 = SETL("r", op_AND_12); + + // jump(pc + ((ut32) r)); + RzILOpPure *op_ADD_16 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *jump_op_ADD_16_17 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", op_ADD_16)); + + // seq(r; r = (r & -0x4); jump(pc + ((ut32) r))); + RzILOpEffect *seq_then_20 = SEQN(2, op_ASSIGN_13, jump_op_ADD_16_17); + + // if (! 
(((st32) Pu_new) & 0x1)) {seq(r; r = (r & -0x4); jump(pc + ((ut32) r)))} else {{}}; + RzILOpPure *op_AND_4 = LOGAND(CAST(32, MSB(Pu_new), DUP(Pu_new)), SN(32, 1)); + RzILOpPure *op_INV_5 = INV(NON_ZERO(op_AND_4)); + RzILOpEffect *branch_21 = BRANCH(op_INV_5, seq_then_20, EMPTY()); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_6, branch_21); + return instruction_sequence; +} + +// if (!Pu) jump:t Ii +RzILOpEffect *hex_il_op_j2_jumpfpt(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Pu_op = ISA2REG(hi, 'u', false); + RzILOpPure *Pu = READ_REG(pkt, Pu_op, false); + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + RzILOpPure *pc = U32(pkt->pkt_addr); + + // r = r; + RzILOpEffect *imm_assign_6 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_12 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_13 = SETL("r", op_AND_12); + + // jump(pc + ((ut32) r)); + RzILOpPure *op_ADD_16 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *jump_op_ADD_16_17 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", op_ADD_16)); + + // seq(r; r = (r & -0x4); jump(pc + ((ut32) r))); + RzILOpEffect *seq_then_20 = SEQN(2, op_ASSIGN_13, jump_op_ADD_16_17); + + // if (! 
(((st32) Pu) & 0x1)) {seq(r; r = (r & -0x4); jump(pc + ((ut32) r)))} else {{}}; + RzILOpPure *op_AND_4 = LOGAND(CAST(32, MSB(Pu), DUP(Pu)), SN(32, 1)); + RzILOpPure *op_INV_5 = INV(NON_ZERO(op_AND_4)); + RzILOpEffect *branch_21 = BRANCH(op_INV_5, seq_then_20, EMPTY()); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_6, branch_21); + return instruction_sequence; +} + +// jumpr Rs +RzILOpEffect *hex_il_op_j2_jumpr(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // jump(Rs); + RzILOpEffect *jump_Rs_1 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", Rs)); + + RzILOpEffect *instruction_sequence = jump_Rs_1; + return instruction_sequence; +} + +// if (!Pu) jumpr:nt Rs +RzILOpEffect *hex_il_op_j2_jumprf(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Pu_op = ISA2REG(hi, 'u', false); + RzILOpPure *Pu = READ_REG(pkt, Pu_op, false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // jump(Rs); + RzILOpEffect *jump_Rs_7 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", Rs)); + + // seq(jump(Rs)); + RzILOpEffect *seq_then_10 = jump_Rs_7; + + // if (! 
(((st32) Pu) & 0x1)) {seq(jump(Rs))} else {{}}; + RzILOpPure *op_AND_4 = LOGAND(CAST(32, MSB(Pu), DUP(Pu)), SN(32, 1)); + RzILOpPure *op_INV_5 = INV(NON_ZERO(op_AND_4)); + RzILOpEffect *branch_11 = BRANCH(op_INV_5, seq_then_10, EMPTY()); + + RzILOpEffect *instruction_sequence = branch_11; + return instruction_sequence; +} + +// if (!Pu.new) jumpr:nt Rs +RzILOpEffect *hex_il_op_j2_jumprfnew(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Pu_new_op = ISA2REG(hi, 'u', true); + RzILOpPure *Pu_new = READ_REG(pkt, Pu_new_op, true); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // jump(Rs); + RzILOpEffect *jump_Rs_7 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", Rs)); + + // seq(jump(Rs)); + RzILOpEffect *seq_then_10 = jump_Rs_7; + + // if (! (((st32) Pu_new) & 0x1)) {seq(jump(Rs))} else {{}}; + RzILOpPure *op_AND_4 = LOGAND(CAST(32, MSB(Pu_new), DUP(Pu_new)), SN(32, 1)); + RzILOpPure *op_INV_5 = INV(NON_ZERO(op_AND_4)); + RzILOpEffect *branch_11 = BRANCH(op_INV_5, seq_then_10, EMPTY()); + + RzILOpEffect *instruction_sequence = branch_11; + return instruction_sequence; +} + +// if (!Pu.new) jumpr:t Rs +RzILOpEffect *hex_il_op_j2_jumprfnewpt(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Pu_new_op = ISA2REG(hi, 'u', true); + RzILOpPure *Pu_new = READ_REG(pkt, Pu_new_op, true); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // jump(Rs); + RzILOpEffect *jump_Rs_7 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", Rs)); + + // seq(jump(Rs)); + RzILOpEffect *seq_then_10 = jump_Rs_7; + + // if (! 
(((st32) Pu_new) & 0x1)) {seq(jump(Rs))} else {{}}; + RzILOpPure *op_AND_4 = LOGAND(CAST(32, MSB(Pu_new), DUP(Pu_new)), SN(32, 1)); + RzILOpPure *op_INV_5 = INV(NON_ZERO(op_AND_4)); + RzILOpEffect *branch_11 = BRANCH(op_INV_5, seq_then_10, EMPTY()); + + RzILOpEffect *instruction_sequence = branch_11; + return instruction_sequence; +} + +// if (!Pu) jumpr:t Rs +RzILOpEffect *hex_il_op_j2_jumprfpt(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Pu_op = ISA2REG(hi, 'u', false); + RzILOpPure *Pu = READ_REG(pkt, Pu_op, false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // jump(Rs); + RzILOpEffect *jump_Rs_7 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", Rs)); + + // seq(jump(Rs)); + RzILOpEffect *seq_then_10 = jump_Rs_7; + + // if (! (((st32) Pu) & 0x1)) {seq(jump(Rs))} else {{}}; + RzILOpPure *op_AND_4 = LOGAND(CAST(32, MSB(Pu), DUP(Pu)), SN(32, 1)); + RzILOpPure *op_INV_5 = INV(NON_ZERO(op_AND_4)); + RzILOpEffect *branch_11 = BRANCH(op_INV_5, seq_then_10, EMPTY()); + + RzILOpEffect *instruction_sequence = branch_11; + return instruction_sequence; +} + +// if (Rs>=#0) jump:nt Ii +RzILOpEffect *hex_il_op_j2_jumprgtez(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + RzILOpPure *pc = U32(pkt->pkt_addr); + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + + // r = r; + RzILOpEffect *imm_assign_4 = SETL("r", r); + + // jump(pc + ((ut32) r)); + RzILOpPure *op_ADD_7 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *jump_op_ADD_7_8 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", op_ADD_7)); + + // seq(jump(pc + ((ut32) r))); + RzILOpEffect *seq_then_10 = jump_op_ADD_7_8; + + // if ((Rs >= 0x0)) {seq(jump(pc + ((ut32) r)))} else {{}}; + RzILOpPure *op_GE_2 = SGE(Rs, SN(32, 0)); + 
RzILOpEffect *branch_11 = BRANCH(op_GE_2, seq_then_10, EMPTY()); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_4, branch_11); + return instruction_sequence; +} + +// if (Rs>=#0) jump:t Ii +RzILOpEffect *hex_il_op_j2_jumprgtezpt(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + RzILOpPure *pc = U32(pkt->pkt_addr); + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + + // r = r; + RzILOpEffect *imm_assign_4 = SETL("r", r); + + // jump(pc + ((ut32) r)); + RzILOpPure *op_ADD_7 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *jump_op_ADD_7_8 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", op_ADD_7)); + + // seq(jump(pc + ((ut32) r))); + RzILOpEffect *seq_then_10 = jump_op_ADD_7_8; + + // if ((Rs >= 0x0)) {seq(jump(pc + ((ut32) r)))} else {{}}; + RzILOpPure *op_GE_2 = SGE(Rs, SN(32, 0)); + RzILOpEffect *branch_11 = BRANCH(op_GE_2, seq_then_10, EMPTY()); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_4, branch_11); + return instruction_sequence; +} + +// jumprh Rs +RzILOpEffect *hex_il_op_j2_jumprh(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // jump(Rs); + RzILOpEffect *jump_Rs_1 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", Rs)); + + RzILOpEffect *instruction_sequence = jump_Rs_1; + return instruction_sequence; +} + +// if (Rs<=#0) jump:nt Ii +RzILOpEffect *hex_il_op_j2_jumprltez(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + RzILOpPure *pc = U32(pkt->pkt_addr); + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + + // r = r; + RzILOpEffect *imm_assign_4 = SETL("r", 
r); + + // jump(pc + ((ut32) r)); + RzILOpPure *op_ADD_7 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *jump_op_ADD_7_8 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", op_ADD_7)); + + // seq(jump(pc + ((ut32) r))); + RzILOpEffect *seq_then_10 = jump_op_ADD_7_8; + + // if ((Rs <= 0x0)) {seq(jump(pc + ((ut32) r)))} else {{}}; + RzILOpPure *op_LE_2 = SLE(Rs, SN(32, 0)); + RzILOpEffect *branch_11 = BRANCH(op_LE_2, seq_then_10, EMPTY()); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_4, branch_11); + return instruction_sequence; +} + +// if (Rs<=#0) jump:t Ii +RzILOpEffect *hex_il_op_j2_jumprltezpt(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + RzILOpPure *pc = U32(pkt->pkt_addr); + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + + // r = r; + RzILOpEffect *imm_assign_4 = SETL("r", r); + + // jump(pc + ((ut32) r)); + RzILOpPure *op_ADD_7 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *jump_op_ADD_7_8 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", op_ADD_7)); + + // seq(jump(pc + ((ut32) r))); + RzILOpEffect *seq_then_10 = jump_op_ADD_7_8; + + // if ((Rs <= 0x0)) {seq(jump(pc + ((ut32) r)))} else {{}}; + RzILOpPure *op_LE_2 = SLE(Rs, SN(32, 0)); + RzILOpEffect *branch_11 = BRANCH(op_LE_2, seq_then_10, EMPTY()); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_4, branch_11); + return instruction_sequence; +} + +// if (Rs==#0) jump:nt Ii +RzILOpEffect *hex_il_op_j2_jumprnz(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + RzILOpPure *pc = U32(pkt->pkt_addr); + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + + // r = r; + RzILOpEffect *imm_assign_4 = SETL("r", r); + + // jump(pc + ((ut32) r)); + RzILOpPure 
*op_ADD_7 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *jump_op_ADD_7_8 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", op_ADD_7)); + + // seq(jump(pc + ((ut32) r))); + RzILOpEffect *seq_then_10 = jump_op_ADD_7_8; + + // if ((Rs == 0x0)) {seq(jump(pc + ((ut32) r)))} else {{}}; + RzILOpPure *op_EQ_2 = EQ(Rs, SN(32, 0)); + RzILOpEffect *branch_11 = BRANCH(op_EQ_2, seq_then_10, EMPTY()); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_4, branch_11); + return instruction_sequence; +} + +// if (Rs==#0) jump:t Ii +RzILOpEffect *hex_il_op_j2_jumprnzpt(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + RzILOpPure *pc = U32(pkt->pkt_addr); + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + + // r = r; + RzILOpEffect *imm_assign_4 = SETL("r", r); + + // jump(pc + ((ut32) r)); + RzILOpPure *op_ADD_7 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *jump_op_ADD_7_8 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", op_ADD_7)); + + // seq(jump(pc + ((ut32) r))); + RzILOpEffect *seq_then_10 = jump_op_ADD_7_8; + + // if ((Rs == 0x0)) {seq(jump(pc + ((ut32) r)))} else {{}}; + RzILOpPure *op_EQ_2 = EQ(Rs, SN(32, 0)); + RzILOpEffect *branch_11 = BRANCH(op_EQ_2, seq_then_10, EMPTY()); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_4, branch_11); + return instruction_sequence; +} + +// if (Pu) jumpr:nt Rs +RzILOpEffect *hex_il_op_j2_jumprt(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Pu_op = ISA2REG(hi, 'u', false); + RzILOpPure *Pu = READ_REG(pkt, Pu_op, false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // jump(Rs); + RzILOpEffect *jump_Rs_6 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", Rs)); + + // seq(jump(Rs)); + RzILOpEffect *seq_then_9 = 
jump_Rs_6; + + // if ((((st32) Pu) & 0x1)) {seq(jump(Rs))} else {{}}; + RzILOpPure *op_AND_4 = LOGAND(CAST(32, MSB(Pu), DUP(Pu)), SN(32, 1)); + RzILOpEffect *branch_10 = BRANCH(NON_ZERO(op_AND_4), seq_then_9, EMPTY()); + + RzILOpEffect *instruction_sequence = branch_10; + return instruction_sequence; +} + +// if (Pu.new) jumpr:nt Rs +RzILOpEffect *hex_il_op_j2_jumprtnew(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Pu_new_op = ISA2REG(hi, 'u', true); + RzILOpPure *Pu_new = READ_REG(pkt, Pu_new_op, true); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // jump(Rs); + RzILOpEffect *jump_Rs_6 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", Rs)); + + // seq(jump(Rs)); + RzILOpEffect *seq_then_9 = jump_Rs_6; + + // if ((((st32) Pu_new) & 0x1)) {seq(jump(Rs))} else {{}}; + RzILOpPure *op_AND_4 = LOGAND(CAST(32, MSB(Pu_new), DUP(Pu_new)), SN(32, 1)); + RzILOpEffect *branch_10 = BRANCH(NON_ZERO(op_AND_4), seq_then_9, EMPTY()); + + RzILOpEffect *instruction_sequence = branch_10; + return instruction_sequence; +} + +// if (Pu.new) jumpr:t Rs +RzILOpEffect *hex_il_op_j2_jumprtnewpt(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Pu_new_op = ISA2REG(hi, 'u', true); + RzILOpPure *Pu_new = READ_REG(pkt, Pu_new_op, true); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // jump(Rs); + RzILOpEffect *jump_Rs_6 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", Rs)); + + // seq(jump(Rs)); + RzILOpEffect *seq_then_9 = jump_Rs_6; + + // if ((((st32) Pu_new) & 0x1)) {seq(jump(Rs))} else {{}}; + RzILOpPure *op_AND_4 = LOGAND(CAST(32, MSB(Pu_new), DUP(Pu_new)), SN(32, 1)); + RzILOpEffect *branch_10 = BRANCH(NON_ZERO(op_AND_4), seq_then_9, EMPTY()); + + RzILOpEffect *instruction_sequence = branch_10; + return instruction_sequence; +} + 
+// if (Pu) jumpr:t Rs +RzILOpEffect *hex_il_op_j2_jumprtpt(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Pu_op = ISA2REG(hi, 'u', false); + RzILOpPure *Pu = READ_REG(pkt, Pu_op, false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // jump(Rs); + RzILOpEffect *jump_Rs_6 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", Rs)); + + // seq(jump(Rs)); + RzILOpEffect *seq_then_9 = jump_Rs_6; + + // if ((((st32) Pu) & 0x1)) {seq(jump(Rs))} else {{}}; + RzILOpPure *op_AND_4 = LOGAND(CAST(32, MSB(Pu), DUP(Pu)), SN(32, 1)); + RzILOpEffect *branch_10 = BRANCH(NON_ZERO(op_AND_4), seq_then_9, EMPTY()); + + RzILOpEffect *instruction_sequence = branch_10; + return instruction_sequence; +} + +// if (Rs!=#0) jump:nt Ii +RzILOpEffect *hex_il_op_j2_jumprz(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + RzILOpPure *pc = U32(pkt->pkt_addr); + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + + // r = r; + RzILOpEffect *imm_assign_4 = SETL("r", r); + + // jump(pc + ((ut32) r)); + RzILOpPure *op_ADD_7 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *jump_op_ADD_7_8 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", op_ADD_7)); + + // seq(jump(pc + ((ut32) r))); + RzILOpEffect *seq_then_10 = jump_op_ADD_7_8; + + // if ((Rs != 0x0)) {seq(jump(pc + ((ut32) r)))} else {{}}; + RzILOpPure *op_NE_2 = INV(EQ(Rs, SN(32, 0))); + RzILOpEffect *branch_11 = BRANCH(op_NE_2, seq_then_10, EMPTY()); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_4, branch_11); + return instruction_sequence; +} + +// if (Rs!=#0) jump:t Ii +RzILOpEffect *hex_il_op_j2_jumprzpt(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rs_op = ISA2REG(hi, 's', 
false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + RzILOpPure *pc = U32(pkt->pkt_addr); + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + + // r = r; + RzILOpEffect *imm_assign_4 = SETL("r", r); + + // jump(pc + ((ut32) r)); + RzILOpPure *op_ADD_7 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *jump_op_ADD_7_8 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", op_ADD_7)); + + // seq(jump(pc + ((ut32) r))); + RzILOpEffect *seq_then_10 = jump_op_ADD_7_8; + + // if ((Rs != 0x0)) {seq(jump(pc + ((ut32) r)))} else {{}}; + RzILOpPure *op_NE_2 = INV(EQ(Rs, SN(32, 0))); + RzILOpEffect *branch_11 = BRANCH(op_NE_2, seq_then_10, EMPTY()); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_4, branch_11); + return instruction_sequence; +} + +// if (Pu) jump:nt Ii +RzILOpEffect *hex_il_op_j2_jumpt(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Pu_op = ISA2REG(hi, 'u', false); + RzILOpPure *Pu = READ_REG(pkt, Pu_op, false); + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + RzILOpPure *pc = U32(pkt->pkt_addr); + + // r = r; + RzILOpEffect *imm_assign_5 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_11 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_12 = SETL("r", op_AND_11); + + // jump(pc + ((ut32) r)); + RzILOpPure *op_ADD_15 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *jump_op_ADD_15_16 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", op_ADD_15)); + + // seq(r; r = (r & -0x4); jump(pc + ((ut32) r))); + RzILOpEffect *seq_then_19 = SEQN(2, op_ASSIGN_12, jump_op_ADD_15_16); + + // if ((((st32) Pu) & 0x1)) {seq(r; r = (r & -0x4); jump(pc + ((ut32) r)))} else {{}}; + RzILOpPure *op_AND_4 = LOGAND(CAST(32, MSB(Pu), DUP(Pu)), SN(32, 1)); + RzILOpEffect *branch_20 = BRANCH(NON_ZERO(op_AND_4), seq_then_19, EMPTY()); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_5, branch_20); + return instruction_sequence; +} + +// if (Pu.new) 
jump:nt Ii +RzILOpEffect *hex_il_op_j2_jumptnew(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Pu_new_op = ISA2REG(hi, 'u', true); + RzILOpPure *Pu_new = READ_REG(pkt, Pu_new_op, true); + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + RzILOpPure *pc = U32(pkt->pkt_addr); + + // r = r; + RzILOpEffect *imm_assign_5 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_11 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_12 = SETL("r", op_AND_11); + + // jump(pc + ((ut32) r)); + RzILOpPure *op_ADD_15 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *jump_op_ADD_15_16 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", op_ADD_15)); + + // seq(r; r = (r & -0x4); jump(pc + ((ut32) r))); + RzILOpEffect *seq_then_19 = SEQN(2, op_ASSIGN_12, jump_op_ADD_15_16); + + // if ((((st32) Pu_new) & 0x1)) {seq(r; r = (r & -0x4); jump(pc + ((ut32) r)))} else {{}}; + RzILOpPure *op_AND_4 = LOGAND(CAST(32, MSB(Pu_new), DUP(Pu_new)), SN(32, 1)); + RzILOpEffect *branch_20 = BRANCH(NON_ZERO(op_AND_4), seq_then_19, EMPTY()); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_5, branch_20); + return instruction_sequence; +} + +// if (Pu.new) jump:t Ii +RzILOpEffect *hex_il_op_j2_jumptnewpt(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Pu_new_op = ISA2REG(hi, 'u', true); + RzILOpPure *Pu_new = READ_REG(pkt, Pu_new_op, true); + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + RzILOpPure *pc = U32(pkt->pkt_addr); + + // r = r; + RzILOpEffect *imm_assign_5 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_11 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_12 = SETL("r", op_AND_11); + + // jump(pc + ((ut32) r)); + RzILOpPure *op_ADD_15 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *jump_op_ADD_15_16 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", op_ADD_15)); + + // seq(r; r = (r 
& -0x4); jump(pc + ((ut32) r))); + RzILOpEffect *seq_then_19 = SEQN(2, op_ASSIGN_12, jump_op_ADD_15_16); + + // if ((((st32) Pu_new) & 0x1)) {seq(r; r = (r & -0x4); jump(pc + ((ut32) r)))} else {{}}; + RzILOpPure *op_AND_4 = LOGAND(CAST(32, MSB(Pu_new), DUP(Pu_new)), SN(32, 1)); + RzILOpEffect *branch_20 = BRANCH(NON_ZERO(op_AND_4), seq_then_19, EMPTY()); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_5, branch_20); + return instruction_sequence; +} + +// if (Pu) jump:t Ii +RzILOpEffect *hex_il_op_j2_jumptpt(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Pu_op = ISA2REG(hi, 'u', false); + RzILOpPure *Pu = READ_REG(pkt, Pu_op, false); + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + RzILOpPure *pc = U32(pkt->pkt_addr); + + // r = r; + RzILOpEffect *imm_assign_5 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_11 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_12 = SETL("r", op_AND_11); + + // jump(pc + ((ut32) r)); + RzILOpPure *op_ADD_15 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *jump_op_ADD_15_16 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", op_ADD_15)); + + // seq(r; r = (r & -0x4); jump(pc + ((ut32) r))); + RzILOpEffect *seq_then_19 = SEQN(2, op_ASSIGN_12, jump_op_ADD_15_16); + + // if ((((st32) Pu) & 0x1)) {seq(r; r = (r & -0x4); jump(pc + ((ut32) r)))} else {{}}; + RzILOpPure *op_AND_4 = LOGAND(CAST(32, MSB(Pu), DUP(Pu)), SN(32, 1)); + RzILOpEffect *branch_20 = BRANCH(NON_ZERO(op_AND_4), seq_then_19, EMPTY()); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_5, branch_20); + return instruction_sequence; +} + +// loop0(Ii,II) +RzILOpEffect *hex_il_op_j2_loop0i(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + const HexOp sa0_op = ALIAS2OP(HEX_REG_ALIAS_SA0, false); + RzILOpPure *pc = U32(pkt->pkt_addr); + const HexOp 
lc0_op = ALIAS2OP(HEX_REG_ALIAS_LC0, false); + RzILOpPure *U = UN(32, (ut32)ISA2IMM(hi, 'U')); + + // r = r; + RzILOpEffect *imm_assign_0 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_6 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_7 = SETL("r", op_AND_6); + + // sa0 = pc + ((ut32) r); + RzILOpPure *op_ADD_11 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *op_ASSIGN_12 = WRITE_REG(bundle, &sa0_op, op_ADD_11); + + // U = U; + RzILOpEffect *imm_assign_14 = SETL("U", U); + + // lc0 = U; + RzILOpEffect *op_ASSIGN_16 = WRITE_REG(bundle, &lc0_op, VARL("U")); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_LPCFG, ((ut32) 0x0)); + RzILOpEffect *set_usr_field_call_19 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_LPCFG, CAST(32, IL_FALSE, SN(32, 0))); + + RzILOpEffect *instruction_sequence = SEQN(6, imm_assign_0, imm_assign_14, op_ASSIGN_7, op_ASSIGN_12, op_ASSIGN_16, set_usr_field_call_19); + return instruction_sequence; +} + +// loop0(Ii,Rs) +RzILOpEffect *hex_il_op_j2_loop0r(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + const HexOp sa0_op = ALIAS2OP(HEX_REG_ALIAS_SA0, false); + RzILOpPure *pc = U32(pkt->pkt_addr); + const HexOp lc0_op = ALIAS2OP(HEX_REG_ALIAS_LC0, false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // r = r; + RzILOpEffect *imm_assign_0 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_6 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_7 = SETL("r", op_AND_6); + + // sa0 = pc + ((ut32) r); + RzILOpPure *op_ADD_11 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *op_ASSIGN_12 = WRITE_REG(bundle, &sa0_op, op_ADD_11); + + // lc0 = ((ut32) Rs); + RzILOpEffect *op_ASSIGN_16 = WRITE_REG(bundle, &lc0_op, CAST(32, IL_FALSE, Rs)); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_LPCFG, ((ut32) 0x0)); + RzILOpEffect *set_usr_field_call_19 = 
hex_set_usr_field(bundle, HEX_REG_FIELD_USR_LPCFG, CAST(32, IL_FALSE, SN(32, 0))); + + RzILOpEffect *instruction_sequence = SEQN(5, imm_assign_0, op_ASSIGN_7, op_ASSIGN_12, op_ASSIGN_16, set_usr_field_call_19); + return instruction_sequence; +} + +// loop1(Ii,II) +RzILOpEffect *hex_il_op_j2_loop1i(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + const HexOp sa1_op = ALIAS2OP(HEX_REG_ALIAS_SA1, false); + RzILOpPure *pc = U32(pkt->pkt_addr); + const HexOp lc1_op = ALIAS2OP(HEX_REG_ALIAS_LC1, false); + RzILOpPure *U = UN(32, (ut32)ISA2IMM(hi, 'U')); + + // r = r; + RzILOpEffect *imm_assign_0 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_6 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_7 = SETL("r", op_AND_6); + + // sa1 = pc + ((ut32) r); + RzILOpPure *op_ADD_11 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *op_ASSIGN_12 = WRITE_REG(bundle, &sa1_op, op_ADD_11); + + // U = U; + RzILOpEffect *imm_assign_14 = SETL("U", U); + + // lc1 = U; + RzILOpEffect *op_ASSIGN_16 = WRITE_REG(bundle, &lc1_op, VARL("U")); + + RzILOpEffect *instruction_sequence = SEQN(5, imm_assign_0, imm_assign_14, op_ASSIGN_7, op_ASSIGN_12, op_ASSIGN_16); + return instruction_sequence; +} + +// loop1(Ii,Rs) +RzILOpEffect *hex_il_op_j2_loop1r(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + const HexOp sa1_op = ALIAS2OP(HEX_REG_ALIAS_SA1, false); + RzILOpPure *pc = U32(pkt->pkt_addr); + const HexOp lc1_op = ALIAS2OP(HEX_REG_ALIAS_LC1, false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // r = r; + RzILOpEffect *imm_assign_0 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_6 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_7 = SETL("r", op_AND_6); + + // sa1 = pc + ((ut32) r); + 
RzILOpPure *op_ADD_11 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *op_ASSIGN_12 = WRITE_REG(bundle, &sa1_op, op_ADD_11); + + // lc1 = ((ut32) Rs); + RzILOpEffect *op_ASSIGN_16 = WRITE_REG(bundle, &lc1_op, CAST(32, IL_FALSE, Rs)); + + RzILOpEffect *instruction_sequence = SEQN(4, imm_assign_0, op_ASSIGN_7, op_ASSIGN_12, op_ASSIGN_16); + return instruction_sequence; +} + +// pause(Ii) +RzILOpEffect *hex_il_op_j2_pause(HexInsnPktBundle *bundle) { + // READ + + RzILOpEffect *instruction_sequence = EMPTY(); + return instruction_sequence; +} + +// p3 = sp1loop0(Ii,II) +RzILOpEffect *hex_il_op_j2_ploop1si(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + const HexOp sa0_op = ALIAS2OP(HEX_REG_ALIAS_SA0, false); + RzILOpPure *pc = U32(pkt->pkt_addr); + const HexOp lc0_op = ALIAS2OP(HEX_REG_ALIAS_LC0, false); + RzILOpPure *U = UN(32, (ut32)ISA2IMM(hi, 'U')); + const HexOp P3_op = EXPLICIT2OP(3, HEX_REG_CLASS_PRED_REGS, false); + + // r = r; + RzILOpEffect *imm_assign_0 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_6 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_7 = SETL("r", op_AND_6); + + // sa0 = pc + ((ut32) r); + RzILOpPure *op_ADD_11 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *op_ASSIGN_12 = WRITE_REG(bundle, &sa0_op, op_ADD_11); + + // U = U; + RzILOpEffect *imm_assign_14 = SETL("U", U); + + // lc0 = U; + RzILOpEffect *op_ASSIGN_16 = WRITE_REG(bundle, &lc0_op, VARL("U")); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_LPCFG, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_19 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_LPCFG, CAST(32, IL_FALSE, SN(32, 1))); + + // P3 = ((st8) 0x0); + RzILOpEffect *op_ASSIGN_23 = WRITE_REG(bundle, &P3_op, CAST(8, MSB(SN(32, 0)), SN(32, 0))); + + RzILOpEffect *instruction_sequence = SEQN(7, imm_assign_0, imm_assign_14, op_ASSIGN_7, op_ASSIGN_12, op_ASSIGN_16, 
set_usr_field_call_19, op_ASSIGN_23); + return instruction_sequence; +} + +// p3 = sp1loop0(Ii,Rs) +RzILOpEffect *hex_il_op_j2_ploop1sr(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + const HexOp sa0_op = ALIAS2OP(HEX_REG_ALIAS_SA0, false); + RzILOpPure *pc = U32(pkt->pkt_addr); + const HexOp lc0_op = ALIAS2OP(HEX_REG_ALIAS_LC0, false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp P3_op = EXPLICIT2OP(3, HEX_REG_CLASS_PRED_REGS, false); + + // r = r; + RzILOpEffect *imm_assign_0 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_6 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_7 = SETL("r", op_AND_6); + + // sa0 = pc + ((ut32) r); + RzILOpPure *op_ADD_11 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *op_ASSIGN_12 = WRITE_REG(bundle, &sa0_op, op_ADD_11); + + // lc0 = ((ut32) Rs); + RzILOpEffect *op_ASSIGN_16 = WRITE_REG(bundle, &lc0_op, CAST(32, IL_FALSE, Rs)); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_LPCFG, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_19 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_LPCFG, CAST(32, IL_FALSE, SN(32, 1))); + + // P3 = ((st8) 0x0); + RzILOpEffect *op_ASSIGN_23 = WRITE_REG(bundle, &P3_op, CAST(8, MSB(SN(32, 0)), SN(32, 0))); + + RzILOpEffect *instruction_sequence = SEQN(6, imm_assign_0, op_ASSIGN_7, op_ASSIGN_12, op_ASSIGN_16, set_usr_field_call_19, op_ASSIGN_23); + return instruction_sequence; +} + +// p3 = sp2loop0(Ii,II) +RzILOpEffect *hex_il_op_j2_ploop2si(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + const HexOp sa0_op = ALIAS2OP(HEX_REG_ALIAS_SA0, false); + RzILOpPure *pc = U32(pkt->pkt_addr); + const HexOp lc0_op = ALIAS2OP(HEX_REG_ALIAS_LC0, false); + RzILOpPure *U = UN(32, (ut32)ISA2IMM(hi, 'U')); + const 
HexOp P3_op = EXPLICIT2OP(3, HEX_REG_CLASS_PRED_REGS, false); + + // r = r; + RzILOpEffect *imm_assign_0 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_6 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_7 = SETL("r", op_AND_6); + + // sa0 = pc + ((ut32) r); + RzILOpPure *op_ADD_11 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *op_ASSIGN_12 = WRITE_REG(bundle, &sa0_op, op_ADD_11); + + // U = U; + RzILOpEffect *imm_assign_14 = SETL("U", U); + + // lc0 = U; + RzILOpEffect *op_ASSIGN_16 = WRITE_REG(bundle, &lc0_op, VARL("U")); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_LPCFG, ((ut32) 0x2)); + RzILOpEffect *set_usr_field_call_19 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_LPCFG, CAST(32, IL_FALSE, SN(32, 2))); + + // P3 = ((st8) 0x0); + RzILOpEffect *op_ASSIGN_23 = WRITE_REG(bundle, &P3_op, CAST(8, MSB(SN(32, 0)), SN(32, 0))); + + RzILOpEffect *instruction_sequence = SEQN(7, imm_assign_0, imm_assign_14, op_ASSIGN_7, op_ASSIGN_12, op_ASSIGN_16, set_usr_field_call_19, op_ASSIGN_23); + return instruction_sequence; +} + +// p3 = sp2loop0(Ii,Rs) +RzILOpEffect *hex_il_op_j2_ploop2sr(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + const HexOp sa0_op = ALIAS2OP(HEX_REG_ALIAS_SA0, false); + RzILOpPure *pc = U32(pkt->pkt_addr); + const HexOp lc0_op = ALIAS2OP(HEX_REG_ALIAS_LC0, false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp P3_op = EXPLICIT2OP(3, HEX_REG_CLASS_PRED_REGS, false); + + // r = r; + RzILOpEffect *imm_assign_0 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_6 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_7 = SETL("r", op_AND_6); + + // sa0 = pc + ((ut32) r); + RzILOpPure *op_ADD_11 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *op_ASSIGN_12 = WRITE_REG(bundle, &sa0_op, op_ADD_11); + + // lc0 = ((ut32) Rs); + 
RzILOpEffect *op_ASSIGN_16 = WRITE_REG(bundle, &lc0_op, CAST(32, IL_FALSE, Rs)); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_LPCFG, ((ut32) 0x2)); + RzILOpEffect *set_usr_field_call_19 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_LPCFG, CAST(32, IL_FALSE, SN(32, 2))); + + // P3 = ((st8) 0x0); + RzILOpEffect *op_ASSIGN_23 = WRITE_REG(bundle, &P3_op, CAST(8, MSB(SN(32, 0)), SN(32, 0))); + + RzILOpEffect *instruction_sequence = SEQN(6, imm_assign_0, op_ASSIGN_7, op_ASSIGN_12, op_ASSIGN_16, set_usr_field_call_19, op_ASSIGN_23); + return instruction_sequence; +} + +// p3 = sp3loop0(Ii,II) +RzILOpEffect *hex_il_op_j2_ploop3si(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + const HexOp sa0_op = ALIAS2OP(HEX_REG_ALIAS_SA0, false); + RzILOpPure *pc = U32(pkt->pkt_addr); + const HexOp lc0_op = ALIAS2OP(HEX_REG_ALIAS_LC0, false); + RzILOpPure *U = UN(32, (ut32)ISA2IMM(hi, 'U')); + const HexOp P3_op = EXPLICIT2OP(3, HEX_REG_CLASS_PRED_REGS, false); + + // r = r; + RzILOpEffect *imm_assign_0 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_6 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_7 = SETL("r", op_AND_6); + + // sa0 = pc + ((ut32) r); + RzILOpPure *op_ADD_11 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *op_ASSIGN_12 = WRITE_REG(bundle, &sa0_op, op_ADD_11); + + // U = U; + RzILOpEffect *imm_assign_14 = SETL("U", U); + + // lc0 = U; + RzILOpEffect *op_ASSIGN_16 = WRITE_REG(bundle, &lc0_op, VARL("U")); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_LPCFG, ((ut32) 0x3)); + RzILOpEffect *set_usr_field_call_19 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_LPCFG, CAST(32, IL_FALSE, SN(32, 3))); + + // P3 = ((st8) 0x0); + RzILOpEffect *op_ASSIGN_23 = WRITE_REG(bundle, &P3_op, CAST(8, MSB(SN(32, 0)), SN(32, 0))); + + RzILOpEffect *instruction_sequence = SEQN(7, imm_assign_0, imm_assign_14, op_ASSIGN_7, op_ASSIGN_12, op_ASSIGN_16, 
set_usr_field_call_19, op_ASSIGN_23); + return instruction_sequence; +} + +// p3 = sp3loop0(Ii,Rs) +RzILOpEffect *hex_il_op_j2_ploop3sr(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + const HexOp sa0_op = ALIAS2OP(HEX_REG_ALIAS_SA0, false); + RzILOpPure *pc = U32(pkt->pkt_addr); + const HexOp lc0_op = ALIAS2OP(HEX_REG_ALIAS_LC0, false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp P3_op = EXPLICIT2OP(3, HEX_REG_CLASS_PRED_REGS, false); + + // r = r; + RzILOpEffect *imm_assign_0 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_6 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_7 = SETL("r", op_AND_6); + + // sa0 = pc + ((ut32) r); + RzILOpPure *op_ADD_11 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *op_ASSIGN_12 = WRITE_REG(bundle, &sa0_op, op_ADD_11); + + // lc0 = ((ut32) Rs); + RzILOpEffect *op_ASSIGN_16 = WRITE_REG(bundle, &lc0_op, CAST(32, IL_FALSE, Rs)); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_LPCFG, ((ut32) 0x3)); + RzILOpEffect *set_usr_field_call_19 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_LPCFG, CAST(32, IL_FALSE, SN(32, 3))); + + // P3 = ((st8) 0x0); + RzILOpEffect *op_ASSIGN_23 = WRITE_REG(bundle, &P3_op, CAST(8, MSB(SN(32, 0)), SN(32, 0))); + + RzILOpEffect *instruction_sequence = SEQN(6, imm_assign_0, op_ASSIGN_7, op_ASSIGN_12, op_ASSIGN_16, set_usr_field_call_19, op_ASSIGN_23); + return instruction_sequence; +} + +// rte +RzILOpEffect *hex_il_op_j2_rte(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// trap0(Ii) +RzILOpEffect *hex_il_op_j2_trap0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + + // u = u; + RzILOpEffect *imm_assign_1 = SETL("u", u); + + // trap(0x0, u); + RzILOpEffect *trap_call_3 = hex_trap(SN(32, 0), VARL("u")); + + RzILOpEffect 
*instruction_sequence = SEQN(2, imm_assign_1, trap_call_3); + return instruction_sequence; +} + +// trap1(Rx,Ii) +RzILOpEffect *hex_il_op_j2_trap1(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// unpause +RzILOpEffect *hex_il_op_j2_unpause(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +#include \ No newline at end of file diff --git a/librz/arch/isa/hexagon/il_ops/hexagon_il_J4_ops.c b/librz/arch/isa/hexagon/il_ops/hexagon_il_J4_ops.c new file mode 100644 index 00000000000..2406c8a0653 --- /dev/null +++ b/librz/arch/isa/hexagon/il_ops/hexagon_il_J4_ops.c @@ -0,0 +1,5377 @@ +// SPDX-FileCopyrightText: 2021 Rot127 +// SPDX-License-Identifier: LGPL-3.0-only + +// LLVM commit: b6f51787f6c8e77143f0aef6b58ddc7c55741d5c +// LLVM commit date: 2023-11-15 07:10:59 -0800 (ISO 8601 format) +// Date of code generation: 2024-03-16 06:22:39-05:00 +//======================================== +// The following code is generated. +// Do not edit. Repository of code generator: +// https://github.com/rizinorg/rz-hexagon + +#include +#include "../hexagon_il.h" +#include +#include + +// if (!cmp.eq(Ns.new,Rt)) jump:nt Ii +RzILOpEffect *hex_il_op_j4_cmpeq_f_jumpnv_nt(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp Ns_new_op = NREG2OP(bundle, 's'); + RzILOpPure *Ns_new = READ_REG(pkt, &Ns_new_op, true); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + RzILOpPure *pc = U32(pkt->pkt_addr); + + // r = r; + RzILOpEffect *imm_assign_4 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_10 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_11 = SETL("r", op_AND_10); + + // jump(pc + ((ut32) r)); + RzILOpPure *op_ADD_14 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *jump_op_ADD_14_15 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", op_ADD_14)); + + // seq(r; r = (r & -0x4); jump(pc + 
((ut32) r))); + RzILOpEffect *seq_then_17 = SEQN(2, op_ASSIGN_11, jump_op_ADD_14_15); + + // if ((Ns_new != Rt)) {seq(r; r = (r & -0x4); jump(pc + ((ut32) r)))} else {{}}; + RzILOpPure *op_NE_3 = INV(EQ(Ns_new, Rt)); + RzILOpEffect *branch_18 = BRANCH(op_NE_3, seq_then_17, EMPTY()); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_4, branch_18); + return instruction_sequence; +} + +// if (!cmp.eq(Ns.new,Rt)) jump:t Ii +RzILOpEffect *hex_il_op_j4_cmpeq_f_jumpnv_t(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp Ns_new_op = NREG2OP(bundle, 's'); + RzILOpPure *Ns_new = READ_REG(pkt, &Ns_new_op, true); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + RzILOpPure *pc = U32(pkt->pkt_addr); + + // r = r; + RzILOpEffect *imm_assign_4 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_10 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_11 = SETL("r", op_AND_10); + + // jump(pc + ((ut32) r)); + RzILOpPure *op_ADD_14 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *jump_op_ADD_14_15 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", op_ADD_14)); + + // seq(r; r = (r & -0x4); jump(pc + ((ut32) r))); + RzILOpEffect *seq_then_17 = SEQN(2, op_ASSIGN_11, jump_op_ADD_14_15); + + // if ((Ns_new != Rt)) {seq(r; r = (r & -0x4); jump(pc + ((ut32) r)))} else {{}}; + RzILOpPure *op_NE_3 = INV(EQ(Ns_new, Rt)); + RzILOpEffect *branch_18 = BRANCH(op_NE_3, seq_then_17, EMPTY()); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_4, branch_18); + return instruction_sequence; +} + +// p0 = cmp.eq(Rs,Rt); if (!p0.new) jump:nt Ii +RzILOpEffect *hex_il_op_j4_cmpeq_fp0_jump_nt_part0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P0_op = EXPLICIT2OP(0, HEX_REG_CLASS_PRED_REGS, false); + const HexOp *Rs_op = 
ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // P0 = ((st8) ((Rs == Rt) ? 0xff : 0x0)); + RzILOpPure *op_EQ_3 = EQ(Rs, Rt); + RzILOpPure *cond_6 = ITE(op_EQ_3, SN(32, 0xff), SN(32, 0)); + RzILOpEffect *op_ASSIGN_8 = WRITE_REG(bundle, &P0_op, CAST(8, MSB(cond_6), DUP(cond_6))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_8; + return instruction_sequence; +} + +// p0 = cmp.eq(Rs,Rt); if (!p0.new) jump:nt Ii +RzILOpEffect *hex_il_op_j4_cmpeq_fp0_jump_nt_part1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P0_new_op = EXPLICIT2OP(0, HEX_REG_CLASS_PRED_REGS, true); + RzILOpPure *P0_new = READ_REG(pkt, &P0_new_op, true); + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + RzILOpPure *pc = U32(pkt->pkt_addr); + + // r = r; + RzILOpEffect *imm_assign_5 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_11 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_12 = SETL("r", op_AND_11); + + // jump(pc + ((ut32) r)); + RzILOpPure *op_ADD_15 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *jump_op_ADD_15_16 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", op_ADD_15)); + + // seq(r; r = (r & -0x4); jump(pc + ((ut32) r))); + RzILOpEffect *seq_then_18 = SEQN(2, op_ASSIGN_12, jump_op_ADD_15_16); + + // if (! 
(((st32) P0_new) & 0x1)) {seq(r; r = (r & -0x4); jump(pc + ((ut32) r)))} else {{}}; + RzILOpPure *op_AND_3 = LOGAND(CAST(32, MSB(P0_new), DUP(P0_new)), SN(32, 1)); + RzILOpPure *op_INV_4 = INV(NON_ZERO(op_AND_3)); + RzILOpEffect *branch_19 = BRANCH(op_INV_4, seq_then_18, EMPTY()); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_5, branch_19); + return instruction_sequence; +} + +// p0 = cmp.eq(Rs,Rt); if (!p0.new) jump:t Ii +RzILOpEffect *hex_il_op_j4_cmpeq_fp0_jump_t_part0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P0_op = EXPLICIT2OP(0, HEX_REG_CLASS_PRED_REGS, false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // P0 = ((st8) ((Rs == Rt) ? 0xff : 0x0)); + RzILOpPure *op_EQ_3 = EQ(Rs, Rt); + RzILOpPure *cond_6 = ITE(op_EQ_3, SN(32, 0xff), SN(32, 0)); + RzILOpEffect *op_ASSIGN_8 = WRITE_REG(bundle, &P0_op, CAST(8, MSB(cond_6), DUP(cond_6))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_8; + return instruction_sequence; +} + +// p0 = cmp.eq(Rs,Rt); if (!p0.new) jump:t Ii +RzILOpEffect *hex_il_op_j4_cmpeq_fp0_jump_t_part1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P0_new_op = EXPLICIT2OP(0, HEX_REG_CLASS_PRED_REGS, true); + RzILOpPure *P0_new = READ_REG(pkt, &P0_new_op, true); + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + RzILOpPure *pc = U32(pkt->pkt_addr); + + // r = r; + RzILOpEffect *imm_assign_5 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_11 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_12 = SETL("r", op_AND_11); + + // jump(pc + ((ut32) r)); + RzILOpPure *op_ADD_15 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *jump_op_ADD_15_16 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", op_ADD_15)); + + // 
seq(r; r = (r & -0x4); jump(pc + ((ut32) r))); + RzILOpEffect *seq_then_18 = SEQN(2, op_ASSIGN_12, jump_op_ADD_15_16); + + // if (! (((st32) P0_new) & 0x1)) {seq(r; r = (r & -0x4); jump(pc + ((ut32) r)))} else {{}}; + RzILOpPure *op_AND_3 = LOGAND(CAST(32, MSB(P0_new), DUP(P0_new)), SN(32, 1)); + RzILOpPure *op_INV_4 = INV(NON_ZERO(op_AND_3)); + RzILOpEffect *branch_19 = BRANCH(op_INV_4, seq_then_18, EMPTY()); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_5, branch_19); + return instruction_sequence; +} + +// p1 = cmp.eq(Rs,Rt); if (!p1.new) jump:nt Ii +RzILOpEffect *hex_il_op_j4_cmpeq_fp1_jump_nt_part0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P1_op = EXPLICIT2OP(1, HEX_REG_CLASS_PRED_REGS, false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // P1 = ((st8) ((Rs == Rt) ? 
0xff : 0x0)); + RzILOpPure *op_EQ_3 = EQ(Rs, Rt); + RzILOpPure *cond_6 = ITE(op_EQ_3, SN(32, 0xff), SN(32, 0)); + RzILOpEffect *op_ASSIGN_8 = WRITE_REG(bundle, &P1_op, CAST(8, MSB(cond_6), DUP(cond_6))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_8; + return instruction_sequence; +} + +// p1 = cmp.eq(Rs,Rt); if (!p1.new) jump:nt Ii +RzILOpEffect *hex_il_op_j4_cmpeq_fp1_jump_nt_part1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P1_new_op = EXPLICIT2OP(1, HEX_REG_CLASS_PRED_REGS, true); + RzILOpPure *P1_new = READ_REG(pkt, &P1_new_op, true); + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + RzILOpPure *pc = U32(pkt->pkt_addr); + + // r = r; + RzILOpEffect *imm_assign_5 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_11 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_12 = SETL("r", op_AND_11); + + // jump(pc + ((ut32) r)); + RzILOpPure *op_ADD_15 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *jump_op_ADD_15_16 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", op_ADD_15)); + + // seq(r; r = (r & -0x4); jump(pc + ((ut32) r))); + RzILOpEffect *seq_then_18 = SEQN(2, op_ASSIGN_12, jump_op_ADD_15_16); + + // if (! 
(((st32) P1_new) & 0x1)) {seq(r; r = (r & -0x4); jump(pc + ((ut32) r)))} else {{}}; + RzILOpPure *op_AND_3 = LOGAND(CAST(32, MSB(P1_new), DUP(P1_new)), SN(32, 1)); + RzILOpPure *op_INV_4 = INV(NON_ZERO(op_AND_3)); + RzILOpEffect *branch_19 = BRANCH(op_INV_4, seq_then_18, EMPTY()); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_5, branch_19); + return instruction_sequence; +} + +// p1 = cmp.eq(Rs,Rt); if (!p1.new) jump:t Ii +RzILOpEffect *hex_il_op_j4_cmpeq_fp1_jump_t_part0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P1_op = EXPLICIT2OP(1, HEX_REG_CLASS_PRED_REGS, false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // P1 = ((st8) ((Rs == Rt) ? 0xff : 0x0)); + RzILOpPure *op_EQ_3 = EQ(Rs, Rt); + RzILOpPure *cond_6 = ITE(op_EQ_3, SN(32, 0xff), SN(32, 0)); + RzILOpEffect *op_ASSIGN_8 = WRITE_REG(bundle, &P1_op, CAST(8, MSB(cond_6), DUP(cond_6))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_8; + return instruction_sequence; +} + +// p1 = cmp.eq(Rs,Rt); if (!p1.new) jump:t Ii +RzILOpEffect *hex_il_op_j4_cmpeq_fp1_jump_t_part1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P1_new_op = EXPLICIT2OP(1, HEX_REG_CLASS_PRED_REGS, true); + RzILOpPure *P1_new = READ_REG(pkt, &P1_new_op, true); + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + RzILOpPure *pc = U32(pkt->pkt_addr); + + // r = r; + RzILOpEffect *imm_assign_5 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_11 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_12 = SETL("r", op_AND_11); + + // jump(pc + ((ut32) r)); + RzILOpPure *op_ADD_15 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *jump_op_ADD_15_16 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", op_ADD_15)); + + // 
seq(r; r = (r & -0x4); jump(pc + ((ut32) r))); + RzILOpEffect *seq_then_18 = SEQN(2, op_ASSIGN_12, jump_op_ADD_15_16); + + // if (! (((st32) P1_new) & 0x1)) {seq(r; r = (r & -0x4); jump(pc + ((ut32) r)))} else {{}}; + RzILOpPure *op_AND_3 = LOGAND(CAST(32, MSB(P1_new), DUP(P1_new)), SN(32, 1)); + RzILOpPure *op_INV_4 = INV(NON_ZERO(op_AND_3)); + RzILOpEffect *branch_19 = BRANCH(op_INV_4, seq_then_18, EMPTY()); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_5, branch_19); + return instruction_sequence; +} + +// if (cmp.eq(Ns.new,Rt)) jump:nt Ii +RzILOpEffect *hex_il_op_j4_cmpeq_t_jumpnv_nt(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp Ns_new_op = NREG2OP(bundle, 's'); + RzILOpPure *Ns_new = READ_REG(pkt, &Ns_new_op, true); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + RzILOpPure *pc = U32(pkt->pkt_addr); + + // r = r; + RzILOpEffect *imm_assign_4 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_10 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_11 = SETL("r", op_AND_10); + + // jump(pc + ((ut32) r)); + RzILOpPure *op_ADD_14 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *jump_op_ADD_14_15 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", op_ADD_14)); + + // seq(r; r = (r & -0x4); jump(pc + ((ut32) r))); + RzILOpEffect *seq_then_17 = SEQN(2, op_ASSIGN_11, jump_op_ADD_14_15); + + // if ((Ns_new == Rt)) {seq(r; r = (r & -0x4); jump(pc + ((ut32) r)))} else {{}}; + RzILOpPure *op_EQ_3 = EQ(Ns_new, Rt); + RzILOpEffect *branch_18 = BRANCH(op_EQ_3, seq_then_17, EMPTY()); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_4, branch_18); + return instruction_sequence; +} + +// if (cmp.eq(Ns.new,Rt)) jump:t Ii +RzILOpEffect *hex_il_op_j4_cmpeq_t_jumpnv_t(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + 
// READ + const HexOp Ns_new_op = NREG2OP(bundle, 's'); + RzILOpPure *Ns_new = READ_REG(pkt, &Ns_new_op, true); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + RzILOpPure *pc = U32(pkt->pkt_addr); + + // r = r; + RzILOpEffect *imm_assign_4 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_10 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_11 = SETL("r", op_AND_10); + + // jump(pc + ((ut32) r)); + RzILOpPure *op_ADD_14 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *jump_op_ADD_14_15 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", op_ADD_14)); + + // seq(r; r = (r & -0x4); jump(pc + ((ut32) r))); + RzILOpEffect *seq_then_17 = SEQN(2, op_ASSIGN_11, jump_op_ADD_14_15); + + // if ((Ns_new == Rt)) {seq(r; r = (r & -0x4); jump(pc + ((ut32) r)))} else {{}}; + RzILOpPure *op_EQ_3 = EQ(Ns_new, Rt); + RzILOpEffect *branch_18 = BRANCH(op_EQ_3, seq_then_17, EMPTY()); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_4, branch_18); + return instruction_sequence; +} + +// p0 = cmp.eq(Rs,Rt); if (p0.new) jump:nt Ii +RzILOpEffect *hex_il_op_j4_cmpeq_tp0_jump_nt_part0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P0_op = EXPLICIT2OP(0, HEX_REG_CLASS_PRED_REGS, false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // P0 = ((st8) ((Rs == Rt) ? 
0xff : 0x0)); + RzILOpPure *op_EQ_3 = EQ(Rs, Rt); + RzILOpPure *cond_6 = ITE(op_EQ_3, SN(32, 0xff), SN(32, 0)); + RzILOpEffect *op_ASSIGN_8 = WRITE_REG(bundle, &P0_op, CAST(8, MSB(cond_6), DUP(cond_6))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_8; + return instruction_sequence; +} + +// p0 = cmp.eq(Rs,Rt); if (p0.new) jump:nt Ii +RzILOpEffect *hex_il_op_j4_cmpeq_tp0_jump_nt_part1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P0_new_op = EXPLICIT2OP(0, HEX_REG_CLASS_PRED_REGS, true); + RzILOpPure *P0_new = READ_REG(pkt, &P0_new_op, true); + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + RzILOpPure *pc = U32(pkt->pkt_addr); + + // r = r; + RzILOpEffect *imm_assign_4 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_10 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_11 = SETL("r", op_AND_10); + + // jump(pc + ((ut32) r)); + RzILOpPure *op_ADD_14 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *jump_op_ADD_14_15 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", op_ADD_14)); + + // seq(r; r = (r & -0x4); jump(pc + ((ut32) r))); + RzILOpEffect *seq_then_17 = SEQN(2, op_ASSIGN_11, jump_op_ADD_14_15); + + // if ((((st32) P0_new) & 0x1)) {seq(r; r = (r & -0x4); jump(pc + ((ut32) r)))} else {{}}; + RzILOpPure *op_AND_3 = LOGAND(CAST(32, MSB(P0_new), DUP(P0_new)), SN(32, 1)); + RzILOpEffect *branch_18 = BRANCH(NON_ZERO(op_AND_3), seq_then_17, EMPTY()); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_4, branch_18); + return instruction_sequence; +} + +// p0 = cmp.eq(Rs,Rt); if (p0.new) jump:t Ii +RzILOpEffect *hex_il_op_j4_cmpeq_tp0_jump_t_part0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P0_op = EXPLICIT2OP(0, HEX_REG_CLASS_PRED_REGS, false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', 
false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // P0 = ((st8) ((Rs == Rt) ? 0xff : 0x0)); + RzILOpPure *op_EQ_3 = EQ(Rs, Rt); + RzILOpPure *cond_6 = ITE(op_EQ_3, SN(32, 0xff), SN(32, 0)); + RzILOpEffect *op_ASSIGN_8 = WRITE_REG(bundle, &P0_op, CAST(8, MSB(cond_6), DUP(cond_6))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_8; + return instruction_sequence; +} + +// p0 = cmp.eq(Rs,Rt); if (p0.new) jump:t Ii +RzILOpEffect *hex_il_op_j4_cmpeq_tp0_jump_t_part1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P0_new_op = EXPLICIT2OP(0, HEX_REG_CLASS_PRED_REGS, true); + RzILOpPure *P0_new = READ_REG(pkt, &P0_new_op, true); + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + RzILOpPure *pc = U32(pkt->pkt_addr); + + // r = r; + RzILOpEffect *imm_assign_4 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_10 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_11 = SETL("r", op_AND_10); + + // jump(pc + ((ut32) r)); + RzILOpPure *op_ADD_14 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *jump_op_ADD_14_15 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", op_ADD_14)); + + // seq(r; r = (r & -0x4); jump(pc + ((ut32) r))); + RzILOpEffect *seq_then_17 = SEQN(2, op_ASSIGN_11, jump_op_ADD_14_15); + + // if ((((st32) P0_new) & 0x1)) {seq(r; r = (r & -0x4); jump(pc + ((ut32) r)))} else {{}}; + RzILOpPure *op_AND_3 = LOGAND(CAST(32, MSB(P0_new), DUP(P0_new)), SN(32, 1)); + RzILOpEffect *branch_18 = BRANCH(NON_ZERO(op_AND_3), seq_then_17, EMPTY()); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_4, branch_18); + return instruction_sequence; +} + +// p1 = cmp.eq(Rs,Rt); if (p1.new) jump:nt Ii +RzILOpEffect *hex_il_op_j4_cmpeq_tp1_jump_nt_part0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P1_op = EXPLICIT2OP(1, HEX_REG_CLASS_PRED_REGS, false); + const HexOp *Rs_op = ISA2REG(hi, 's', 
false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // P1 = ((st8) ((Rs == Rt) ? 0xff : 0x0)); + RzILOpPure *op_EQ_3 = EQ(Rs, Rt); + RzILOpPure *cond_6 = ITE(op_EQ_3, SN(32, 0xff), SN(32, 0)); + RzILOpEffect *op_ASSIGN_8 = WRITE_REG(bundle, &P1_op, CAST(8, MSB(cond_6), DUP(cond_6))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_8; + return instruction_sequence; +} + +// p1 = cmp.eq(Rs,Rt); if (p1.new) jump:nt Ii +RzILOpEffect *hex_il_op_j4_cmpeq_tp1_jump_nt_part1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P1_new_op = EXPLICIT2OP(1, HEX_REG_CLASS_PRED_REGS, true); + RzILOpPure *P1_new = READ_REG(pkt, &P1_new_op, true); + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + RzILOpPure *pc = U32(pkt->pkt_addr); + + // r = r; + RzILOpEffect *imm_assign_4 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_10 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_11 = SETL("r", op_AND_10); + + // jump(pc + ((ut32) r)); + RzILOpPure *op_ADD_14 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *jump_op_ADD_14_15 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", op_ADD_14)); + + // seq(r; r = (r & -0x4); jump(pc + ((ut32) r))); + RzILOpEffect *seq_then_17 = SEQN(2, op_ASSIGN_11, jump_op_ADD_14_15); + + // if ((((st32) P1_new) & 0x1)) {seq(r; r = (r & -0x4); jump(pc + ((ut32) r)))} else {{}}; + RzILOpPure *op_AND_3 = LOGAND(CAST(32, MSB(P1_new), DUP(P1_new)), SN(32, 1)); + RzILOpEffect *branch_18 = BRANCH(NON_ZERO(op_AND_3), seq_then_17, EMPTY()); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_4, branch_18); + return instruction_sequence; +} + +// p1 = cmp.eq(Rs,Rt); if (p1.new) jump:t Ii +RzILOpEffect *hex_il_op_j4_cmpeq_tp1_jump_t_part0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P1_op 
= EXPLICIT2OP(1, HEX_REG_CLASS_PRED_REGS, false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // P1 = ((st8) ((Rs == Rt) ? 0xff : 0x0)); + RzILOpPure *op_EQ_3 = EQ(Rs, Rt); + RzILOpPure *cond_6 = ITE(op_EQ_3, SN(32, 0xff), SN(32, 0)); + RzILOpEffect *op_ASSIGN_8 = WRITE_REG(bundle, &P1_op, CAST(8, MSB(cond_6), DUP(cond_6))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_8; + return instruction_sequence; +} + +// p1 = cmp.eq(Rs,Rt); if (p1.new) jump:t Ii +RzILOpEffect *hex_il_op_j4_cmpeq_tp1_jump_t_part1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P1_new_op = EXPLICIT2OP(1, HEX_REG_CLASS_PRED_REGS, true); + RzILOpPure *P1_new = READ_REG(pkt, &P1_new_op, true); + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + RzILOpPure *pc = U32(pkt->pkt_addr); + + // r = r; + RzILOpEffect *imm_assign_4 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_10 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_11 = SETL("r", op_AND_10); + + // jump(pc + ((ut32) r)); + RzILOpPure *op_ADD_14 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *jump_op_ADD_14_15 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", op_ADD_14)); + + // seq(r; r = (r & -0x4); jump(pc + ((ut32) r))); + RzILOpEffect *seq_then_17 = SEQN(2, op_ASSIGN_11, jump_op_ADD_14_15); + + // if ((((st32) P1_new) & 0x1)) {seq(r; r = (r & -0x4); jump(pc + ((ut32) r)))} else {{}}; + RzILOpPure *op_AND_3 = LOGAND(CAST(32, MSB(P1_new), DUP(P1_new)), SN(32, 1)); + RzILOpEffect *branch_18 = BRANCH(NON_ZERO(op_AND_3), seq_then_17, EMPTY()); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_4, branch_18); + return instruction_sequence; +} + +// if (!cmp.eq(Ns.new,II)) jump:nt Ii +RzILOpEffect *hex_il_op_j4_cmpeqi_f_jumpnv_nt(HexInsnPktBundle *bundle) { + const HexInsn 
*hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp Ns_new_op = NREG2OP(bundle, 's'); + RzILOpPure *Ns_new = READ_REG(pkt, &Ns_new_op, true); + RzILOpPure *U = UN(32, (ut32)ISA2IMM(hi, 'U')); + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + RzILOpPure *pc = U32(pkt->pkt_addr); + + // U = U; + RzILOpEffect *imm_assign_2 = SETL("U", U); + + // r = r; + RzILOpEffect *imm_assign_6 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_12 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_13 = SETL("r", op_AND_12); + + // jump(pc + ((ut32) r)); + RzILOpPure *op_ADD_16 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *jump_op_ADD_16_17 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", op_ADD_16)); + + // seq(r; r = (r & -0x4); jump(pc + ((ut32) r))); + RzILOpEffect *seq_then_19 = SEQN(2, op_ASSIGN_13, jump_op_ADD_16_17); + + // if ((((ut32) Ns_new) != U)) {seq(r; r = (r & -0x4); jump(pc + ((ut32) r)))} else {{}}; + RzILOpPure *op_NE_5 = INV(EQ(CAST(32, IL_FALSE, Ns_new), VARL("U"))); + RzILOpEffect *branch_20 = BRANCH(op_NE_5, seq_then_19, EMPTY()); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_2, imm_assign_6, branch_20); + return instruction_sequence; +} + +// if (!cmp.eq(Ns.new,II)) jump:t Ii +RzILOpEffect *hex_il_op_j4_cmpeqi_f_jumpnv_t(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp Ns_new_op = NREG2OP(bundle, 's'); + RzILOpPure *Ns_new = READ_REG(pkt, &Ns_new_op, true); + RzILOpPure *U = UN(32, (ut32)ISA2IMM(hi, 'U')); + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + RzILOpPure *pc = U32(pkt->pkt_addr); + + // U = U; + RzILOpEffect *imm_assign_2 = SETL("U", U); + + // r = r; + RzILOpEffect *imm_assign_6 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_12 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_13 = SETL("r", op_AND_12); + + // jump(pc + ((ut32) r)); + RzILOpPure *op_ADD_16 = ADD(pc, CAST(32, IL_FALSE, 
VARL("r"))); + RzILOpEffect *jump_op_ADD_16_17 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", op_ADD_16)); + + // seq(r; r = (r & -0x4); jump(pc + ((ut32) r))); + RzILOpEffect *seq_then_19 = SEQN(2, op_ASSIGN_13, jump_op_ADD_16_17); + + // if ((((ut32) Ns_new) != U)) {seq(r; r = (r & -0x4); jump(pc + ((ut32) r)))} else {{}}; + RzILOpPure *op_NE_5 = INV(EQ(CAST(32, IL_FALSE, Ns_new), VARL("U"))); + RzILOpEffect *branch_20 = BRANCH(op_NE_5, seq_then_19, EMPTY()); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_2, imm_assign_6, branch_20); + return instruction_sequence; +} + +// p0 = cmp.eq(Rs,II); if (!p0.new) jump:nt Ii +RzILOpEffect *hex_il_op_j4_cmpeqi_fp0_jump_nt_part0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P0_op = EXPLICIT2OP(0, HEX_REG_CLASS_PRED_REGS, false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + RzILOpPure *U = UN(32, (ut32)ISA2IMM(hi, 'U')); + + // U = U; + RzILOpEffect *imm_assign_2 = SETL("U", U); + + // P0 = ((st8) ((((ut32) Rs) == U) ? 
0xff : 0x0)); + RzILOpPure *op_EQ_5 = EQ(CAST(32, IL_FALSE, Rs), VARL("U")); + RzILOpPure *cond_8 = ITE(op_EQ_5, SN(32, 0xff), SN(32, 0)); + RzILOpEffect *op_ASSIGN_10 = WRITE_REG(bundle, &P0_op, CAST(8, MSB(cond_8), DUP(cond_8))); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_2, op_ASSIGN_10); + return instruction_sequence; +} + +// p0 = cmp.eq(Rs,II); if (!p0.new) jump:nt Ii +RzILOpEffect *hex_il_op_j4_cmpeqi_fp0_jump_nt_part1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P0_new_op = EXPLICIT2OP(0, HEX_REG_CLASS_PRED_REGS, true); + RzILOpPure *P0_new = READ_REG(pkt, &P0_new_op, true); + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + RzILOpPure *pc = U32(pkt->pkt_addr); + + // r = r; + RzILOpEffect *imm_assign_5 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_11 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_12 = SETL("r", op_AND_11); + + // jump(pc + ((ut32) r)); + RzILOpPure *op_ADD_15 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *jump_op_ADD_15_16 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", op_ADD_15)); + + // seq(r; r = (r & -0x4); jump(pc + ((ut32) r))); + RzILOpEffect *seq_then_18 = SEQN(2, op_ASSIGN_12, jump_op_ADD_15_16); + + // if (! 
(((st32) P0_new) & 0x1)) {seq(r; r = (r & -0x4); jump(pc + ((ut32) r)))} else {{}}; + RzILOpPure *op_AND_3 = LOGAND(CAST(32, MSB(P0_new), DUP(P0_new)), SN(32, 1)); + RzILOpPure *op_INV_4 = INV(NON_ZERO(op_AND_3)); + RzILOpEffect *branch_19 = BRANCH(op_INV_4, seq_then_18, EMPTY()); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_5, branch_19); + return instruction_sequence; +} + +// p0 = cmp.eq(Rs,II); if (!p0.new) jump:t Ii +RzILOpEffect *hex_il_op_j4_cmpeqi_fp0_jump_t_part0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P0_op = EXPLICIT2OP(0, HEX_REG_CLASS_PRED_REGS, false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + RzILOpPure *U = UN(32, (ut32)ISA2IMM(hi, 'U')); + + // U = U; + RzILOpEffect *imm_assign_2 = SETL("U", U); + + // P0 = ((st8) ((((ut32) Rs) == U) ? 0xff : 0x0)); + RzILOpPure *op_EQ_5 = EQ(CAST(32, IL_FALSE, Rs), VARL("U")); + RzILOpPure *cond_8 = ITE(op_EQ_5, SN(32, 0xff), SN(32, 0)); + RzILOpEffect *op_ASSIGN_10 = WRITE_REG(bundle, &P0_op, CAST(8, MSB(cond_8), DUP(cond_8))); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_2, op_ASSIGN_10); + return instruction_sequence; +} + +// p0 = cmp.eq(Rs,II); if (!p0.new) jump:t Ii +RzILOpEffect *hex_il_op_j4_cmpeqi_fp0_jump_t_part1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P0_new_op = EXPLICIT2OP(0, HEX_REG_CLASS_PRED_REGS, true); + RzILOpPure *P0_new = READ_REG(pkt, &P0_new_op, true); + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + RzILOpPure *pc = U32(pkt->pkt_addr); + + // r = r; + RzILOpEffect *imm_assign_5 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_11 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_12 = SETL("r", op_AND_11); + + // jump(pc + ((ut32) r)); + RzILOpPure *op_ADD_15 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *jump_op_ADD_15_16 
= SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", op_ADD_15)); + + // seq(r; r = (r & -0x4); jump(pc + ((ut32) r))); + RzILOpEffect *seq_then_18 = SEQN(2, op_ASSIGN_12, jump_op_ADD_15_16); + + // if (! (((st32) P0_new) & 0x1)) {seq(r; r = (r & -0x4); jump(pc + ((ut32) r)))} else {{}}; + RzILOpPure *op_AND_3 = LOGAND(CAST(32, MSB(P0_new), DUP(P0_new)), SN(32, 1)); + RzILOpPure *op_INV_4 = INV(NON_ZERO(op_AND_3)); + RzILOpEffect *branch_19 = BRANCH(op_INV_4, seq_then_18, EMPTY()); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_5, branch_19); + return instruction_sequence; +} + +// p1 = cmp.eq(Rs,II); if (!p1.new) jump:nt Ii +RzILOpEffect *hex_il_op_j4_cmpeqi_fp1_jump_nt_part0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P1_op = EXPLICIT2OP(1, HEX_REG_CLASS_PRED_REGS, false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + RzILOpPure *U = UN(32, (ut32)ISA2IMM(hi, 'U')); + + // U = U; + RzILOpEffect *imm_assign_2 = SETL("U", U); + + // P1 = ((st8) ((((ut32) Rs) == U) ? 
0xff : 0x0)); + RzILOpPure *op_EQ_5 = EQ(CAST(32, IL_FALSE, Rs), VARL("U")); + RzILOpPure *cond_8 = ITE(op_EQ_5, SN(32, 0xff), SN(32, 0)); + RzILOpEffect *op_ASSIGN_10 = WRITE_REG(bundle, &P1_op, CAST(8, MSB(cond_8), DUP(cond_8))); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_2, op_ASSIGN_10); + return instruction_sequence; +} + +// p1 = cmp.eq(Rs,II); if (!p1.new) jump:nt Ii +RzILOpEffect *hex_il_op_j4_cmpeqi_fp1_jump_nt_part1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P1_new_op = EXPLICIT2OP(1, HEX_REG_CLASS_PRED_REGS, true); + RzILOpPure *P1_new = READ_REG(pkt, &P1_new_op, true); + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + RzILOpPure *pc = U32(pkt->pkt_addr); + + // r = r; + RzILOpEffect *imm_assign_5 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_11 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_12 = SETL("r", op_AND_11); + + // jump(pc + ((ut32) r)); + RzILOpPure *op_ADD_15 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *jump_op_ADD_15_16 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", op_ADD_15)); + + // seq(r; r = (r & -0x4); jump(pc + ((ut32) r))); + RzILOpEffect *seq_then_18 = SEQN(2, op_ASSIGN_12, jump_op_ADD_15_16); + + // if (! 
(((st32) P1_new) & 0x1)) {seq(r; r = (r & -0x4); jump(pc + ((ut32) r)))} else {{}}; + RzILOpPure *op_AND_3 = LOGAND(CAST(32, MSB(P1_new), DUP(P1_new)), SN(32, 1)); + RzILOpPure *op_INV_4 = INV(NON_ZERO(op_AND_3)); + RzILOpEffect *branch_19 = BRANCH(op_INV_4, seq_then_18, EMPTY()); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_5, branch_19); + return instruction_sequence; +} + +// p1 = cmp.eq(Rs,II); if (!p1.new) jump:t Ii +RzILOpEffect *hex_il_op_j4_cmpeqi_fp1_jump_t_part0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P1_op = EXPLICIT2OP(1, HEX_REG_CLASS_PRED_REGS, false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + RzILOpPure *U = UN(32, (ut32)ISA2IMM(hi, 'U')); + + // U = U; + RzILOpEffect *imm_assign_2 = SETL("U", U); + + // P1 = ((st8) ((((ut32) Rs) == U) ? 0xff : 0x0)); + RzILOpPure *op_EQ_5 = EQ(CAST(32, IL_FALSE, Rs), VARL("U")); + RzILOpPure *cond_8 = ITE(op_EQ_5, SN(32, 0xff), SN(32, 0)); + RzILOpEffect *op_ASSIGN_10 = WRITE_REG(bundle, &P1_op, CAST(8, MSB(cond_8), DUP(cond_8))); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_2, op_ASSIGN_10); + return instruction_sequence; +} + +// p1 = cmp.eq(Rs,II); if (!p1.new) jump:t Ii +RzILOpEffect *hex_il_op_j4_cmpeqi_fp1_jump_t_part1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P1_new_op = EXPLICIT2OP(1, HEX_REG_CLASS_PRED_REGS, true); + RzILOpPure *P1_new = READ_REG(pkt, &P1_new_op, true); + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + RzILOpPure *pc = U32(pkt->pkt_addr); + + // r = r; + RzILOpEffect *imm_assign_5 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_11 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_12 = SETL("r", op_AND_11); + + // jump(pc + ((ut32) r)); + RzILOpPure *op_ADD_15 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *jump_op_ADD_15_16 
= SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", op_ADD_15)); + + // seq(r; r = (r & -0x4); jump(pc + ((ut32) r))); + RzILOpEffect *seq_then_18 = SEQN(2, op_ASSIGN_12, jump_op_ADD_15_16); + + // if (! (((st32) P1_new) & 0x1)) {seq(r; r = (r & -0x4); jump(pc + ((ut32) r)))} else {{}}; + RzILOpPure *op_AND_3 = LOGAND(CAST(32, MSB(P1_new), DUP(P1_new)), SN(32, 1)); + RzILOpPure *op_INV_4 = INV(NON_ZERO(op_AND_3)); + RzILOpEffect *branch_19 = BRANCH(op_INV_4, seq_then_18, EMPTY()); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_5, branch_19); + return instruction_sequence; +} + +// if (cmp.eq(Ns.new,II)) jump:nt Ii +RzILOpEffect *hex_il_op_j4_cmpeqi_t_jumpnv_nt(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp Ns_new_op = NREG2OP(bundle, 's'); + RzILOpPure *Ns_new = READ_REG(pkt, &Ns_new_op, true); + RzILOpPure *U = UN(32, (ut32)ISA2IMM(hi, 'U')); + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + RzILOpPure *pc = U32(pkt->pkt_addr); + + // U = U; + RzILOpEffect *imm_assign_2 = SETL("U", U); + + // r = r; + RzILOpEffect *imm_assign_6 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_12 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_13 = SETL("r", op_AND_12); + + // jump(pc + ((ut32) r)); + RzILOpPure *op_ADD_16 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *jump_op_ADD_16_17 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", op_ADD_16)); + + // seq(r; r = (r & -0x4); jump(pc + ((ut32) r))); + RzILOpEffect *seq_then_19 = SEQN(2, op_ASSIGN_13, jump_op_ADD_16_17); + + // if ((((ut32) Ns_new) == U)) {seq(r; r = (r & -0x4); jump(pc + ((ut32) r)))} else {{}}; + RzILOpPure *op_EQ_5 = EQ(CAST(32, IL_FALSE, Ns_new), VARL("U")); + RzILOpEffect *branch_20 = BRANCH(op_EQ_5, seq_then_19, EMPTY()); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_2, imm_assign_6, branch_20); + return instruction_sequence; +} + +// if (cmp.eq(Ns.new,II)) jump:t Ii 
+RzILOpEffect *hex_il_op_j4_cmpeqi_t_jumpnv_t(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp Ns_new_op = NREG2OP(bundle, 's'); + RzILOpPure *Ns_new = READ_REG(pkt, &Ns_new_op, true); + RzILOpPure *U = UN(32, (ut32)ISA2IMM(hi, 'U')); + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + RzILOpPure *pc = U32(pkt->pkt_addr); + + // U = U; + RzILOpEffect *imm_assign_2 = SETL("U", U); + + // r = r; + RzILOpEffect *imm_assign_6 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_12 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_13 = SETL("r", op_AND_12); + + // jump(pc + ((ut32) r)); + RzILOpPure *op_ADD_16 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *jump_op_ADD_16_17 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", op_ADD_16)); + + // seq(r; r = (r & -0x4); jump(pc + ((ut32) r))); + RzILOpEffect *seq_then_19 = SEQN(2, op_ASSIGN_13, jump_op_ADD_16_17); + + // if ((((ut32) Ns_new) == U)) {seq(r; r = (r & -0x4); jump(pc + ((ut32) r)))} else {{}}; + RzILOpPure *op_EQ_5 = EQ(CAST(32, IL_FALSE, Ns_new), VARL("U")); + RzILOpEffect *branch_20 = BRANCH(op_EQ_5, seq_then_19, EMPTY()); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_2, imm_assign_6, branch_20); + return instruction_sequence; +} + +// p0 = cmp.eq(Rs,II); if (p0.new) jump:nt Ii +RzILOpEffect *hex_il_op_j4_cmpeqi_tp0_jump_nt_part0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P0_op = EXPLICIT2OP(0, HEX_REG_CLASS_PRED_REGS, false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + RzILOpPure *U = UN(32, (ut32)ISA2IMM(hi, 'U')); + + // U = U; + RzILOpEffect *imm_assign_2 = SETL("U", U); + + // P0 = ((st8) ((((ut32) Rs) == U) ? 
0xff : 0x0)); + RzILOpPure *op_EQ_5 = EQ(CAST(32, IL_FALSE, Rs), VARL("U")); + RzILOpPure *cond_8 = ITE(op_EQ_5, SN(32, 0xff), SN(32, 0)); + RzILOpEffect *op_ASSIGN_10 = WRITE_REG(bundle, &P0_op, CAST(8, MSB(cond_8), DUP(cond_8))); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_2, op_ASSIGN_10); + return instruction_sequence; +} + +// p0 = cmp.eq(Rs,II); if (p0.new) jump:nt Ii +RzILOpEffect *hex_il_op_j4_cmpeqi_tp0_jump_nt_part1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P0_new_op = EXPLICIT2OP(0, HEX_REG_CLASS_PRED_REGS, true); + RzILOpPure *P0_new = READ_REG(pkt, &P0_new_op, true); + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + RzILOpPure *pc = U32(pkt->pkt_addr); + + // r = r; + RzILOpEffect *imm_assign_4 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_10 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_11 = SETL("r", op_AND_10); + + // jump(pc + ((ut32) r)); + RzILOpPure *op_ADD_14 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *jump_op_ADD_14_15 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", op_ADD_14)); + + // seq(r; r = (r & -0x4); jump(pc + ((ut32) r))); + RzILOpEffect *seq_then_17 = SEQN(2, op_ASSIGN_11, jump_op_ADD_14_15); + + // if ((((st32) P0_new) & 0x1)) {seq(r; r = (r & -0x4); jump(pc + ((ut32) r)))} else {{}}; + RzILOpPure *op_AND_3 = LOGAND(CAST(32, MSB(P0_new), DUP(P0_new)), SN(32, 1)); + RzILOpEffect *branch_18 = BRANCH(NON_ZERO(op_AND_3), seq_then_17, EMPTY()); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_4, branch_18); + return instruction_sequence; +} + +// p0 = cmp.eq(Rs,II); if (p0.new) jump:t Ii +RzILOpEffect *hex_il_op_j4_cmpeqi_tp0_jump_t_part0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P0_op = EXPLICIT2OP(0, HEX_REG_CLASS_PRED_REGS, false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, 
Rs_op, false); + RzILOpPure *U = UN(32, (ut32)ISA2IMM(hi, 'U')); + + // U = U; + RzILOpEffect *imm_assign_2 = SETL("U", U); + + // P0 = ((st8) ((((ut32) Rs) == U) ? 0xff : 0x0)); + RzILOpPure *op_EQ_5 = EQ(CAST(32, IL_FALSE, Rs), VARL("U")); + RzILOpPure *cond_8 = ITE(op_EQ_5, SN(32, 0xff), SN(32, 0)); + RzILOpEffect *op_ASSIGN_10 = WRITE_REG(bundle, &P0_op, CAST(8, MSB(cond_8), DUP(cond_8))); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_2, op_ASSIGN_10); + return instruction_sequence; +} + +// p0 = cmp.eq(Rs,II); if (p0.new) jump:t Ii +RzILOpEffect *hex_il_op_j4_cmpeqi_tp0_jump_t_part1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P0_new_op = EXPLICIT2OP(0, HEX_REG_CLASS_PRED_REGS, true); + RzILOpPure *P0_new = READ_REG(pkt, &P0_new_op, true); + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + RzILOpPure *pc = U32(pkt->pkt_addr); + + // r = r; + RzILOpEffect *imm_assign_4 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_10 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_11 = SETL("r", op_AND_10); + + // jump(pc + ((ut32) r)); + RzILOpPure *op_ADD_14 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *jump_op_ADD_14_15 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", op_ADD_14)); + + // seq(r; r = (r & -0x4); jump(pc + ((ut32) r))); + RzILOpEffect *seq_then_17 = SEQN(2, op_ASSIGN_11, jump_op_ADD_14_15); + + // if ((((st32) P0_new) & 0x1)) {seq(r; r = (r & -0x4); jump(pc + ((ut32) r)))} else {{}}; + RzILOpPure *op_AND_3 = LOGAND(CAST(32, MSB(P0_new), DUP(P0_new)), SN(32, 1)); + RzILOpEffect *branch_18 = BRANCH(NON_ZERO(op_AND_3), seq_then_17, EMPTY()); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_4, branch_18); + return instruction_sequence; +} + +// p1 = cmp.eq(Rs,II); if (p1.new) jump:nt Ii +RzILOpEffect *hex_il_op_j4_cmpeqi_tp1_jump_nt_part0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = 
bundle->pkt; + // READ + const HexOp P1_op = EXPLICIT2OP(1, HEX_REG_CLASS_PRED_REGS, false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + RzILOpPure *U = UN(32, (ut32)ISA2IMM(hi, 'U')); + + // U = U; + RzILOpEffect *imm_assign_2 = SETL("U", U); + + // P1 = ((st8) ((((ut32) Rs) == U) ? 0xff : 0x0)); + RzILOpPure *op_EQ_5 = EQ(CAST(32, IL_FALSE, Rs), VARL("U")); + RzILOpPure *cond_8 = ITE(op_EQ_5, SN(32, 0xff), SN(32, 0)); + RzILOpEffect *op_ASSIGN_10 = WRITE_REG(bundle, &P1_op, CAST(8, MSB(cond_8), DUP(cond_8))); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_2, op_ASSIGN_10); + return instruction_sequence; +} + +// p1 = cmp.eq(Rs,II); if (p1.new) jump:nt Ii +RzILOpEffect *hex_il_op_j4_cmpeqi_tp1_jump_nt_part1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P1_new_op = EXPLICIT2OP(1, HEX_REG_CLASS_PRED_REGS, true); + RzILOpPure *P1_new = READ_REG(pkt, &P1_new_op, true); + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + RzILOpPure *pc = U32(pkt->pkt_addr); + + // r = r; + RzILOpEffect *imm_assign_4 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_10 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_11 = SETL("r", op_AND_10); + + // jump(pc + ((ut32) r)); + RzILOpPure *op_ADD_14 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *jump_op_ADD_14_15 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", op_ADD_14)); + + // seq(r; r = (r & -0x4); jump(pc + ((ut32) r))); + RzILOpEffect *seq_then_17 = SEQN(2, op_ASSIGN_11, jump_op_ADD_14_15); + + // if ((((st32) P1_new) & 0x1)) {seq(r; r = (r & -0x4); jump(pc + ((ut32) r)))} else {{}}; + RzILOpPure *op_AND_3 = LOGAND(CAST(32, MSB(P1_new), DUP(P1_new)), SN(32, 1)); + RzILOpEffect *branch_18 = BRANCH(NON_ZERO(op_AND_3), seq_then_17, EMPTY()); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_4, branch_18); + return instruction_sequence; +} + +// p1 = 
cmp.eq(Rs,II); if (p1.new) jump:t Ii +RzILOpEffect *hex_il_op_j4_cmpeqi_tp1_jump_t_part0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P1_op = EXPLICIT2OP(1, HEX_REG_CLASS_PRED_REGS, false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + RzILOpPure *U = UN(32, (ut32)ISA2IMM(hi, 'U')); + + // U = U; + RzILOpEffect *imm_assign_2 = SETL("U", U); + + // P1 = ((st8) ((((ut32) Rs) == U) ? 0xff : 0x0)); + RzILOpPure *op_EQ_5 = EQ(CAST(32, IL_FALSE, Rs), VARL("U")); + RzILOpPure *cond_8 = ITE(op_EQ_5, SN(32, 0xff), SN(32, 0)); + RzILOpEffect *op_ASSIGN_10 = WRITE_REG(bundle, &P1_op, CAST(8, MSB(cond_8), DUP(cond_8))); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_2, op_ASSIGN_10); + return instruction_sequence; +} + +// p1 = cmp.eq(Rs,II); if (p1.new) jump:t Ii +RzILOpEffect *hex_il_op_j4_cmpeqi_tp1_jump_t_part1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P1_new_op = EXPLICIT2OP(1, HEX_REG_CLASS_PRED_REGS, true); + RzILOpPure *P1_new = READ_REG(pkt, &P1_new_op, true); + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + RzILOpPure *pc = U32(pkt->pkt_addr); + + // r = r; + RzILOpEffect *imm_assign_4 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_10 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_11 = SETL("r", op_AND_10); + + // jump(pc + ((ut32) r)); + RzILOpPure *op_ADD_14 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *jump_op_ADD_14_15 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", op_ADD_14)); + + // seq(r; r = (r & -0x4); jump(pc + ((ut32) r))); + RzILOpEffect *seq_then_17 = SEQN(2, op_ASSIGN_11, jump_op_ADD_14_15); + + // if ((((st32) P1_new) & 0x1)) {seq(r; r = (r & -0x4); jump(pc + ((ut32) r)))} else {{}}; + RzILOpPure *op_AND_3 = LOGAND(CAST(32, MSB(P1_new), DUP(P1_new)), SN(32, 1)); + RzILOpEffect *branch_18 = 
BRANCH(NON_ZERO(op_AND_3), seq_then_17, EMPTY()); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_4, branch_18); + return instruction_sequence; +} + +// if (!cmp.eq(Ns.new,n1)) jump:nt Ii +RzILOpEffect *hex_il_op_j4_cmpeqn1_f_jumpnv_nt(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp Ns_new_op = NREG2OP(bundle, 's'); + RzILOpPure *Ns_new = READ_REG(pkt, &Ns_new_op, true); + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + RzILOpPure *pc = U32(pkt->pkt_addr); + + // r = r; + RzILOpEffect *imm_assign_5 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_11 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_12 = SETL("r", op_AND_11); + + // jump(pc + ((ut32) r)); + RzILOpPure *op_ADD_15 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *jump_op_ADD_15_16 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", op_ADD_15)); + + // seq(r; r = (r & -0x4); jump(pc + ((ut32) r))); + RzILOpEffect *seq_then_18 = SEQN(2, op_ASSIGN_12, jump_op_ADD_15_16); + + // if ((Ns_new != -0x1)) {seq(r; r = (r & -0x4); jump(pc + ((ut32) r)))} else {{}}; + RzILOpPure *op_NE_4 = INV(EQ(Ns_new, SN(32, -1))); + RzILOpEffect *branch_19 = BRANCH(op_NE_4, seq_then_18, EMPTY()); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_5, branch_19); + return instruction_sequence; +} + +// if (!cmp.eq(Ns.new,n1)) jump:t Ii +RzILOpEffect *hex_il_op_j4_cmpeqn1_f_jumpnv_t(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp Ns_new_op = NREG2OP(bundle, 's'); + RzILOpPure *Ns_new = READ_REG(pkt, &Ns_new_op, true); + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + RzILOpPure *pc = U32(pkt->pkt_addr); + + // r = r; + RzILOpEffect *imm_assign_5 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_11 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_12 = SETL("r", op_AND_11); + + // jump(pc + ((ut32) r)); + RzILOpPure 
*op_ADD_15 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *jump_op_ADD_15_16 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", op_ADD_15)); + + // seq(r; r = (r & -0x4); jump(pc + ((ut32) r))); + RzILOpEffect *seq_then_18 = SEQN(2, op_ASSIGN_12, jump_op_ADD_15_16); + + // if ((Ns_new != -0x1)) {seq(r; r = (r & -0x4); jump(pc + ((ut32) r)))} else {{}}; + RzILOpPure *op_NE_4 = INV(EQ(Ns_new, SN(32, -1))); + RzILOpEffect *branch_19 = BRANCH(op_NE_4, seq_then_18, EMPTY()); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_5, branch_19); + return instruction_sequence; +} + +// p0 = cmp.eq(Rs,n1); if (!p0.new) jump:nt Ii +RzILOpEffect *hex_il_op_j4_cmpeqn1_fp0_jump_nt_part0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P0_op = EXPLICIT2OP(0, HEX_REG_CLASS_PRED_REGS, false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // P0 = ((st8) ((Rs == -0x1) ? 
0xff : 0x0)); + RzILOpPure *op_EQ_4 = EQ(Rs, SN(32, -1)); + RzILOpPure *cond_7 = ITE(op_EQ_4, SN(32, 0xff), SN(32, 0)); + RzILOpEffect *op_ASSIGN_9 = WRITE_REG(bundle, &P0_op, CAST(8, MSB(cond_7), DUP(cond_7))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_9; + return instruction_sequence; +} + +// p0 = cmp.eq(Rs,n1); if (!p0.new) jump:nt Ii +RzILOpEffect *hex_il_op_j4_cmpeqn1_fp0_jump_nt_part1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P0_new_op = EXPLICIT2OP(0, HEX_REG_CLASS_PRED_REGS, true); + RzILOpPure *P0_new = READ_REG(pkt, &P0_new_op, true); + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + RzILOpPure *pc = U32(pkt->pkt_addr); + + // r = r; + RzILOpEffect *imm_assign_5 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_11 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_12 = SETL("r", op_AND_11); + + // jump(pc + ((ut32) r)); + RzILOpPure *op_ADD_15 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *jump_op_ADD_15_16 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", op_ADD_15)); + + // seq(r; r = (r & -0x4); jump(pc + ((ut32) r))); + RzILOpEffect *seq_then_18 = SEQN(2, op_ASSIGN_12, jump_op_ADD_15_16); + + // if (! 
(((st32) P0_new) & 0x1)) {seq(r; r = (r & -0x4); jump(pc + ((ut32) r)))} else {{}}; + RzILOpPure *op_AND_3 = LOGAND(CAST(32, MSB(P0_new), DUP(P0_new)), SN(32, 1)); + RzILOpPure *op_INV_4 = INV(NON_ZERO(op_AND_3)); + RzILOpEffect *branch_19 = BRANCH(op_INV_4, seq_then_18, EMPTY()); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_5, branch_19); + return instruction_sequence; +} + +// p0 = cmp.eq(Rs,n1); if (!p0.new) jump:t Ii +RzILOpEffect *hex_il_op_j4_cmpeqn1_fp0_jump_t_part0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P0_op = EXPLICIT2OP(0, HEX_REG_CLASS_PRED_REGS, false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // P0 = ((st8) ((Rs == -0x1) ? 0xff : 0x0)); + RzILOpPure *op_EQ_4 = EQ(Rs, SN(32, -1)); + RzILOpPure *cond_7 = ITE(op_EQ_4, SN(32, 0xff), SN(32, 0)); + RzILOpEffect *op_ASSIGN_9 = WRITE_REG(bundle, &P0_op, CAST(8, MSB(cond_7), DUP(cond_7))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_9; + return instruction_sequence; +} + +// p0 = cmp.eq(Rs,n1); if (!p0.new) jump:t Ii +RzILOpEffect *hex_il_op_j4_cmpeqn1_fp0_jump_t_part1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P0_new_op = EXPLICIT2OP(0, HEX_REG_CLASS_PRED_REGS, true); + RzILOpPure *P0_new = READ_REG(pkt, &P0_new_op, true); + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + RzILOpPure *pc = U32(pkt->pkt_addr); + + // r = r; + RzILOpEffect *imm_assign_5 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_11 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_12 = SETL("r", op_AND_11); + + // jump(pc + ((ut32) r)); + RzILOpPure *op_ADD_15 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *jump_op_ADD_15_16 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", op_ADD_15)); + + // seq(r; r = (r & -0x4); jump(pc + ((ut32) r))); + RzILOpEffect *seq_then_18 = 
SEQN(2, op_ASSIGN_12, jump_op_ADD_15_16); + + // if (! (((st32) P0_new) & 0x1)) {seq(r; r = (r & -0x4); jump(pc + ((ut32) r)))} else {{}}; + RzILOpPure *op_AND_3 = LOGAND(CAST(32, MSB(P0_new), DUP(P0_new)), SN(32, 1)); + RzILOpPure *op_INV_4 = INV(NON_ZERO(op_AND_3)); + RzILOpEffect *branch_19 = BRANCH(op_INV_4, seq_then_18, EMPTY()); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_5, branch_19); + return instruction_sequence; +} + +// p1 = cmp.eq(Rs,n1); if (!p1.new) jump:nt Ii +RzILOpEffect *hex_il_op_j4_cmpeqn1_fp1_jump_nt_part0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P1_op = EXPLICIT2OP(1, HEX_REG_CLASS_PRED_REGS, false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // P1 = ((st8) ((Rs == -0x1) ? 0xff : 0x0)); + RzILOpPure *op_EQ_4 = EQ(Rs, SN(32, -1)); + RzILOpPure *cond_7 = ITE(op_EQ_4, SN(32, 0xff), SN(32, 0)); + RzILOpEffect *op_ASSIGN_9 = WRITE_REG(bundle, &P1_op, CAST(8, MSB(cond_7), DUP(cond_7))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_9; + return instruction_sequence; +} + +// p1 = cmp.eq(Rs,n1); if (!p1.new) jump:nt Ii +RzILOpEffect *hex_il_op_j4_cmpeqn1_fp1_jump_nt_part1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P1_new_op = EXPLICIT2OP(1, HEX_REG_CLASS_PRED_REGS, true); + RzILOpPure *P1_new = READ_REG(pkt, &P1_new_op, true); + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + RzILOpPure *pc = U32(pkt->pkt_addr); + + // r = r; + RzILOpEffect *imm_assign_5 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_11 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_12 = SETL("r", op_AND_11); + + // jump(pc + ((ut32) r)); + RzILOpPure *op_ADD_15 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *jump_op_ADD_15_16 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", op_ADD_15)); + + // seq(r; r = (r & -0x4); 
jump(pc + ((ut32) r))); + RzILOpEffect *seq_then_18 = SEQN(2, op_ASSIGN_12, jump_op_ADD_15_16); + + // if (! (((st32) P1_new) & 0x1)) {seq(r; r = (r & -0x4); jump(pc + ((ut32) r)))} else {{}}; + RzILOpPure *op_AND_3 = LOGAND(CAST(32, MSB(P1_new), DUP(P1_new)), SN(32, 1)); + RzILOpPure *op_INV_4 = INV(NON_ZERO(op_AND_3)); + RzILOpEffect *branch_19 = BRANCH(op_INV_4, seq_then_18, EMPTY()); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_5, branch_19); + return instruction_sequence; +} + +// p1 = cmp.eq(Rs,n1); if (!p1.new) jump:t Ii +RzILOpEffect *hex_il_op_j4_cmpeqn1_fp1_jump_t_part0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P1_op = EXPLICIT2OP(1, HEX_REG_CLASS_PRED_REGS, false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // P1 = ((st8) ((Rs == -0x1) ? 0xff : 0x0)); + RzILOpPure *op_EQ_4 = EQ(Rs, SN(32, -1)); + RzILOpPure *cond_7 = ITE(op_EQ_4, SN(32, 0xff), SN(32, 0)); + RzILOpEffect *op_ASSIGN_9 = WRITE_REG(bundle, &P1_op, CAST(8, MSB(cond_7), DUP(cond_7))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_9; + return instruction_sequence; +} + +// p1 = cmp.eq(Rs,n1); if (!p1.new) jump:t Ii +RzILOpEffect *hex_il_op_j4_cmpeqn1_fp1_jump_t_part1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P1_new_op = EXPLICIT2OP(1, HEX_REG_CLASS_PRED_REGS, true); + RzILOpPure *P1_new = READ_REG(pkt, &P1_new_op, true); + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + RzILOpPure *pc = U32(pkt->pkt_addr); + + // r = r; + RzILOpEffect *imm_assign_5 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_11 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_12 = SETL("r", op_AND_11); + + // jump(pc + ((ut32) r)); + RzILOpPure *op_ADD_15 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *jump_op_ADD_15_16 = SEQ2(SETL("jump_flag", IL_TRUE), 
SETL("jump_target", op_ADD_15)); + + // seq(r; r = (r & -0x4); jump(pc + ((ut32) r))); + RzILOpEffect *seq_then_18 = SEQN(2, op_ASSIGN_12, jump_op_ADD_15_16); + + // if (! (((st32) P1_new) & 0x1)) {seq(r; r = (r & -0x4); jump(pc + ((ut32) r)))} else {{}}; + RzILOpPure *op_AND_3 = LOGAND(CAST(32, MSB(P1_new), DUP(P1_new)), SN(32, 1)); + RzILOpPure *op_INV_4 = INV(NON_ZERO(op_AND_3)); + RzILOpEffect *branch_19 = BRANCH(op_INV_4, seq_then_18, EMPTY()); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_5, branch_19); + return instruction_sequence; +} + +// if (cmp.eq(Ns.new,n1)) jump:nt Ii +RzILOpEffect *hex_il_op_j4_cmpeqn1_t_jumpnv_nt(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp Ns_new_op = NREG2OP(bundle, 's'); + RzILOpPure *Ns_new = READ_REG(pkt, &Ns_new_op, true); + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + RzILOpPure *pc = U32(pkt->pkt_addr); + + // r = r; + RzILOpEffect *imm_assign_5 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_11 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_12 = SETL("r", op_AND_11); + + // jump(pc + ((ut32) r)); + RzILOpPure *op_ADD_15 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *jump_op_ADD_15_16 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", op_ADD_15)); + + // seq(r; r = (r & -0x4); jump(pc + ((ut32) r))); + RzILOpEffect *seq_then_18 = SEQN(2, op_ASSIGN_12, jump_op_ADD_15_16); + + // if ((Ns_new == -0x1)) {seq(r; r = (r & -0x4); jump(pc + ((ut32) r)))} else {{}}; + RzILOpPure *op_EQ_4 = EQ(Ns_new, SN(32, -1)); + RzILOpEffect *branch_19 = BRANCH(op_EQ_4, seq_then_18, EMPTY()); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_5, branch_19); + return instruction_sequence; +} + +// if (cmp.eq(Ns.new,n1)) jump:t Ii +RzILOpEffect *hex_il_op_j4_cmpeqn1_t_jumpnv_t(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp Ns_new_op = 
NREG2OP(bundle, 's'); + RzILOpPure *Ns_new = READ_REG(pkt, &Ns_new_op, true); + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + RzILOpPure *pc = U32(pkt->pkt_addr); + + // r = r; + RzILOpEffect *imm_assign_5 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_11 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_12 = SETL("r", op_AND_11); + + // jump(pc + ((ut32) r)); + RzILOpPure *op_ADD_15 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *jump_op_ADD_15_16 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", op_ADD_15)); + + // seq(r; r = (r & -0x4); jump(pc + ((ut32) r))); + RzILOpEffect *seq_then_18 = SEQN(2, op_ASSIGN_12, jump_op_ADD_15_16); + + // if ((Ns_new == -0x1)) {seq(r; r = (r & -0x4); jump(pc + ((ut32) r)))} else {{}}; + RzILOpPure *op_EQ_4 = EQ(Ns_new, SN(32, -1)); + RzILOpEffect *branch_19 = BRANCH(op_EQ_4, seq_then_18, EMPTY()); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_5, branch_19); + return instruction_sequence; +} + +// p0 = cmp.eq(Rs,n1); if (p0.new) jump:nt Ii +RzILOpEffect *hex_il_op_j4_cmpeqn1_tp0_jump_nt_part0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P0_op = EXPLICIT2OP(0, HEX_REG_CLASS_PRED_REGS, false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // P0 = ((st8) ((Rs == -0x1) ? 
0xff : 0x0)); + RzILOpPure *op_EQ_4 = EQ(Rs, SN(32, -1)); + RzILOpPure *cond_7 = ITE(op_EQ_4, SN(32, 0xff), SN(32, 0)); + RzILOpEffect *op_ASSIGN_9 = WRITE_REG(bundle, &P0_op, CAST(8, MSB(cond_7), DUP(cond_7))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_9; + return instruction_sequence; +} + +// p0 = cmp.eq(Rs,n1); if (p0.new) jump:nt Ii +RzILOpEffect *hex_il_op_j4_cmpeqn1_tp0_jump_nt_part1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P0_new_op = EXPLICIT2OP(0, HEX_REG_CLASS_PRED_REGS, true); + RzILOpPure *P0_new = READ_REG(pkt, &P0_new_op, true); + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + RzILOpPure *pc = U32(pkt->pkt_addr); + + // r = r; + RzILOpEffect *imm_assign_4 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_10 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_11 = SETL("r", op_AND_10); + + // jump(pc + ((ut32) r)); + RzILOpPure *op_ADD_14 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *jump_op_ADD_14_15 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", op_ADD_14)); + + // seq(r; r = (r & -0x4); jump(pc + ((ut32) r))); + RzILOpEffect *seq_then_17 = SEQN(2, op_ASSIGN_11, jump_op_ADD_14_15); + + // if ((((st32) P0_new) & 0x1)) {seq(r; r = (r & -0x4); jump(pc + ((ut32) r)))} else {{}}; + RzILOpPure *op_AND_3 = LOGAND(CAST(32, MSB(P0_new), DUP(P0_new)), SN(32, 1)); + RzILOpEffect *branch_18 = BRANCH(NON_ZERO(op_AND_3), seq_then_17, EMPTY()); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_4, branch_18); + return instruction_sequence; +} + +// p0 = cmp.eq(Rs,n1); if (p0.new) jump:t Ii +RzILOpEffect *hex_il_op_j4_cmpeqn1_tp0_jump_t_part0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P0_op = EXPLICIT2OP(0, HEX_REG_CLASS_PRED_REGS, false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // P0 = ((st8) ((Rs == 
-0x1) ? 0xff : 0x0)); + RzILOpPure *op_EQ_4 = EQ(Rs, SN(32, -1)); + RzILOpPure *cond_7 = ITE(op_EQ_4, SN(32, 0xff), SN(32, 0)); + RzILOpEffect *op_ASSIGN_9 = WRITE_REG(bundle, &P0_op, CAST(8, MSB(cond_7), DUP(cond_7))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_9; + return instruction_sequence; +} + +// p0 = cmp.eq(Rs,n1); if (p0.new) jump:t Ii +RzILOpEffect *hex_il_op_j4_cmpeqn1_tp0_jump_t_part1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P0_new_op = EXPLICIT2OP(0, HEX_REG_CLASS_PRED_REGS, true); + RzILOpPure *P0_new = READ_REG(pkt, &P0_new_op, true); + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + RzILOpPure *pc = U32(pkt->pkt_addr); + + // r = r; + RzILOpEffect *imm_assign_4 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_10 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_11 = SETL("r", op_AND_10); + + // jump(pc + ((ut32) r)); + RzILOpPure *op_ADD_14 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *jump_op_ADD_14_15 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", op_ADD_14)); + + // seq(r; r = (r & -0x4); jump(pc + ((ut32) r))); + RzILOpEffect *seq_then_17 = SEQN(2, op_ASSIGN_11, jump_op_ADD_14_15); + + // if ((((st32) P0_new) & 0x1)) {seq(r; r = (r & -0x4); jump(pc + ((ut32) r)))} else {{}}; + RzILOpPure *op_AND_3 = LOGAND(CAST(32, MSB(P0_new), DUP(P0_new)), SN(32, 1)); + RzILOpEffect *branch_18 = BRANCH(NON_ZERO(op_AND_3), seq_then_17, EMPTY()); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_4, branch_18); + return instruction_sequence; +} + +// p1 = cmp.eq(Rs,n1); if (p1.new) jump:nt Ii +RzILOpEffect *hex_il_op_j4_cmpeqn1_tp1_jump_nt_part0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P1_op = EXPLICIT2OP(1, HEX_REG_CLASS_PRED_REGS, false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // P1 = ((st8) 
((Rs == -0x1) ? 0xff : 0x0)); + RzILOpPure *op_EQ_4 = EQ(Rs, SN(32, -1)); + RzILOpPure *cond_7 = ITE(op_EQ_4, SN(32, 0xff), SN(32, 0)); + RzILOpEffect *op_ASSIGN_9 = WRITE_REG(bundle, &P1_op, CAST(8, MSB(cond_7), DUP(cond_7))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_9; + return instruction_sequence; +} + +// p1 = cmp.eq(Rs,n1); if (p1.new) jump:nt Ii +RzILOpEffect *hex_il_op_j4_cmpeqn1_tp1_jump_nt_part1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P1_new_op = EXPLICIT2OP(1, HEX_REG_CLASS_PRED_REGS, true); + RzILOpPure *P1_new = READ_REG(pkt, &P1_new_op, true); + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + RzILOpPure *pc = U32(pkt->pkt_addr); + + // r = r; + RzILOpEffect *imm_assign_4 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_10 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_11 = SETL("r", op_AND_10); + + // jump(pc + ((ut32) r)); + RzILOpPure *op_ADD_14 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *jump_op_ADD_14_15 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", op_ADD_14)); + + // seq(r; r = (r & -0x4); jump(pc + ((ut32) r))); + RzILOpEffect *seq_then_17 = SEQN(2, op_ASSIGN_11, jump_op_ADD_14_15); + + // if ((((st32) P1_new) & 0x1)) {seq(r; r = (r & -0x4); jump(pc + ((ut32) r)))} else {{}}; + RzILOpPure *op_AND_3 = LOGAND(CAST(32, MSB(P1_new), DUP(P1_new)), SN(32, 1)); + RzILOpEffect *branch_18 = BRANCH(NON_ZERO(op_AND_3), seq_then_17, EMPTY()); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_4, branch_18); + return instruction_sequence; +} + +// p1 = cmp.eq(Rs,n1); if (p1.new) jump:t Ii +RzILOpEffect *hex_il_op_j4_cmpeqn1_tp1_jump_t_part0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P1_op = EXPLICIT2OP(1, HEX_REG_CLASS_PRED_REGS, false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // P1 = 
((st8) ((Rs == -0x1) ? 0xff : 0x0)); + RzILOpPure *op_EQ_4 = EQ(Rs, SN(32, -1)); + RzILOpPure *cond_7 = ITE(op_EQ_4, SN(32, 0xff), SN(32, 0)); + RzILOpEffect *op_ASSIGN_9 = WRITE_REG(bundle, &P1_op, CAST(8, MSB(cond_7), DUP(cond_7))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_9; + return instruction_sequence; +} + +// p1 = cmp.eq(Rs,n1); if (p1.new) jump:t Ii +RzILOpEffect *hex_il_op_j4_cmpeqn1_tp1_jump_t_part1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P1_new_op = EXPLICIT2OP(1, HEX_REG_CLASS_PRED_REGS, true); + RzILOpPure *P1_new = READ_REG(pkt, &P1_new_op, true); + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + RzILOpPure *pc = U32(pkt->pkt_addr); + + // r = r; + RzILOpEffect *imm_assign_4 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_10 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_11 = SETL("r", op_AND_10); + + // jump(pc + ((ut32) r)); + RzILOpPure *op_ADD_14 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *jump_op_ADD_14_15 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", op_ADD_14)); + + // seq(r; r = (r & -0x4); jump(pc + ((ut32) r))); + RzILOpEffect *seq_then_17 = SEQN(2, op_ASSIGN_11, jump_op_ADD_14_15); + + // if ((((st32) P1_new) & 0x1)) {seq(r; r = (r & -0x4); jump(pc + ((ut32) r)))} else {{}}; + RzILOpPure *op_AND_3 = LOGAND(CAST(32, MSB(P1_new), DUP(P1_new)), SN(32, 1)); + RzILOpEffect *branch_18 = BRANCH(NON_ZERO(op_AND_3), seq_then_17, EMPTY()); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_4, branch_18); + return instruction_sequence; +} + +// if (!cmp.gt(Ns.new,Rt)) jump:nt Ii +RzILOpEffect *hex_il_op_j4_cmpgt_f_jumpnv_nt(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp Ns_new_op = NREG2OP(bundle, 's'); + RzILOpPure *Ns_new = READ_REG(pkt, &Ns_new_op, true); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, 
Rt_op, false); + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + RzILOpPure *pc = U32(pkt->pkt_addr); + + // r = r; + RzILOpEffect *imm_assign_5 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_11 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_12 = SETL("r", op_AND_11); + + // jump(pc + ((ut32) r)); + RzILOpPure *op_ADD_15 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *jump_op_ADD_15_16 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", op_ADD_15)); + + // seq(r; r = (r & -0x4); jump(pc + ((ut32) r))); + RzILOpEffect *seq_then_18 = SEQN(2, op_ASSIGN_12, jump_op_ADD_15_16); + + // if (! (Ns_new > Rt)) {seq(r; r = (r & -0x4); jump(pc + ((ut32) r)))} else {{}}; + RzILOpPure *op_GT_3 = SGT(Ns_new, Rt); + RzILOpPure *op_INV_4 = INV(op_GT_3); + RzILOpEffect *branch_19 = BRANCH(op_INV_4, seq_then_18, EMPTY()); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_5, branch_19); + return instruction_sequence; +} + +// if (!cmp.gt(Ns.new,Rt)) jump:t Ii +RzILOpEffect *hex_il_op_j4_cmpgt_f_jumpnv_t(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp Ns_new_op = NREG2OP(bundle, 's'); + RzILOpPure *Ns_new = READ_REG(pkt, &Ns_new_op, true); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + RzILOpPure *pc = U32(pkt->pkt_addr); + + // r = r; + RzILOpEffect *imm_assign_5 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_11 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_12 = SETL("r", op_AND_11); + + // jump(pc + ((ut32) r)); + RzILOpPure *op_ADD_15 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *jump_op_ADD_15_16 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", op_ADD_15)); + + // seq(r; r = (r & -0x4); jump(pc + ((ut32) r))); + RzILOpEffect *seq_then_18 = SEQN(2, op_ASSIGN_12, jump_op_ADD_15_16); + + // if (! 
(Ns_new > Rt)) {seq(r; r = (r & -0x4); jump(pc + ((ut32) r)))} else {{}}; + RzILOpPure *op_GT_3 = SGT(Ns_new, Rt); + RzILOpPure *op_INV_4 = INV(op_GT_3); + RzILOpEffect *branch_19 = BRANCH(op_INV_4, seq_then_18, EMPTY()); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_5, branch_19); + return instruction_sequence; +} + +// p0 = cmp.gt(Rs,Rt); if (!p0.new) jump:nt Ii +RzILOpEffect *hex_il_op_j4_cmpgt_fp0_jump_nt_part0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P0_op = EXPLICIT2OP(0, HEX_REG_CLASS_PRED_REGS, false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // P0 = ((st8) ((Rs > Rt) ? 0xff : 0x0)); + RzILOpPure *op_GT_3 = SGT(Rs, Rt); + RzILOpPure *cond_6 = ITE(op_GT_3, SN(32, 0xff), SN(32, 0)); + RzILOpEffect *op_ASSIGN_8 = WRITE_REG(bundle, &P0_op, CAST(8, MSB(cond_6), DUP(cond_6))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_8; + return instruction_sequence; +} + +// p0 = cmp.gt(Rs,Rt); if (!p0.new) jump:nt Ii +RzILOpEffect *hex_il_op_j4_cmpgt_fp0_jump_nt_part1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P0_new_op = EXPLICIT2OP(0, HEX_REG_CLASS_PRED_REGS, true); + RzILOpPure *P0_new = READ_REG(pkt, &P0_new_op, true); + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + RzILOpPure *pc = U32(pkt->pkt_addr); + + // r = r; + RzILOpEffect *imm_assign_5 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_11 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_12 = SETL("r", op_AND_11); + + // jump(pc + ((ut32) r)); + RzILOpPure *op_ADD_15 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *jump_op_ADD_15_16 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", op_ADD_15)); + + // seq(r; r = (r & -0x4); jump(pc + ((ut32) r))); + 
RzILOpEffect *seq_then_18 = SEQN(2, op_ASSIGN_12, jump_op_ADD_15_16); + + // if (! (((st32) P0_new) & 0x1)) {seq(r; r = (r & -0x4); jump(pc + ((ut32) r)))} else {{}}; + RzILOpPure *op_AND_3 = LOGAND(CAST(32, MSB(P0_new), DUP(P0_new)), SN(32, 1)); + RzILOpPure *op_INV_4 = INV(NON_ZERO(op_AND_3)); + RzILOpEffect *branch_19 = BRANCH(op_INV_4, seq_then_18, EMPTY()); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_5, branch_19); + return instruction_sequence; +} + +// p0 = cmp.gt(Rs,Rt); if (!p0.new) jump:t Ii +RzILOpEffect *hex_il_op_j4_cmpgt_fp0_jump_t_part0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P0_op = EXPLICIT2OP(0, HEX_REG_CLASS_PRED_REGS, false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // P0 = ((st8) ((Rs > Rt) ? 0xff : 0x0)); + RzILOpPure *op_GT_3 = SGT(Rs, Rt); + RzILOpPure *cond_6 = ITE(op_GT_3, SN(32, 0xff), SN(32, 0)); + RzILOpEffect *op_ASSIGN_8 = WRITE_REG(bundle, &P0_op, CAST(8, MSB(cond_6), DUP(cond_6))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_8; + return instruction_sequence; +} + +// p0 = cmp.gt(Rs,Rt); if (!p0.new) jump:t Ii +RzILOpEffect *hex_il_op_j4_cmpgt_fp0_jump_t_part1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P0_new_op = EXPLICIT2OP(0, HEX_REG_CLASS_PRED_REGS, true); + RzILOpPure *P0_new = READ_REG(pkt, &P0_new_op, true); + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + RzILOpPure *pc = U32(pkt->pkt_addr); + + // r = r; + RzILOpEffect *imm_assign_5 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_11 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_12 = SETL("r", op_AND_11); + + // jump(pc + ((ut32) r)); + RzILOpPure *op_ADD_15 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect 
*jump_op_ADD_15_16 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", op_ADD_15)); + + // seq(r; r = (r & -0x4); jump(pc + ((ut32) r))); + RzILOpEffect *seq_then_18 = SEQN(2, op_ASSIGN_12, jump_op_ADD_15_16); + + // if (! (((st32) P0_new) & 0x1)) {seq(r; r = (r & -0x4); jump(pc + ((ut32) r)))} else {{}}; + RzILOpPure *op_AND_3 = LOGAND(CAST(32, MSB(P0_new), DUP(P0_new)), SN(32, 1)); + RzILOpPure *op_INV_4 = INV(NON_ZERO(op_AND_3)); + RzILOpEffect *branch_19 = BRANCH(op_INV_4, seq_then_18, EMPTY()); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_5, branch_19); + return instruction_sequence; +} + +// p1 = cmp.gt(Rs,Rt); if (!p1.new) jump:nt Ii +RzILOpEffect *hex_il_op_j4_cmpgt_fp1_jump_nt_part0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P1_op = EXPLICIT2OP(1, HEX_REG_CLASS_PRED_REGS, false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // P1 = ((st8) ((Rs > Rt) ? 
0xff : 0x0)); + RzILOpPure *op_GT_3 = SGT(Rs, Rt); + RzILOpPure *cond_6 = ITE(op_GT_3, SN(32, 0xff), SN(32, 0)); + RzILOpEffect *op_ASSIGN_8 = WRITE_REG(bundle, &P1_op, CAST(8, MSB(cond_6), DUP(cond_6))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_8; + return instruction_sequence; +} + +// p1 = cmp.gt(Rs,Rt); if (!p1.new) jump:nt Ii +RzILOpEffect *hex_il_op_j4_cmpgt_fp1_jump_nt_part1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P1_new_op = EXPLICIT2OP(1, HEX_REG_CLASS_PRED_REGS, true); + RzILOpPure *P1_new = READ_REG(pkt, &P1_new_op, true); + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + RzILOpPure *pc = U32(pkt->pkt_addr); + + // r = r; + RzILOpEffect *imm_assign_5 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_11 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_12 = SETL("r", op_AND_11); + + // jump(pc + ((ut32) r)); + RzILOpPure *op_ADD_15 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *jump_op_ADD_15_16 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", op_ADD_15)); + + // seq(r; r = (r & -0x4); jump(pc + ((ut32) r))); + RzILOpEffect *seq_then_18 = SEQN(2, op_ASSIGN_12, jump_op_ADD_15_16); + + // if (! 
(((st32) P1_new) & 0x1)) {seq(r; r = (r & -0x4); jump(pc + ((ut32) r)))} else {{}}; + RzILOpPure *op_AND_3 = LOGAND(CAST(32, MSB(P1_new), DUP(P1_new)), SN(32, 1)); + RzILOpPure *op_INV_4 = INV(NON_ZERO(op_AND_3)); + RzILOpEffect *branch_19 = BRANCH(op_INV_4, seq_then_18, EMPTY()); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_5, branch_19); + return instruction_sequence; +} + +// p1 = cmp.gt(Rs,Rt); if (!p1.new) jump:t Ii +RzILOpEffect *hex_il_op_j4_cmpgt_fp1_jump_t_part0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P1_op = EXPLICIT2OP(1, HEX_REG_CLASS_PRED_REGS, false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // P1 = ((st8) ((Rs > Rt) ? 0xff : 0x0)); + RzILOpPure *op_GT_3 = SGT(Rs, Rt); + RzILOpPure *cond_6 = ITE(op_GT_3, SN(32, 0xff), SN(32, 0)); + RzILOpEffect *op_ASSIGN_8 = WRITE_REG(bundle, &P1_op, CAST(8, MSB(cond_6), DUP(cond_6))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_8; + return instruction_sequence; +} + +// p1 = cmp.gt(Rs,Rt); if (!p1.new) jump:t Ii +RzILOpEffect *hex_il_op_j4_cmpgt_fp1_jump_t_part1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P1_new_op = EXPLICIT2OP(1, HEX_REG_CLASS_PRED_REGS, true); + RzILOpPure *P1_new = READ_REG(pkt, &P1_new_op, true); + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + RzILOpPure *pc = U32(pkt->pkt_addr); + + // r = r; + RzILOpEffect *imm_assign_5 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_11 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_12 = SETL("r", op_AND_11); + + // jump(pc + ((ut32) r)); + RzILOpPure *op_ADD_15 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *jump_op_ADD_15_16 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", op_ADD_15)); + + // 
seq(r; r = (r & -0x4); jump(pc + ((ut32) r))); + RzILOpEffect *seq_then_18 = SEQN(2, op_ASSIGN_12, jump_op_ADD_15_16); + + // if (! (((st32) P1_new) & 0x1)) {seq(r; r = (r & -0x4); jump(pc + ((ut32) r)))} else {{}}; + RzILOpPure *op_AND_3 = LOGAND(CAST(32, MSB(P1_new), DUP(P1_new)), SN(32, 1)); + RzILOpPure *op_INV_4 = INV(NON_ZERO(op_AND_3)); + RzILOpEffect *branch_19 = BRANCH(op_INV_4, seq_then_18, EMPTY()); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_5, branch_19); + return instruction_sequence; +} + +// if (cmp.gt(Ns.new,Rt)) jump:nt Ii +RzILOpEffect *hex_il_op_j4_cmpgt_t_jumpnv_nt(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp Ns_new_op = NREG2OP(bundle, 's'); + RzILOpPure *Ns_new = READ_REG(pkt, &Ns_new_op, true); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + RzILOpPure *pc = U32(pkt->pkt_addr); + + // r = r; + RzILOpEffect *imm_assign_4 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_10 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_11 = SETL("r", op_AND_10); + + // jump(pc + ((ut32) r)); + RzILOpPure *op_ADD_14 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *jump_op_ADD_14_15 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", op_ADD_14)); + + // seq(r; r = (r & -0x4); jump(pc + ((ut32) r))); + RzILOpEffect *seq_then_17 = SEQN(2, op_ASSIGN_11, jump_op_ADD_14_15); + + // if ((Ns_new > Rt)) {seq(r; r = (r & -0x4); jump(pc + ((ut32) r)))} else {{}}; + RzILOpPure *op_GT_3 = SGT(Ns_new, Rt); + RzILOpEffect *branch_18 = BRANCH(op_GT_3, seq_then_17, EMPTY()); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_4, branch_18); + return instruction_sequence; +} + +// if (cmp.gt(Ns.new,Rt)) jump:t Ii +RzILOpEffect *hex_il_op_j4_cmpgt_t_jumpnv_t(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + 
// READ + const HexOp Ns_new_op = NREG2OP(bundle, 's'); + RzILOpPure *Ns_new = READ_REG(pkt, &Ns_new_op, true); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + RzILOpPure *pc = U32(pkt->pkt_addr); + + // r = r; + RzILOpEffect *imm_assign_4 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_10 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_11 = SETL("r", op_AND_10); + + // jump(pc + ((ut32) r)); + RzILOpPure *op_ADD_14 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *jump_op_ADD_14_15 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", op_ADD_14)); + + // seq(r; r = (r & -0x4); jump(pc + ((ut32) r))); + RzILOpEffect *seq_then_17 = SEQN(2, op_ASSIGN_11, jump_op_ADD_14_15); + + // if ((Ns_new > Rt)) {seq(r; r = (r & -0x4); jump(pc + ((ut32) r)))} else {{}}; + RzILOpPure *op_GT_3 = SGT(Ns_new, Rt); + RzILOpEffect *branch_18 = BRANCH(op_GT_3, seq_then_17, EMPTY()); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_4, branch_18); + return instruction_sequence; +} + +// p0 = cmp.gt(Rs,Rt); if (p0.new) jump:nt Ii +RzILOpEffect *hex_il_op_j4_cmpgt_tp0_jump_nt_part0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P0_op = EXPLICIT2OP(0, HEX_REG_CLASS_PRED_REGS, false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // P0 = ((st8) ((Rs > Rt) ? 
0xff : 0x0)); + RzILOpPure *op_GT_3 = SGT(Rs, Rt); + RzILOpPure *cond_6 = ITE(op_GT_3, SN(32, 0xff), SN(32, 0)); + RzILOpEffect *op_ASSIGN_8 = WRITE_REG(bundle, &P0_op, CAST(8, MSB(cond_6), DUP(cond_6))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_8; + return instruction_sequence; +} + +// p0 = cmp.gt(Rs,Rt); if (p0.new) jump:nt Ii +RzILOpEffect *hex_il_op_j4_cmpgt_tp0_jump_nt_part1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P0_new_op = EXPLICIT2OP(0, HEX_REG_CLASS_PRED_REGS, true); + RzILOpPure *P0_new = READ_REG(pkt, &P0_new_op, true); + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + RzILOpPure *pc = U32(pkt->pkt_addr); + + // r = r; + RzILOpEffect *imm_assign_4 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_10 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_11 = SETL("r", op_AND_10); + + // jump(pc + ((ut32) r)); + RzILOpPure *op_ADD_14 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *jump_op_ADD_14_15 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", op_ADD_14)); + + // seq(r; r = (r & -0x4); jump(pc + ((ut32) r))); + RzILOpEffect *seq_then_17 = SEQN(2, op_ASSIGN_11, jump_op_ADD_14_15); + + // if ((((st32) P0_new) & 0x1)) {seq(r; r = (r & -0x4); jump(pc + ((ut32) r)))} else {{}}; + RzILOpPure *op_AND_3 = LOGAND(CAST(32, MSB(P0_new), DUP(P0_new)), SN(32, 1)); + RzILOpEffect *branch_18 = BRANCH(NON_ZERO(op_AND_3), seq_then_17, EMPTY()); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_4, branch_18); + return instruction_sequence; +} + +// p0 = cmp.gt(Rs,Rt); if (p0.new) jump:t Ii +RzILOpEffect *hex_il_op_j4_cmpgt_tp0_jump_t_part0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P0_op = EXPLICIT2OP(0, HEX_REG_CLASS_PRED_REGS, false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 
't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // P0 = ((st8) ((Rs > Rt) ? 0xff : 0x0)); + RzILOpPure *op_GT_3 = SGT(Rs, Rt); + RzILOpPure *cond_6 = ITE(op_GT_3, SN(32, 0xff), SN(32, 0)); + RzILOpEffect *op_ASSIGN_8 = WRITE_REG(bundle, &P0_op, CAST(8, MSB(cond_6), DUP(cond_6))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_8; + return instruction_sequence; +} + +// p0 = cmp.gt(Rs,Rt); if (p0.new) jump:t Ii +RzILOpEffect *hex_il_op_j4_cmpgt_tp0_jump_t_part1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P0_new_op = EXPLICIT2OP(0, HEX_REG_CLASS_PRED_REGS, true); + RzILOpPure *P0_new = READ_REG(pkt, &P0_new_op, true); + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + RzILOpPure *pc = U32(pkt->pkt_addr); + + // r = r; + RzILOpEffect *imm_assign_4 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_10 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_11 = SETL("r", op_AND_10); + + // jump(pc + ((ut32) r)); + RzILOpPure *op_ADD_14 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *jump_op_ADD_14_15 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", op_ADD_14)); + + // seq(r; r = (r & -0x4); jump(pc + ((ut32) r))); + RzILOpEffect *seq_then_17 = SEQN(2, op_ASSIGN_11, jump_op_ADD_14_15); + + // if ((((st32) P0_new) & 0x1)) {seq(r; r = (r & -0x4); jump(pc + ((ut32) r)))} else {{}}; + RzILOpPure *op_AND_3 = LOGAND(CAST(32, MSB(P0_new), DUP(P0_new)), SN(32, 1)); + RzILOpEffect *branch_18 = BRANCH(NON_ZERO(op_AND_3), seq_then_17, EMPTY()); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_4, branch_18); + return instruction_sequence; +} + +// p1 = cmp.gt(Rs,Rt); if (p1.new) jump:nt Ii +RzILOpEffect *hex_il_op_j4_cmpgt_tp1_jump_nt_part0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P1_op = EXPLICIT2OP(1, HEX_REG_CLASS_PRED_REGS, false); + const HexOp *Rs_op = ISA2REG(hi, 's', 
false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // P1 = ((st8) ((Rs > Rt) ? 0xff : 0x0)); + RzILOpPure *op_GT_3 = SGT(Rs, Rt); + RzILOpPure *cond_6 = ITE(op_GT_3, SN(32, 0xff), SN(32, 0)); + RzILOpEffect *op_ASSIGN_8 = WRITE_REG(bundle, &P1_op, CAST(8, MSB(cond_6), DUP(cond_6))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_8; + return instruction_sequence; +} + +// p1 = cmp.gt(Rs,Rt); if (p1.new) jump:nt Ii +RzILOpEffect *hex_il_op_j4_cmpgt_tp1_jump_nt_part1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P1_new_op = EXPLICIT2OP(1, HEX_REG_CLASS_PRED_REGS, true); + RzILOpPure *P1_new = READ_REG(pkt, &P1_new_op, true); + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + RzILOpPure *pc = U32(pkt->pkt_addr); + + // r = r; + RzILOpEffect *imm_assign_4 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_10 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_11 = SETL("r", op_AND_10); + + // jump(pc + ((ut32) r)); + RzILOpPure *op_ADD_14 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *jump_op_ADD_14_15 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", op_ADD_14)); + + // seq(r; r = (r & -0x4); jump(pc + ((ut32) r))); + RzILOpEffect *seq_then_17 = SEQN(2, op_ASSIGN_11, jump_op_ADD_14_15); + + // if ((((st32) P1_new) & 0x1)) {seq(r; r = (r & -0x4); jump(pc + ((ut32) r)))} else {{}}; + RzILOpPure *op_AND_3 = LOGAND(CAST(32, MSB(P1_new), DUP(P1_new)), SN(32, 1)); + RzILOpEffect *branch_18 = BRANCH(NON_ZERO(op_AND_3), seq_then_17, EMPTY()); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_4, branch_18); + return instruction_sequence; +} + +// p1 = cmp.gt(Rs,Rt); if (p1.new) jump:t Ii +RzILOpEffect *hex_il_op_j4_cmpgt_tp1_jump_t_part0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P1_op 
= EXPLICIT2OP(1, HEX_REG_CLASS_PRED_REGS, false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // P1 = ((st8) ((Rs > Rt) ? 0xff : 0x0)); + RzILOpPure *op_GT_3 = SGT(Rs, Rt); + RzILOpPure *cond_6 = ITE(op_GT_3, SN(32, 0xff), SN(32, 0)); + RzILOpEffect *op_ASSIGN_8 = WRITE_REG(bundle, &P1_op, CAST(8, MSB(cond_6), DUP(cond_6))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_8; + return instruction_sequence; +} + +// p1 = cmp.gt(Rs,Rt); if (p1.new) jump:t Ii +RzILOpEffect *hex_il_op_j4_cmpgt_tp1_jump_t_part1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P1_new_op = EXPLICIT2OP(1, HEX_REG_CLASS_PRED_REGS, true); + RzILOpPure *P1_new = READ_REG(pkt, &P1_new_op, true); + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + RzILOpPure *pc = U32(pkt->pkt_addr); + + // r = r; + RzILOpEffect *imm_assign_4 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_10 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_11 = SETL("r", op_AND_10); + + // jump(pc + ((ut32) r)); + RzILOpPure *op_ADD_14 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *jump_op_ADD_14_15 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", op_ADD_14)); + + // seq(r; r = (r & -0x4); jump(pc + ((ut32) r))); + RzILOpEffect *seq_then_17 = SEQN(2, op_ASSIGN_11, jump_op_ADD_14_15); + + // if ((((st32) P1_new) & 0x1)) {seq(r; r = (r & -0x4); jump(pc + ((ut32) r)))} else {{}}; + RzILOpPure *op_AND_3 = LOGAND(CAST(32, MSB(P1_new), DUP(P1_new)), SN(32, 1)); + RzILOpEffect *branch_18 = BRANCH(NON_ZERO(op_AND_3), seq_then_17, EMPTY()); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_4, branch_18); + return instruction_sequence; +} + +// if (!cmp.gt(Ns.new,II)) jump:nt Ii +RzILOpEffect *hex_il_op_j4_cmpgti_f_jumpnv_nt(HexInsnPktBundle *bundle) { + const HexInsn 
*hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp Ns_new_op = NREG2OP(bundle, 's'); + RzILOpPure *Ns_new = READ_REG(pkt, &Ns_new_op, true); + RzILOpPure *U = UN(32, (ut32)ISA2IMM(hi, 'U')); + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + RzILOpPure *pc = U32(pkt->pkt_addr); + + // U = U; + RzILOpEffect *imm_assign_2 = SETL("U", U); + + // r = r; + RzILOpEffect *imm_assign_7 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_13 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_14 = SETL("r", op_AND_13); + + // jump(pc + ((ut32) r)); + RzILOpPure *op_ADD_17 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *jump_op_ADD_17_18 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", op_ADD_17)); + + // seq(r; r = (r & -0x4); jump(pc + ((ut32) r))); + RzILOpEffect *seq_then_20 = SEQN(2, op_ASSIGN_14, jump_op_ADD_17_18); + + // if (! (((ut32) Ns_new) > U)) {seq(r; r = (r & -0x4); jump(pc + ((ut32) r)))} else {{}}; + RzILOpPure *op_GT_5 = UGT(CAST(32, IL_FALSE, Ns_new), VARL("U")); + RzILOpPure *op_INV_6 = INV(op_GT_5); + RzILOpEffect *branch_21 = BRANCH(op_INV_6, seq_then_20, EMPTY()); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_2, imm_assign_7, branch_21); + return instruction_sequence; +} + +// if (!cmp.gt(Ns.new,II)) jump:t Ii +RzILOpEffect *hex_il_op_j4_cmpgti_f_jumpnv_t(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp Ns_new_op = NREG2OP(bundle, 's'); + RzILOpPure *Ns_new = READ_REG(pkt, &Ns_new_op, true); + RzILOpPure *U = UN(32, (ut32)ISA2IMM(hi, 'U')); + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + RzILOpPure *pc = U32(pkt->pkt_addr); + + // U = U; + RzILOpEffect *imm_assign_2 = SETL("U", U); + + // r = r; + RzILOpEffect *imm_assign_7 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_13 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_14 = SETL("r", op_AND_13); + + // jump(pc + ((ut32) r)); + RzILOpPure 
*op_ADD_17 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *jump_op_ADD_17_18 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", op_ADD_17)); + + // seq(r; r = (r & -0x4); jump(pc + ((ut32) r))); + RzILOpEffect *seq_then_20 = SEQN(2, op_ASSIGN_14, jump_op_ADD_17_18); + + // if (! (((ut32) Ns_new) > U)) {seq(r; r = (r & -0x4); jump(pc + ((ut32) r)))} else {{}}; + RzILOpPure *op_GT_5 = UGT(CAST(32, IL_FALSE, Ns_new), VARL("U")); + RzILOpPure *op_INV_6 = INV(op_GT_5); + RzILOpEffect *branch_21 = BRANCH(op_INV_6, seq_then_20, EMPTY()); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_2, imm_assign_7, branch_21); + return instruction_sequence; +} + +// p0 = cmp.gt(Rs,II); if (!p0.new) jump:nt Ii +RzILOpEffect *hex_il_op_j4_cmpgti_fp0_jump_nt_part0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P0_op = EXPLICIT2OP(0, HEX_REG_CLASS_PRED_REGS, false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + RzILOpPure *U = UN(32, (ut32)ISA2IMM(hi, 'U')); + + // U = U; + RzILOpEffect *imm_assign_2 = SETL("U", U); + + // P0 = ((st8) ((((ut32) Rs) > U) ? 
0xff : 0x0)); + RzILOpPure *op_GT_5 = UGT(CAST(32, IL_FALSE, Rs), VARL("U")); + RzILOpPure *cond_8 = ITE(op_GT_5, SN(32, 0xff), SN(32, 0)); + RzILOpEffect *op_ASSIGN_10 = WRITE_REG(bundle, &P0_op, CAST(8, MSB(cond_8), DUP(cond_8))); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_2, op_ASSIGN_10); + return instruction_sequence; +} + +// p0 = cmp.gt(Rs,II); if (!p0.new) jump:nt Ii +RzILOpEffect *hex_il_op_j4_cmpgti_fp0_jump_nt_part1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P0_new_op = EXPLICIT2OP(0, HEX_REG_CLASS_PRED_REGS, true); + RzILOpPure *P0_new = READ_REG(pkt, &P0_new_op, true); + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + RzILOpPure *pc = U32(pkt->pkt_addr); + + // r = r; + RzILOpEffect *imm_assign_5 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_11 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_12 = SETL("r", op_AND_11); + + // jump(pc + ((ut32) r)); + RzILOpPure *op_ADD_15 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *jump_op_ADD_15_16 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", op_ADD_15)); + + // seq(r; r = (r & -0x4); jump(pc + ((ut32) r))); + RzILOpEffect *seq_then_18 = SEQN(2, op_ASSIGN_12, jump_op_ADD_15_16); + + // if (! 
(((st32) P0_new) & 0x1)) {seq(r; r = (r & -0x4); jump(pc + ((ut32) r)))} else {{}}; + RzILOpPure *op_AND_3 = LOGAND(CAST(32, MSB(P0_new), DUP(P0_new)), SN(32, 1)); + RzILOpPure *op_INV_4 = INV(NON_ZERO(op_AND_3)); + RzILOpEffect *branch_19 = BRANCH(op_INV_4, seq_then_18, EMPTY()); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_5, branch_19); + return instruction_sequence; +} + +// p0 = cmp.gt(Rs,II); if (!p0.new) jump:t Ii +RzILOpEffect *hex_il_op_j4_cmpgti_fp0_jump_t_part0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P0_op = EXPLICIT2OP(0, HEX_REG_CLASS_PRED_REGS, false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + RzILOpPure *U = UN(32, (ut32)ISA2IMM(hi, 'U')); + + // U = U; + RzILOpEffect *imm_assign_2 = SETL("U", U); + + // P0 = ((st8) ((((ut32) Rs) > U) ? 0xff : 0x0)); + RzILOpPure *op_GT_5 = UGT(CAST(32, IL_FALSE, Rs), VARL("U")); + RzILOpPure *cond_8 = ITE(op_GT_5, SN(32, 0xff), SN(32, 0)); + RzILOpEffect *op_ASSIGN_10 = WRITE_REG(bundle, &P0_op, CAST(8, MSB(cond_8), DUP(cond_8))); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_2, op_ASSIGN_10); + return instruction_sequence; +} + +// p0 = cmp.gt(Rs,II); if (!p0.new) jump:t Ii +RzILOpEffect *hex_il_op_j4_cmpgti_fp0_jump_t_part1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P0_new_op = EXPLICIT2OP(0, HEX_REG_CLASS_PRED_REGS, true); + RzILOpPure *P0_new = READ_REG(pkt, &P0_new_op, true); + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + RzILOpPure *pc = U32(pkt->pkt_addr); + + // r = r; + RzILOpEffect *imm_assign_5 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_11 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_12 = SETL("r", op_AND_11); + + // jump(pc + ((ut32) r)); + RzILOpPure *op_ADD_15 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *jump_op_ADD_15_16 
= SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", op_ADD_15)); + + // seq(r; r = (r & -0x4); jump(pc + ((ut32) r))); + RzILOpEffect *seq_then_18 = SEQN(2, op_ASSIGN_12, jump_op_ADD_15_16); + + // if (! (((st32) P0_new) & 0x1)) {seq(r; r = (r & -0x4); jump(pc + ((ut32) r)))} else {{}}; + RzILOpPure *op_AND_3 = LOGAND(CAST(32, MSB(P0_new), DUP(P0_new)), SN(32, 1)); + RzILOpPure *op_INV_4 = INV(NON_ZERO(op_AND_3)); + RzILOpEffect *branch_19 = BRANCH(op_INV_4, seq_then_18, EMPTY()); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_5, branch_19); + return instruction_sequence; +} + +// p1 = cmp.gt(Rs,II); if (!p1.new) jump:nt Ii +RzILOpEffect *hex_il_op_j4_cmpgti_fp1_jump_nt_part0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P1_op = EXPLICIT2OP(1, HEX_REG_CLASS_PRED_REGS, false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + RzILOpPure *U = UN(32, (ut32)ISA2IMM(hi, 'U')); + + // U = U; + RzILOpEffect *imm_assign_2 = SETL("U", U); + + // P1 = ((st8) ((((ut32) Rs) > U) ? 
0xff : 0x0)); + RzILOpPure *op_GT_5 = UGT(CAST(32, IL_FALSE, Rs), VARL("U")); + RzILOpPure *cond_8 = ITE(op_GT_5, SN(32, 0xff), SN(32, 0)); + RzILOpEffect *op_ASSIGN_10 = WRITE_REG(bundle, &P1_op, CAST(8, MSB(cond_8), DUP(cond_8))); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_2, op_ASSIGN_10); + return instruction_sequence; +} + +// p1 = cmp.gt(Rs,II); if (!p1.new) jump:nt Ii +RzILOpEffect *hex_il_op_j4_cmpgti_fp1_jump_nt_part1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P1_new_op = EXPLICIT2OP(1, HEX_REG_CLASS_PRED_REGS, true); + RzILOpPure *P1_new = READ_REG(pkt, &P1_new_op, true); + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + RzILOpPure *pc = U32(pkt->pkt_addr); + + // r = r; + RzILOpEffect *imm_assign_5 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_11 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_12 = SETL("r", op_AND_11); + + // jump(pc + ((ut32) r)); + RzILOpPure *op_ADD_15 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *jump_op_ADD_15_16 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", op_ADD_15)); + + // seq(r; r = (r & -0x4); jump(pc + ((ut32) r))); + RzILOpEffect *seq_then_18 = SEQN(2, op_ASSIGN_12, jump_op_ADD_15_16); + + // if (! 
(((st32) P1_new) & 0x1)) {seq(r; r = (r & -0x4); jump(pc + ((ut32) r)))} else {{}}; + RzILOpPure *op_AND_3 = LOGAND(CAST(32, MSB(P1_new), DUP(P1_new)), SN(32, 1)); + RzILOpPure *op_INV_4 = INV(NON_ZERO(op_AND_3)); + RzILOpEffect *branch_19 = BRANCH(op_INV_4, seq_then_18, EMPTY()); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_5, branch_19); + return instruction_sequence; +} + +// p1 = cmp.gt(Rs,II); if (!p1.new) jump:t Ii +RzILOpEffect *hex_il_op_j4_cmpgti_fp1_jump_t_part0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P1_op = EXPLICIT2OP(1, HEX_REG_CLASS_PRED_REGS, false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + RzILOpPure *U = UN(32, (ut32)ISA2IMM(hi, 'U')); + + // U = U; + RzILOpEffect *imm_assign_2 = SETL("U", U); + + // P1 = ((st8) ((((ut32) Rs) > U) ? 0xff : 0x0)); + RzILOpPure *op_GT_5 = UGT(CAST(32, IL_FALSE, Rs), VARL("U")); + RzILOpPure *cond_8 = ITE(op_GT_5, SN(32, 0xff), SN(32, 0)); + RzILOpEffect *op_ASSIGN_10 = WRITE_REG(bundle, &P1_op, CAST(8, MSB(cond_8), DUP(cond_8))); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_2, op_ASSIGN_10); + return instruction_sequence; +} + +// p1 = cmp.gt(Rs,II); if (!p1.new) jump:t Ii +RzILOpEffect *hex_il_op_j4_cmpgti_fp1_jump_t_part1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P1_new_op = EXPLICIT2OP(1, HEX_REG_CLASS_PRED_REGS, true); + RzILOpPure *P1_new = READ_REG(pkt, &P1_new_op, true); + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + RzILOpPure *pc = U32(pkt->pkt_addr); + + // r = r; + RzILOpEffect *imm_assign_5 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_11 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_12 = SETL("r", op_AND_11); + + // jump(pc + ((ut32) r)); + RzILOpPure *op_ADD_15 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *jump_op_ADD_15_16 
= SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", op_ADD_15)); + + // seq(r; r = (r & -0x4); jump(pc + ((ut32) r))); + RzILOpEffect *seq_then_18 = SEQN(2, op_ASSIGN_12, jump_op_ADD_15_16); + + // if (! (((st32) P1_new) & 0x1)) {seq(r; r = (r & -0x4); jump(pc + ((ut32) r)))} else {{}}; + RzILOpPure *op_AND_3 = LOGAND(CAST(32, MSB(P1_new), DUP(P1_new)), SN(32, 1)); + RzILOpPure *op_INV_4 = INV(NON_ZERO(op_AND_3)); + RzILOpEffect *branch_19 = BRANCH(op_INV_4, seq_then_18, EMPTY()); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_5, branch_19); + return instruction_sequence; +} + +// if (cmp.gt(Ns.new,II)) jump:nt Ii +RzILOpEffect *hex_il_op_j4_cmpgti_t_jumpnv_nt(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp Ns_new_op = NREG2OP(bundle, 's'); + RzILOpPure *Ns_new = READ_REG(pkt, &Ns_new_op, true); + RzILOpPure *U = UN(32, (ut32)ISA2IMM(hi, 'U')); + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + RzILOpPure *pc = U32(pkt->pkt_addr); + + // U = U; + RzILOpEffect *imm_assign_2 = SETL("U", U); + + // r = r; + RzILOpEffect *imm_assign_6 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_12 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_13 = SETL("r", op_AND_12); + + // jump(pc + ((ut32) r)); + RzILOpPure *op_ADD_16 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *jump_op_ADD_16_17 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", op_ADD_16)); + + // seq(r; r = (r & -0x4); jump(pc + ((ut32) r))); + RzILOpEffect *seq_then_19 = SEQN(2, op_ASSIGN_13, jump_op_ADD_16_17); + + // if ((((ut32) Ns_new) > U)) {seq(r; r = (r & -0x4); jump(pc + ((ut32) r)))} else {{}}; + RzILOpPure *op_GT_5 = UGT(CAST(32, IL_FALSE, Ns_new), VARL("U")); + RzILOpEffect *branch_20 = BRANCH(op_GT_5, seq_then_19, EMPTY()); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_2, imm_assign_6, branch_20); + return instruction_sequence; +} + +// if (cmp.gt(Ns.new,II)) jump:t Ii 
+RzILOpEffect *hex_il_op_j4_cmpgti_t_jumpnv_t(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp Ns_new_op = NREG2OP(bundle, 's'); + RzILOpPure *Ns_new = READ_REG(pkt, &Ns_new_op, true); + RzILOpPure *U = UN(32, (ut32)ISA2IMM(hi, 'U')); + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + RzILOpPure *pc = U32(pkt->pkt_addr); + + // U = U; + RzILOpEffect *imm_assign_2 = SETL("U", U); + + // r = r; + RzILOpEffect *imm_assign_6 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_12 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_13 = SETL("r", op_AND_12); + + // jump(pc + ((ut32) r)); + RzILOpPure *op_ADD_16 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *jump_op_ADD_16_17 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", op_ADD_16)); + + // seq(r; r = (r & -0x4); jump(pc + ((ut32) r))); + RzILOpEffect *seq_then_19 = SEQN(2, op_ASSIGN_13, jump_op_ADD_16_17); + + // if ((((ut32) Ns_new) > U)) {seq(r; r = (r & -0x4); jump(pc + ((ut32) r)))} else {{}}; + RzILOpPure *op_GT_5 = UGT(CAST(32, IL_FALSE, Ns_new), VARL("U")); + RzILOpEffect *branch_20 = BRANCH(op_GT_5, seq_then_19, EMPTY()); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_2, imm_assign_6, branch_20); + return instruction_sequence; +} + +// p0 = cmp.gt(Rs,II); if (p0.new) jump:nt Ii +RzILOpEffect *hex_il_op_j4_cmpgti_tp0_jump_nt_part0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P0_op = EXPLICIT2OP(0, HEX_REG_CLASS_PRED_REGS, false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + RzILOpPure *U = UN(32, (ut32)ISA2IMM(hi, 'U')); + + // U = U; + RzILOpEffect *imm_assign_2 = SETL("U", U); + + // P0 = ((st8) ((((ut32) Rs) > U) ? 
0xff : 0x0)); + RzILOpPure *op_GT_5 = UGT(CAST(32, IL_FALSE, Rs), VARL("U")); + RzILOpPure *cond_8 = ITE(op_GT_5, SN(32, 0xff), SN(32, 0)); + RzILOpEffect *op_ASSIGN_10 = WRITE_REG(bundle, &P0_op, CAST(8, MSB(cond_8), DUP(cond_8))); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_2, op_ASSIGN_10); + return instruction_sequence; +} + +// p0 = cmp.gt(Rs,II); if (p0.new) jump:nt Ii +RzILOpEffect *hex_il_op_j4_cmpgti_tp0_jump_nt_part1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P0_new_op = EXPLICIT2OP(0, HEX_REG_CLASS_PRED_REGS, true); + RzILOpPure *P0_new = READ_REG(pkt, &P0_new_op, true); + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + RzILOpPure *pc = U32(pkt->pkt_addr); + + // r = r; + RzILOpEffect *imm_assign_4 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_10 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_11 = SETL("r", op_AND_10); + + // jump(pc + ((ut32) r)); + RzILOpPure *op_ADD_14 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *jump_op_ADD_14_15 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", op_ADD_14)); + + // seq(r; r = (r & -0x4); jump(pc + ((ut32) r))); + RzILOpEffect *seq_then_17 = SEQN(2, op_ASSIGN_11, jump_op_ADD_14_15); + + // if ((((st32) P0_new) & 0x1)) {seq(r; r = (r & -0x4); jump(pc + ((ut32) r)))} else {{}}; + RzILOpPure *op_AND_3 = LOGAND(CAST(32, MSB(P0_new), DUP(P0_new)), SN(32, 1)); + RzILOpEffect *branch_18 = BRANCH(NON_ZERO(op_AND_3), seq_then_17, EMPTY()); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_4, branch_18); + return instruction_sequence; +} + +// p0 = cmp.gt(Rs,II); if (p0.new) jump:t Ii +RzILOpEffect *hex_il_op_j4_cmpgti_tp0_jump_t_part0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P0_op = EXPLICIT2OP(0, HEX_REG_CLASS_PRED_REGS, false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, 
Rs_op, false); + RzILOpPure *U = UN(32, (ut32)ISA2IMM(hi, 'U')); + + // U = U; + RzILOpEffect *imm_assign_2 = SETL("U", U); + + // P0 = ((st8) ((((ut32) Rs) > U) ? 0xff : 0x0)); + RzILOpPure *op_GT_5 = UGT(CAST(32, IL_FALSE, Rs), VARL("U")); + RzILOpPure *cond_8 = ITE(op_GT_5, SN(32, 0xff), SN(32, 0)); + RzILOpEffect *op_ASSIGN_10 = WRITE_REG(bundle, &P0_op, CAST(8, MSB(cond_8), DUP(cond_8))); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_2, op_ASSIGN_10); + return instruction_sequence; +} + +// p0 = cmp.gt(Rs,II); if (p0.new) jump:t Ii +RzILOpEffect *hex_il_op_j4_cmpgti_tp0_jump_t_part1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P0_new_op = EXPLICIT2OP(0, HEX_REG_CLASS_PRED_REGS, true); + RzILOpPure *P0_new = READ_REG(pkt, &P0_new_op, true); + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + RzILOpPure *pc = U32(pkt->pkt_addr); + + // r = r; + RzILOpEffect *imm_assign_4 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_10 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_11 = SETL("r", op_AND_10); + + // jump(pc + ((ut32) r)); + RzILOpPure *op_ADD_14 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *jump_op_ADD_14_15 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", op_ADD_14)); + + // seq(r; r = (r & -0x4); jump(pc + ((ut32) r))); + RzILOpEffect *seq_then_17 = SEQN(2, op_ASSIGN_11, jump_op_ADD_14_15); + + // if ((((st32) P0_new) & 0x1)) {seq(r; r = (r & -0x4); jump(pc + ((ut32) r)))} else {{}}; + RzILOpPure *op_AND_3 = LOGAND(CAST(32, MSB(P0_new), DUP(P0_new)), SN(32, 1)); + RzILOpEffect *branch_18 = BRANCH(NON_ZERO(op_AND_3), seq_then_17, EMPTY()); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_4, branch_18); + return instruction_sequence; +} + +// p1 = cmp.gt(Rs,II); if (p1.new) jump:nt Ii +RzILOpEffect *hex_il_op_j4_cmpgti_tp1_jump_nt_part0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = 
bundle->pkt; + // READ + const HexOp P1_op = EXPLICIT2OP(1, HEX_REG_CLASS_PRED_REGS, false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + RzILOpPure *U = UN(32, (ut32)ISA2IMM(hi, 'U')); + + // U = U; + RzILOpEffect *imm_assign_2 = SETL("U", U); + + // P1 = ((st8) ((((ut32) Rs) > U) ? 0xff : 0x0)); + RzILOpPure *op_GT_5 = UGT(CAST(32, IL_FALSE, Rs), VARL("U")); + RzILOpPure *cond_8 = ITE(op_GT_5, SN(32, 0xff), SN(32, 0)); + RzILOpEffect *op_ASSIGN_10 = WRITE_REG(bundle, &P1_op, CAST(8, MSB(cond_8), DUP(cond_8))); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_2, op_ASSIGN_10); + return instruction_sequence; +} + +// p1 = cmp.gt(Rs,II); if (p1.new) jump:nt Ii +RzILOpEffect *hex_il_op_j4_cmpgti_tp1_jump_nt_part1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P1_new_op = EXPLICIT2OP(1, HEX_REG_CLASS_PRED_REGS, true); + RzILOpPure *P1_new = READ_REG(pkt, &P1_new_op, true); + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + RzILOpPure *pc = U32(pkt->pkt_addr); + + // r = r; + RzILOpEffect *imm_assign_4 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_10 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_11 = SETL("r", op_AND_10); + + // jump(pc + ((ut32) r)); + RzILOpPure *op_ADD_14 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *jump_op_ADD_14_15 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", op_ADD_14)); + + // seq(r; r = (r & -0x4); jump(pc + ((ut32) r))); + RzILOpEffect *seq_then_17 = SEQN(2, op_ASSIGN_11, jump_op_ADD_14_15); + + // if ((((st32) P1_new) & 0x1)) {seq(r; r = (r & -0x4); jump(pc + ((ut32) r)))} else {{}}; + RzILOpPure *op_AND_3 = LOGAND(CAST(32, MSB(P1_new), DUP(P1_new)), SN(32, 1)); + RzILOpEffect *branch_18 = BRANCH(NON_ZERO(op_AND_3), seq_then_17, EMPTY()); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_4, branch_18); + return instruction_sequence; +} + +// p1 = 
cmp.gt(Rs,II); if (p1.new) jump:t Ii +RzILOpEffect *hex_il_op_j4_cmpgti_tp1_jump_t_part0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P1_op = EXPLICIT2OP(1, HEX_REG_CLASS_PRED_REGS, false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + RzILOpPure *U = UN(32, (ut32)ISA2IMM(hi, 'U')); + + // U = U; + RzILOpEffect *imm_assign_2 = SETL("U", U); + + // P1 = ((st8) ((((ut32) Rs) > U) ? 0xff : 0x0)); + RzILOpPure *op_GT_5 = UGT(CAST(32, IL_FALSE, Rs), VARL("U")); + RzILOpPure *cond_8 = ITE(op_GT_5, SN(32, 0xff), SN(32, 0)); + RzILOpEffect *op_ASSIGN_10 = WRITE_REG(bundle, &P1_op, CAST(8, MSB(cond_8), DUP(cond_8))); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_2, op_ASSIGN_10); + return instruction_sequence; +} + +// p1 = cmp.gt(Rs,II); if (p1.new) jump:t Ii +RzILOpEffect *hex_il_op_j4_cmpgti_tp1_jump_t_part1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P1_new_op = EXPLICIT2OP(1, HEX_REG_CLASS_PRED_REGS, true); + RzILOpPure *P1_new = READ_REG(pkt, &P1_new_op, true); + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + RzILOpPure *pc = U32(pkt->pkt_addr); + + // r = r; + RzILOpEffect *imm_assign_4 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_10 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_11 = SETL("r", op_AND_10); + + // jump(pc + ((ut32) r)); + RzILOpPure *op_ADD_14 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *jump_op_ADD_14_15 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", op_ADD_14)); + + // seq(r; r = (r & -0x4); jump(pc + ((ut32) r))); + RzILOpEffect *seq_then_17 = SEQN(2, op_ASSIGN_11, jump_op_ADD_14_15); + + // if ((((st32) P1_new) & 0x1)) {seq(r; r = (r & -0x4); jump(pc + ((ut32) r)))} else {{}}; + RzILOpPure *op_AND_3 = LOGAND(CAST(32, MSB(P1_new), DUP(P1_new)), SN(32, 1)); + RzILOpEffect *branch_18 = 
BRANCH(NON_ZERO(op_AND_3), seq_then_17, EMPTY()); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_4, branch_18); + return instruction_sequence; +} + +// if (!cmp.gt(Ns.new,n1)) jump:nt Ii +RzILOpEffect *hex_il_op_j4_cmpgtn1_f_jumpnv_nt(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp Ns_new_op = NREG2OP(bundle, 's'); + RzILOpPure *Ns_new = READ_REG(pkt, &Ns_new_op, true); + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + RzILOpPure *pc = U32(pkt->pkt_addr); + + // r = r; + RzILOpEffect *imm_assign_6 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_12 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_13 = SETL("r", op_AND_12); + + // jump(pc + ((ut32) r)); + RzILOpPure *op_ADD_16 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *jump_op_ADD_16_17 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", op_ADD_16)); + + // seq(r; r = (r & -0x4); jump(pc + ((ut32) r))); + RzILOpEffect *seq_then_19 = SEQN(2, op_ASSIGN_13, jump_op_ADD_16_17); + + // if (! 
(Ns_new > -0x1)) {seq(r; r = (r & -0x4); jump(pc + ((ut32) r)))} else {{}}; + RzILOpPure *op_GT_4 = SGT(Ns_new, SN(32, -1)); + RzILOpPure *op_INV_5 = INV(op_GT_4); + RzILOpEffect *branch_20 = BRANCH(op_INV_5, seq_then_19, EMPTY()); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_6, branch_20); + return instruction_sequence; +} + +// if (!cmp.gt(Ns.new,n1)) jump:t Ii +RzILOpEffect *hex_il_op_j4_cmpgtn1_f_jumpnv_t(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp Ns_new_op = NREG2OP(bundle, 's'); + RzILOpPure *Ns_new = READ_REG(pkt, &Ns_new_op, true); + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + RzILOpPure *pc = U32(pkt->pkt_addr); + + // r = r; + RzILOpEffect *imm_assign_6 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_12 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_13 = SETL("r", op_AND_12); + + // jump(pc + ((ut32) r)); + RzILOpPure *op_ADD_16 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *jump_op_ADD_16_17 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", op_ADD_16)); + + // seq(r; r = (r & -0x4); jump(pc + ((ut32) r))); + RzILOpEffect *seq_then_19 = SEQN(2, op_ASSIGN_13, jump_op_ADD_16_17); + + // if (! 
(Ns_new > -0x1)) {seq(r; r = (r & -0x4); jump(pc + ((ut32) r)))} else {{}}; + RzILOpPure *op_GT_4 = SGT(Ns_new, SN(32, -1)); + RzILOpPure *op_INV_5 = INV(op_GT_4); + RzILOpEffect *branch_20 = BRANCH(op_INV_5, seq_then_19, EMPTY()); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_6, branch_20); + return instruction_sequence; +} + +// p0 = cmp.gt(Rs,n1); if (!p0.new) jump:nt Ii +RzILOpEffect *hex_il_op_j4_cmpgtn1_fp0_jump_nt_part0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P0_op = EXPLICIT2OP(0, HEX_REG_CLASS_PRED_REGS, false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // P0 = ((st8) ((Rs > -0x1) ? 0xff : 0x0)); + RzILOpPure *op_GT_4 = SGT(Rs, SN(32, -1)); + RzILOpPure *cond_7 = ITE(op_GT_4, SN(32, 0xff), SN(32, 0)); + RzILOpEffect *op_ASSIGN_9 = WRITE_REG(bundle, &P0_op, CAST(8, MSB(cond_7), DUP(cond_7))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_9; + return instruction_sequence; +} + +// p0 = cmp.gt(Rs,n1); if (!p0.new) jump:nt Ii +RzILOpEffect *hex_il_op_j4_cmpgtn1_fp0_jump_nt_part1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P0_new_op = EXPLICIT2OP(0, HEX_REG_CLASS_PRED_REGS, true); + RzILOpPure *P0_new = READ_REG(pkt, &P0_new_op, true); + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + RzILOpPure *pc = U32(pkt->pkt_addr); + + // r = r; + RzILOpEffect *imm_assign_5 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_11 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_12 = SETL("r", op_AND_11); + + // jump(pc + ((ut32) r)); + RzILOpPure *op_ADD_15 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *jump_op_ADD_15_16 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", op_ADD_15)); + + // seq(r; r = (r & -0x4); jump(pc + ((ut32) r))); + RzILOpEffect *seq_then_18 = SEQN(2, op_ASSIGN_12, jump_op_ADD_15_16); + + // if 
(! (((st32) P0_new) & 0x1)) {seq(r; r = (r & -0x4); jump(pc + ((ut32) r)))} else {{}}; + RzILOpPure *op_AND_3 = LOGAND(CAST(32, MSB(P0_new), DUP(P0_new)), SN(32, 1)); + RzILOpPure *op_INV_4 = INV(NON_ZERO(op_AND_3)); + RzILOpEffect *branch_19 = BRANCH(op_INV_4, seq_then_18, EMPTY()); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_5, branch_19); + return instruction_sequence; +} + +// p0 = cmp.gt(Rs,n1); if (!p0.new) jump:t Ii +RzILOpEffect *hex_il_op_j4_cmpgtn1_fp0_jump_t_part0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P0_op = EXPLICIT2OP(0, HEX_REG_CLASS_PRED_REGS, false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // P0 = ((st8) ((Rs > -0x1) ? 0xff : 0x0)); + RzILOpPure *op_GT_4 = SGT(Rs, SN(32, -1)); + RzILOpPure *cond_7 = ITE(op_GT_4, SN(32, 0xff), SN(32, 0)); + RzILOpEffect *op_ASSIGN_9 = WRITE_REG(bundle, &P0_op, CAST(8, MSB(cond_7), DUP(cond_7))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_9; + return instruction_sequence; +} + +// p0 = cmp.gt(Rs,n1); if (!p0.new) jump:t Ii +RzILOpEffect *hex_il_op_j4_cmpgtn1_fp0_jump_t_part1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P0_new_op = EXPLICIT2OP(0, HEX_REG_CLASS_PRED_REGS, true); + RzILOpPure *P0_new = READ_REG(pkt, &P0_new_op, true); + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + RzILOpPure *pc = U32(pkt->pkt_addr); + + // r = r; + RzILOpEffect *imm_assign_5 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_11 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_12 = SETL("r", op_AND_11); + + // jump(pc + ((ut32) r)); + RzILOpPure *op_ADD_15 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *jump_op_ADD_15_16 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", op_ADD_15)); + + // seq(r; r = (r & -0x4); jump(pc + ((ut32) r))); + RzILOpEffect *seq_then_18 = 
SEQN(2, op_ASSIGN_12, jump_op_ADD_15_16); + + // if (! (((st32) P0_new) & 0x1)) {seq(r; r = (r & -0x4); jump(pc + ((ut32) r)))} else {{}}; + RzILOpPure *op_AND_3 = LOGAND(CAST(32, MSB(P0_new), DUP(P0_new)), SN(32, 1)); + RzILOpPure *op_INV_4 = INV(NON_ZERO(op_AND_3)); + RzILOpEffect *branch_19 = BRANCH(op_INV_4, seq_then_18, EMPTY()); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_5, branch_19); + return instruction_sequence; +} + +// p1 = cmp.gt(Rs,n1); if (!p1.new) jump:nt Ii +RzILOpEffect *hex_il_op_j4_cmpgtn1_fp1_jump_nt_part0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P1_op = EXPLICIT2OP(1, HEX_REG_CLASS_PRED_REGS, false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // P1 = ((st8) ((Rs > -0x1) ? 0xff : 0x0)); + RzILOpPure *op_GT_4 = SGT(Rs, SN(32, -1)); + RzILOpPure *cond_7 = ITE(op_GT_4, SN(32, 0xff), SN(32, 0)); + RzILOpEffect *op_ASSIGN_9 = WRITE_REG(bundle, &P1_op, CAST(8, MSB(cond_7), DUP(cond_7))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_9; + return instruction_sequence; +} + +// p1 = cmp.gt(Rs,n1); if (!p1.new) jump:nt Ii +RzILOpEffect *hex_il_op_j4_cmpgtn1_fp1_jump_nt_part1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P1_new_op = EXPLICIT2OP(1, HEX_REG_CLASS_PRED_REGS, true); + RzILOpPure *P1_new = READ_REG(pkt, &P1_new_op, true); + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + RzILOpPure *pc = U32(pkt->pkt_addr); + + // r = r; + RzILOpEffect *imm_assign_5 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_11 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_12 = SETL("r", op_AND_11); + + // jump(pc + ((ut32) r)); + RzILOpPure *op_ADD_15 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *jump_op_ADD_15_16 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", op_ADD_15)); + + // seq(r; r = (r & -0x4); 
jump(pc + ((ut32) r))); + RzILOpEffect *seq_then_18 = SEQN(2, op_ASSIGN_12, jump_op_ADD_15_16); + + // if (! (((st32) P1_new) & 0x1)) {seq(r; r = (r & -0x4); jump(pc + ((ut32) r)))} else {{}}; + RzILOpPure *op_AND_3 = LOGAND(CAST(32, MSB(P1_new), DUP(P1_new)), SN(32, 1)); + RzILOpPure *op_INV_4 = INV(NON_ZERO(op_AND_3)); + RzILOpEffect *branch_19 = BRANCH(op_INV_4, seq_then_18, EMPTY()); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_5, branch_19); + return instruction_sequence; +} + +// p1 = cmp.gt(Rs,n1); if (!p1.new) jump:t Ii +RzILOpEffect *hex_il_op_j4_cmpgtn1_fp1_jump_t_part0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P1_op = EXPLICIT2OP(1, HEX_REG_CLASS_PRED_REGS, false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // P1 = ((st8) ((Rs > -0x1) ? 0xff : 0x0)); + RzILOpPure *op_GT_4 = SGT(Rs, SN(32, -1)); + RzILOpPure *cond_7 = ITE(op_GT_4, SN(32, 0xff), SN(32, 0)); + RzILOpEffect *op_ASSIGN_9 = WRITE_REG(bundle, &P1_op, CAST(8, MSB(cond_7), DUP(cond_7))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_9; + return instruction_sequence; +} + +// p1 = cmp.gt(Rs,n1); if (!p1.new) jump:t Ii +RzILOpEffect *hex_il_op_j4_cmpgtn1_fp1_jump_t_part1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P1_new_op = EXPLICIT2OP(1, HEX_REG_CLASS_PRED_REGS, true); + RzILOpPure *P1_new = READ_REG(pkt, &P1_new_op, true); + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + RzILOpPure *pc = U32(pkt->pkt_addr); + + // r = r; + RzILOpEffect *imm_assign_5 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_11 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_12 = SETL("r", op_AND_11); + + // jump(pc + ((ut32) r)); + RzILOpPure *op_ADD_15 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *jump_op_ADD_15_16 = SEQ2(SETL("jump_flag", IL_TRUE), 
SETL("jump_target", op_ADD_15)); + + // seq(r; r = (r & -0x4); jump(pc + ((ut32) r))); + RzILOpEffect *seq_then_18 = SEQN(2, op_ASSIGN_12, jump_op_ADD_15_16); + + // if (! (((st32) P1_new) & 0x1)) {seq(r; r = (r & -0x4); jump(pc + ((ut32) r)))} else {{}}; + RzILOpPure *op_AND_3 = LOGAND(CAST(32, MSB(P1_new), DUP(P1_new)), SN(32, 1)); + RzILOpPure *op_INV_4 = INV(NON_ZERO(op_AND_3)); + RzILOpEffect *branch_19 = BRANCH(op_INV_4, seq_then_18, EMPTY()); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_5, branch_19); + return instruction_sequence; +} + +// if (cmp.gt(Ns.new,n1)) jump:nt Ii +RzILOpEffect *hex_il_op_j4_cmpgtn1_t_jumpnv_nt(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp Ns_new_op = NREG2OP(bundle, 's'); + RzILOpPure *Ns_new = READ_REG(pkt, &Ns_new_op, true); + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + RzILOpPure *pc = U32(pkt->pkt_addr); + + // r = r; + RzILOpEffect *imm_assign_5 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_11 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_12 = SETL("r", op_AND_11); + + // jump(pc + ((ut32) r)); + RzILOpPure *op_ADD_15 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *jump_op_ADD_15_16 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", op_ADD_15)); + + // seq(r; r = (r & -0x4); jump(pc + ((ut32) r))); + RzILOpEffect *seq_then_18 = SEQN(2, op_ASSIGN_12, jump_op_ADD_15_16); + + // if ((Ns_new > -0x1)) {seq(r; r = (r & -0x4); jump(pc + ((ut32) r)))} else {{}}; + RzILOpPure *op_GT_4 = SGT(Ns_new, SN(32, -1)); + RzILOpEffect *branch_19 = BRANCH(op_GT_4, seq_then_18, EMPTY()); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_5, branch_19); + return instruction_sequence; +} + +// if (cmp.gt(Ns.new,n1)) jump:t Ii +RzILOpEffect *hex_il_op_j4_cmpgtn1_t_jumpnv_t(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp Ns_new_op = 
NREG2OP(bundle, 's'); + RzILOpPure *Ns_new = READ_REG(pkt, &Ns_new_op, true); + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + RzILOpPure *pc = U32(pkt->pkt_addr); + + // r = r; + RzILOpEffect *imm_assign_5 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_11 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_12 = SETL("r", op_AND_11); + + // jump(pc + ((ut32) r)); + RzILOpPure *op_ADD_15 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *jump_op_ADD_15_16 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", op_ADD_15)); + + // seq(r; r = (r & -0x4); jump(pc + ((ut32) r))); + RzILOpEffect *seq_then_18 = SEQN(2, op_ASSIGN_12, jump_op_ADD_15_16); + + // if ((Ns_new > -0x1)) {seq(r; r = (r & -0x4); jump(pc + ((ut32) r)))} else {{}}; + RzILOpPure *op_GT_4 = SGT(Ns_new, SN(32, -1)); + RzILOpEffect *branch_19 = BRANCH(op_GT_4, seq_then_18, EMPTY()); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_5, branch_19); + return instruction_sequence; +} + +// p0 = cmp.gt(Rs,n1); if (p0.new) jump:nt Ii +RzILOpEffect *hex_il_op_j4_cmpgtn1_tp0_jump_nt_part0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P0_op = EXPLICIT2OP(0, HEX_REG_CLASS_PRED_REGS, false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // P0 = ((st8) ((Rs > -0x1) ? 
0xff : 0x0)); + RzILOpPure *op_GT_4 = SGT(Rs, SN(32, -1)); + RzILOpPure *cond_7 = ITE(op_GT_4, SN(32, 0xff), SN(32, 0)); + RzILOpEffect *op_ASSIGN_9 = WRITE_REG(bundle, &P0_op, CAST(8, MSB(cond_7), DUP(cond_7))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_9; + return instruction_sequence; +} + +// p0 = cmp.gt(Rs,n1); if (p0.new) jump:nt Ii +RzILOpEffect *hex_il_op_j4_cmpgtn1_tp0_jump_nt_part1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P0_new_op = EXPLICIT2OP(0, HEX_REG_CLASS_PRED_REGS, true); + RzILOpPure *P0_new = READ_REG(pkt, &P0_new_op, true); + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + RzILOpPure *pc = U32(pkt->pkt_addr); + + // r = r; + RzILOpEffect *imm_assign_4 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_10 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_11 = SETL("r", op_AND_10); + + // jump(pc + ((ut32) r)); + RzILOpPure *op_ADD_14 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *jump_op_ADD_14_15 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", op_ADD_14)); + + // seq(r; r = (r & -0x4); jump(pc + ((ut32) r))); + RzILOpEffect *seq_then_17 = SEQN(2, op_ASSIGN_11, jump_op_ADD_14_15); + + // if ((((st32) P0_new) & 0x1)) {seq(r; r = (r & -0x4); jump(pc + ((ut32) r)))} else {{}}; + RzILOpPure *op_AND_3 = LOGAND(CAST(32, MSB(P0_new), DUP(P0_new)), SN(32, 1)); + RzILOpEffect *branch_18 = BRANCH(NON_ZERO(op_AND_3), seq_then_17, EMPTY()); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_4, branch_18); + return instruction_sequence; +} + +// p0 = cmp.gt(Rs,n1); if (p0.new) jump:t Ii +RzILOpEffect *hex_il_op_j4_cmpgtn1_tp0_jump_t_part0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P0_op = EXPLICIT2OP(0, HEX_REG_CLASS_PRED_REGS, false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // P0 = ((st8) ((Rs > 
-0x1) ? 0xff : 0x0)); + RzILOpPure *op_GT_4 = SGT(Rs, SN(32, -1)); + RzILOpPure *cond_7 = ITE(op_GT_4, SN(32, 0xff), SN(32, 0)); + RzILOpEffect *op_ASSIGN_9 = WRITE_REG(bundle, &P0_op, CAST(8, MSB(cond_7), DUP(cond_7))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_9; + return instruction_sequence; +} + +// p0 = cmp.gt(Rs,n1); if (p0.new) jump:t Ii +RzILOpEffect *hex_il_op_j4_cmpgtn1_tp0_jump_t_part1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P0_new_op = EXPLICIT2OP(0, HEX_REG_CLASS_PRED_REGS, true); + RzILOpPure *P0_new = READ_REG(pkt, &P0_new_op, true); + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + RzILOpPure *pc = U32(pkt->pkt_addr); + + // r = r; + RzILOpEffect *imm_assign_4 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_10 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_11 = SETL("r", op_AND_10); + + // jump(pc + ((ut32) r)); + RzILOpPure *op_ADD_14 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *jump_op_ADD_14_15 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", op_ADD_14)); + + // seq(r; r = (r & -0x4); jump(pc + ((ut32) r))); + RzILOpEffect *seq_then_17 = SEQN(2, op_ASSIGN_11, jump_op_ADD_14_15); + + // if ((((st32) P0_new) & 0x1)) {seq(r; r = (r & -0x4); jump(pc + ((ut32) r)))} else {{}}; + RzILOpPure *op_AND_3 = LOGAND(CAST(32, MSB(P0_new), DUP(P0_new)), SN(32, 1)); + RzILOpEffect *branch_18 = BRANCH(NON_ZERO(op_AND_3), seq_then_17, EMPTY()); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_4, branch_18); + return instruction_sequence; +} + +// p1 = cmp.gt(Rs,n1); if (p1.new) jump:nt Ii +RzILOpEffect *hex_il_op_j4_cmpgtn1_tp1_jump_nt_part0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P1_op = EXPLICIT2OP(1, HEX_REG_CLASS_PRED_REGS, false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // P1 = ((st8) 
((Rs > -0x1) ? 0xff : 0x0)); + RzILOpPure *op_GT_4 = SGT(Rs, SN(32, -1)); + RzILOpPure *cond_7 = ITE(op_GT_4, SN(32, 0xff), SN(32, 0)); + RzILOpEffect *op_ASSIGN_9 = WRITE_REG(bundle, &P1_op, CAST(8, MSB(cond_7), DUP(cond_7))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_9; + return instruction_sequence; +} + +// p1 = cmp.gt(Rs,n1); if (p1.new) jump:nt Ii +RzILOpEffect *hex_il_op_j4_cmpgtn1_tp1_jump_nt_part1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P1_new_op = EXPLICIT2OP(1, HEX_REG_CLASS_PRED_REGS, true); + RzILOpPure *P1_new = READ_REG(pkt, &P1_new_op, true); + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + RzILOpPure *pc = U32(pkt->pkt_addr); + + // r = r; + RzILOpEffect *imm_assign_4 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_10 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_11 = SETL("r", op_AND_10); + + // jump(pc + ((ut32) r)); + RzILOpPure *op_ADD_14 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *jump_op_ADD_14_15 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", op_ADD_14)); + + // seq(r; r = (r & -0x4); jump(pc + ((ut32) r))); + RzILOpEffect *seq_then_17 = SEQN(2, op_ASSIGN_11, jump_op_ADD_14_15); + + // if ((((st32) P1_new) & 0x1)) {seq(r; r = (r & -0x4); jump(pc + ((ut32) r)))} else {{}}; + RzILOpPure *op_AND_3 = LOGAND(CAST(32, MSB(P1_new), DUP(P1_new)), SN(32, 1)); + RzILOpEffect *branch_18 = BRANCH(NON_ZERO(op_AND_3), seq_then_17, EMPTY()); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_4, branch_18); + return instruction_sequence; +} + +// p1 = cmp.gt(Rs,n1); if (p1.new) jump:t Ii +RzILOpEffect *hex_il_op_j4_cmpgtn1_tp1_jump_t_part0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P1_op = EXPLICIT2OP(1, HEX_REG_CLASS_PRED_REGS, false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // P1 = 
((st8) ((Rs > -0x1) ? 0xff : 0x0)); + RzILOpPure *op_GT_4 = SGT(Rs, SN(32, -1)); + RzILOpPure *cond_7 = ITE(op_GT_4, SN(32, 0xff), SN(32, 0)); + RzILOpEffect *op_ASSIGN_9 = WRITE_REG(bundle, &P1_op, CAST(8, MSB(cond_7), DUP(cond_7))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_9; + return instruction_sequence; +} + +// p1 = cmp.gt(Rs,n1); if (p1.new) jump:t Ii +RzILOpEffect *hex_il_op_j4_cmpgtn1_tp1_jump_t_part1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P1_new_op = EXPLICIT2OP(1, HEX_REG_CLASS_PRED_REGS, true); + RzILOpPure *P1_new = READ_REG(pkt, &P1_new_op, true); + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + RzILOpPure *pc = U32(pkt->pkt_addr); + + // r = r; + RzILOpEffect *imm_assign_4 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_10 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_11 = SETL("r", op_AND_10); + + // jump(pc + ((ut32) r)); + RzILOpPure *op_ADD_14 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *jump_op_ADD_14_15 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", op_ADD_14)); + + // seq(r; r = (r & -0x4); jump(pc + ((ut32) r))); + RzILOpEffect *seq_then_17 = SEQN(2, op_ASSIGN_11, jump_op_ADD_14_15); + + // if ((((st32) P1_new) & 0x1)) {seq(r; r = (r & -0x4); jump(pc + ((ut32) r)))} else {{}}; + RzILOpPure *op_AND_3 = LOGAND(CAST(32, MSB(P1_new), DUP(P1_new)), SN(32, 1)); + RzILOpEffect *branch_18 = BRANCH(NON_ZERO(op_AND_3), seq_then_17, EMPTY()); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_4, branch_18); + return instruction_sequence; +} + +// if (!cmp.gtu(Ns.new,Rt)) jump:nt Ii +RzILOpEffect *hex_il_op_j4_cmpgtu_f_jumpnv_nt(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp Ns_new_op = NREG2OP(bundle, 's'); + RzILOpPure *Ns_new = READ_REG(pkt, &Ns_new_op, true); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, 
Rt_op, false); + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + RzILOpPure *pc = U32(pkt->pkt_addr); + + // r = r; + RzILOpEffect *imm_assign_7 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_13 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_14 = SETL("r", op_AND_13); + + // jump(pc + ((ut32) r)); + RzILOpPure *op_ADD_17 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *jump_op_ADD_17_18 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", op_ADD_17)); + + // seq(r; r = (r & -0x4); jump(pc + ((ut32) r))); + RzILOpEffect *seq_then_20 = SEQN(2, op_ASSIGN_14, jump_op_ADD_17_18); + + // if (! (((ut32) Ns_new) > ((ut32) Rt))) {seq(r; r = (r & -0x4); jump(pc + ((ut32) r)))} else {{}}; + RzILOpPure *op_GT_5 = UGT(CAST(32, IL_FALSE, Ns_new), CAST(32, IL_FALSE, Rt)); + RzILOpPure *op_INV_6 = INV(op_GT_5); + RzILOpEffect *branch_21 = BRANCH(op_INV_6, seq_then_20, EMPTY()); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_7, branch_21); + return instruction_sequence; +} + +// if (!cmp.gtu(Ns.new,Rt)) jump:t Ii +RzILOpEffect *hex_il_op_j4_cmpgtu_f_jumpnv_t(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp Ns_new_op = NREG2OP(bundle, 's'); + RzILOpPure *Ns_new = READ_REG(pkt, &Ns_new_op, true); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + RzILOpPure *pc = U32(pkt->pkt_addr); + + // r = r; + RzILOpEffect *imm_assign_7 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_13 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_14 = SETL("r", op_AND_13); + + // jump(pc + ((ut32) r)); + RzILOpPure *op_ADD_17 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *jump_op_ADD_17_18 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", op_ADD_17)); + + // seq(r; r = (r & -0x4); jump(pc + ((ut32) r))); + RzILOpEffect *seq_then_20 = SEQN(2, op_ASSIGN_14, 
jump_op_ADD_17_18); + + // if (! (((ut32) Ns_new) > ((ut32) Rt))) {seq(r; r = (r & -0x4); jump(pc + ((ut32) r)))} else {{}}; + RzILOpPure *op_GT_5 = UGT(CAST(32, IL_FALSE, Ns_new), CAST(32, IL_FALSE, Rt)); + RzILOpPure *op_INV_6 = INV(op_GT_5); + RzILOpEffect *branch_21 = BRANCH(op_INV_6, seq_then_20, EMPTY()); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_7, branch_21); + return instruction_sequence; +} + +// p0 = cmp.gtu(Rs,Rt); if (!p0.new) jump:nt Ii +RzILOpEffect *hex_il_op_j4_cmpgtu_fp0_jump_nt_part0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P0_op = EXPLICIT2OP(0, HEX_REG_CLASS_PRED_REGS, false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // P0 = ((st8) ((((ut32) Rs) > ((ut32) Rt)) ? 0xff : 0x0)); + RzILOpPure *op_GT_5 = UGT(CAST(32, IL_FALSE, Rs), CAST(32, IL_FALSE, Rt)); + RzILOpPure *cond_8 = ITE(op_GT_5, SN(32, 0xff), SN(32, 0)); + RzILOpEffect *op_ASSIGN_10 = WRITE_REG(bundle, &P0_op, CAST(8, MSB(cond_8), DUP(cond_8))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_10; + return instruction_sequence; +} + +// p0 = cmp.gtu(Rs,Rt); if (!p0.new) jump:nt Ii +RzILOpEffect *hex_il_op_j4_cmpgtu_fp0_jump_nt_part1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P0_new_op = EXPLICIT2OP(0, HEX_REG_CLASS_PRED_REGS, true); + RzILOpPure *P0_new = READ_REG(pkt, &P0_new_op, true); + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + RzILOpPure *pc = U32(pkt->pkt_addr); + + // r = r; + RzILOpEffect *imm_assign_5 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_11 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_12 = SETL("r", op_AND_11); + + // jump(pc + ((ut32) r)); + RzILOpPure *op_ADD_15 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + 
RzILOpEffect *jump_op_ADD_15_16 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", op_ADD_15)); + + // seq(r; r = (r & -0x4); jump(pc + ((ut32) r))); + RzILOpEffect *seq_then_18 = SEQN(2, op_ASSIGN_12, jump_op_ADD_15_16); + + // if (! (((st32) P0_new) & 0x1)) {seq(r; r = (r & -0x4); jump(pc + ((ut32) r)))} else {{}}; + RzILOpPure *op_AND_3 = LOGAND(CAST(32, MSB(P0_new), DUP(P0_new)), SN(32, 1)); + RzILOpPure *op_INV_4 = INV(NON_ZERO(op_AND_3)); + RzILOpEffect *branch_19 = BRANCH(op_INV_4, seq_then_18, EMPTY()); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_5, branch_19); + return instruction_sequence; +} + +// p0 = cmp.gtu(Rs,Rt); if (!p0.new) jump:t Ii +RzILOpEffect *hex_il_op_j4_cmpgtu_fp0_jump_t_part0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P0_op = EXPLICIT2OP(0, HEX_REG_CLASS_PRED_REGS, false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // P0 = ((st8) ((((ut32) Rs) > ((ut32) Rt)) ? 
0xff : 0x0)); + RzILOpPure *op_GT_5 = UGT(CAST(32, IL_FALSE, Rs), CAST(32, IL_FALSE, Rt)); + RzILOpPure *cond_8 = ITE(op_GT_5, SN(32, 0xff), SN(32, 0)); + RzILOpEffect *op_ASSIGN_10 = WRITE_REG(bundle, &P0_op, CAST(8, MSB(cond_8), DUP(cond_8))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_10; + return instruction_sequence; +} + +// p0 = cmp.gtu(Rs,Rt); if (!p0.new) jump:t Ii +RzILOpEffect *hex_il_op_j4_cmpgtu_fp0_jump_t_part1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P0_new_op = EXPLICIT2OP(0, HEX_REG_CLASS_PRED_REGS, true); + RzILOpPure *P0_new = READ_REG(pkt, &P0_new_op, true); + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + RzILOpPure *pc = U32(pkt->pkt_addr); + + // r = r; + RzILOpEffect *imm_assign_5 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_11 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_12 = SETL("r", op_AND_11); + + // jump(pc + ((ut32) r)); + RzILOpPure *op_ADD_15 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *jump_op_ADD_15_16 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", op_ADD_15)); + + // seq(r; r = (r & -0x4); jump(pc + ((ut32) r))); + RzILOpEffect *seq_then_18 = SEQN(2, op_ASSIGN_12, jump_op_ADD_15_16); + + // if (! 
(((st32) P0_new) & 0x1)) {seq(r; r = (r & -0x4); jump(pc + ((ut32) r)))} else {{}}; + RzILOpPure *op_AND_3 = LOGAND(CAST(32, MSB(P0_new), DUP(P0_new)), SN(32, 1)); + RzILOpPure *op_INV_4 = INV(NON_ZERO(op_AND_3)); + RzILOpEffect *branch_19 = BRANCH(op_INV_4, seq_then_18, EMPTY()); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_5, branch_19); + return instruction_sequence; +} + +// p1 = cmp.gtu(Rs,Rt); if (!p1.new) jump:nt Ii +RzILOpEffect *hex_il_op_j4_cmpgtu_fp1_jump_nt_part0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P1_op = EXPLICIT2OP(1, HEX_REG_CLASS_PRED_REGS, false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // P1 = ((st8) ((((ut32) Rs) > ((ut32) Rt)) ? 0xff : 0x0)); + RzILOpPure *op_GT_5 = UGT(CAST(32, IL_FALSE, Rs), CAST(32, IL_FALSE, Rt)); + RzILOpPure *cond_8 = ITE(op_GT_5, SN(32, 0xff), SN(32, 0)); + RzILOpEffect *op_ASSIGN_10 = WRITE_REG(bundle, &P1_op, CAST(8, MSB(cond_8), DUP(cond_8))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_10; + return instruction_sequence; +} + +// p1 = cmp.gtu(Rs,Rt); if (!p1.new) jump:nt Ii +RzILOpEffect *hex_il_op_j4_cmpgtu_fp1_jump_nt_part1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P1_new_op = EXPLICIT2OP(1, HEX_REG_CLASS_PRED_REGS, true); + RzILOpPure *P1_new = READ_REG(pkt, &P1_new_op, true); + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + RzILOpPure *pc = U32(pkt->pkt_addr); + + // r = r; + RzILOpEffect *imm_assign_5 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_11 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_12 = SETL("r", op_AND_11); + + // jump(pc + ((ut32) r)); + RzILOpPure *op_ADD_15 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *jump_op_ADD_15_16 = 
SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", op_ADD_15)); + + // seq(r; r = (r & -0x4); jump(pc + ((ut32) r))); + RzILOpEffect *seq_then_18 = SEQN(2, op_ASSIGN_12, jump_op_ADD_15_16); + + // if (! (((st32) P1_new) & 0x1)) {seq(r; r = (r & -0x4); jump(pc + ((ut32) r)))} else {{}}; + RzILOpPure *op_AND_3 = LOGAND(CAST(32, MSB(P1_new), DUP(P1_new)), SN(32, 1)); + RzILOpPure *op_INV_4 = INV(NON_ZERO(op_AND_3)); + RzILOpEffect *branch_19 = BRANCH(op_INV_4, seq_then_18, EMPTY()); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_5, branch_19); + return instruction_sequence; +} + +// p1 = cmp.gtu(Rs,Rt); if (!p1.new) jump:t Ii +RzILOpEffect *hex_il_op_j4_cmpgtu_fp1_jump_t_part0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P1_op = EXPLICIT2OP(1, HEX_REG_CLASS_PRED_REGS, false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // P1 = ((st8) ((((ut32) Rs) > ((ut32) Rt)) ? 
0xff : 0x0)); + RzILOpPure *op_GT_5 = UGT(CAST(32, IL_FALSE, Rs), CAST(32, IL_FALSE, Rt)); + RzILOpPure *cond_8 = ITE(op_GT_5, SN(32, 0xff), SN(32, 0)); + RzILOpEffect *op_ASSIGN_10 = WRITE_REG(bundle, &P1_op, CAST(8, MSB(cond_8), DUP(cond_8))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_10; + return instruction_sequence; +} + +// p1 = cmp.gtu(Rs,Rt); if (!p1.new) jump:t Ii +RzILOpEffect *hex_il_op_j4_cmpgtu_fp1_jump_t_part1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P1_new_op = EXPLICIT2OP(1, HEX_REG_CLASS_PRED_REGS, true); + RzILOpPure *P1_new = READ_REG(pkt, &P1_new_op, true); + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + RzILOpPure *pc = U32(pkt->pkt_addr); + + // r = r; + RzILOpEffect *imm_assign_5 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_11 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_12 = SETL("r", op_AND_11); + + // jump(pc + ((ut32) r)); + RzILOpPure *op_ADD_15 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *jump_op_ADD_15_16 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", op_ADD_15)); + + // seq(r; r = (r & -0x4); jump(pc + ((ut32) r))); + RzILOpEffect *seq_then_18 = SEQN(2, op_ASSIGN_12, jump_op_ADD_15_16); + + // if (! 
(((st32) P1_new) & 0x1)) {seq(r; r = (r & -0x4); jump(pc + ((ut32) r)))} else {{}}; + RzILOpPure *op_AND_3 = LOGAND(CAST(32, MSB(P1_new), DUP(P1_new)), SN(32, 1)); + RzILOpPure *op_INV_4 = INV(NON_ZERO(op_AND_3)); + RzILOpEffect *branch_19 = BRANCH(op_INV_4, seq_then_18, EMPTY()); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_5, branch_19); + return instruction_sequence; +} + +// if (cmp.gtu(Ns.new,Rt)) jump:nt Ii +RzILOpEffect *hex_il_op_j4_cmpgtu_t_jumpnv_nt(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp Ns_new_op = NREG2OP(bundle, 's'); + RzILOpPure *Ns_new = READ_REG(pkt, &Ns_new_op, true); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + RzILOpPure *pc = U32(pkt->pkt_addr); + + // r = r; + RzILOpEffect *imm_assign_6 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_12 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_13 = SETL("r", op_AND_12); + + // jump(pc + ((ut32) r)); + RzILOpPure *op_ADD_16 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *jump_op_ADD_16_17 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", op_ADD_16)); + + // seq(r; r = (r & -0x4); jump(pc + ((ut32) r))); + RzILOpEffect *seq_then_19 = SEQN(2, op_ASSIGN_13, jump_op_ADD_16_17); + + // if ((((ut32) Ns_new) > ((ut32) Rt))) {seq(r; r = (r & -0x4); jump(pc + ((ut32) r)))} else {{}}; + RzILOpPure *op_GT_5 = UGT(CAST(32, IL_FALSE, Ns_new), CAST(32, IL_FALSE, Rt)); + RzILOpEffect *branch_20 = BRANCH(op_GT_5, seq_then_19, EMPTY()); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_6, branch_20); + return instruction_sequence; +} + +// if (cmp.gtu(Ns.new,Rt)) jump:t Ii +RzILOpEffect *hex_il_op_j4_cmpgtu_t_jumpnv_t(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp Ns_new_op = NREG2OP(bundle, 's'); + RzILOpPure 
*Ns_new = READ_REG(pkt, &Ns_new_op, true); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + RzILOpPure *pc = U32(pkt->pkt_addr); + + // r = r; + RzILOpEffect *imm_assign_6 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_12 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_13 = SETL("r", op_AND_12); + + // jump(pc + ((ut32) r)); + RzILOpPure *op_ADD_16 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *jump_op_ADD_16_17 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", op_ADD_16)); + + // seq(r; r = (r & -0x4); jump(pc + ((ut32) r))); + RzILOpEffect *seq_then_19 = SEQN(2, op_ASSIGN_13, jump_op_ADD_16_17); + + // if ((((ut32) Ns_new) > ((ut32) Rt))) {seq(r; r = (r & -0x4); jump(pc + ((ut32) r)))} else {{}}; + RzILOpPure *op_GT_5 = UGT(CAST(32, IL_FALSE, Ns_new), CAST(32, IL_FALSE, Rt)); + RzILOpEffect *branch_20 = BRANCH(op_GT_5, seq_then_19, EMPTY()); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_6, branch_20); + return instruction_sequence; +} + +// p0 = cmp.gtu(Rs,Rt); if (p0.new) jump:nt Ii +RzILOpEffect *hex_il_op_j4_cmpgtu_tp0_jump_nt_part0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P0_op = EXPLICIT2OP(0, HEX_REG_CLASS_PRED_REGS, false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // P0 = ((st8) ((((ut32) Rs) > ((ut32) Rt)) ? 
0xff : 0x0)); + RzILOpPure *op_GT_5 = UGT(CAST(32, IL_FALSE, Rs), CAST(32, IL_FALSE, Rt)); + RzILOpPure *cond_8 = ITE(op_GT_5, SN(32, 0xff), SN(32, 0)); + RzILOpEffect *op_ASSIGN_10 = WRITE_REG(bundle, &P0_op, CAST(8, MSB(cond_8), DUP(cond_8))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_10; + return instruction_sequence; +} + +// p0 = cmp.gtu(Rs,Rt); if (p0.new) jump:nt Ii +RzILOpEffect *hex_il_op_j4_cmpgtu_tp0_jump_nt_part1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P0_new_op = EXPLICIT2OP(0, HEX_REG_CLASS_PRED_REGS, true); + RzILOpPure *P0_new = READ_REG(pkt, &P0_new_op, true); + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + RzILOpPure *pc = U32(pkt->pkt_addr); + + // r = r; + RzILOpEffect *imm_assign_4 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_10 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_11 = SETL("r", op_AND_10); + + // jump(pc + ((ut32) r)); + RzILOpPure *op_ADD_14 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *jump_op_ADD_14_15 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", op_ADD_14)); + + // seq(r; r = (r & -0x4); jump(pc + ((ut32) r))); + RzILOpEffect *seq_then_17 = SEQN(2, op_ASSIGN_11, jump_op_ADD_14_15); + + // if ((((st32) P0_new) & 0x1)) {seq(r; r = (r & -0x4); jump(pc + ((ut32) r)))} else {{}}; + RzILOpPure *op_AND_3 = LOGAND(CAST(32, MSB(P0_new), DUP(P0_new)), SN(32, 1)); + RzILOpEffect *branch_18 = BRANCH(NON_ZERO(op_AND_3), seq_then_17, EMPTY()); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_4, branch_18); + return instruction_sequence; +} + +// p0 = cmp.gtu(Rs,Rt); if (p0.new) jump:t Ii +RzILOpEffect *hex_il_op_j4_cmpgtu_tp0_jump_t_part0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P0_op = EXPLICIT2OP(0, HEX_REG_CLASS_PRED_REGS, false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, 
false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // P0 = ((st8) ((((ut32) Rs) > ((ut32) Rt)) ? 0xff : 0x0)); + RzILOpPure *op_GT_5 = UGT(CAST(32, IL_FALSE, Rs), CAST(32, IL_FALSE, Rt)); + RzILOpPure *cond_8 = ITE(op_GT_5, SN(32, 0xff), SN(32, 0)); + RzILOpEffect *op_ASSIGN_10 = WRITE_REG(bundle, &P0_op, CAST(8, MSB(cond_8), DUP(cond_8))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_10; + return instruction_sequence; +} + +// p0 = cmp.gtu(Rs,Rt); if (p0.new) jump:t Ii +RzILOpEffect *hex_il_op_j4_cmpgtu_tp0_jump_t_part1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P0_new_op = EXPLICIT2OP(0, HEX_REG_CLASS_PRED_REGS, true); + RzILOpPure *P0_new = READ_REG(pkt, &P0_new_op, true); + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + RzILOpPure *pc = U32(pkt->pkt_addr); + + // r = r; + RzILOpEffect *imm_assign_4 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_10 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_11 = SETL("r", op_AND_10); + + // jump(pc + ((ut32) r)); + RzILOpPure *op_ADD_14 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *jump_op_ADD_14_15 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", op_ADD_14)); + + // seq(r; r = (r & -0x4); jump(pc + ((ut32) r))); + RzILOpEffect *seq_then_17 = SEQN(2, op_ASSIGN_11, jump_op_ADD_14_15); + + // if ((((st32) P0_new) & 0x1)) {seq(r; r = (r & -0x4); jump(pc + ((ut32) r)))} else {{}}; + RzILOpPure *op_AND_3 = LOGAND(CAST(32, MSB(P0_new), DUP(P0_new)), SN(32, 1)); + RzILOpEffect *branch_18 = BRANCH(NON_ZERO(op_AND_3), seq_then_17, EMPTY()); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_4, branch_18); + return instruction_sequence; +} + +// p1 = cmp.gtu(Rs,Rt); if (p1.new) jump:nt Ii +RzILOpEffect *hex_il_op_j4_cmpgtu_tp1_jump_nt_part0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + 
const HexOp P1_op = EXPLICIT2OP(1, HEX_REG_CLASS_PRED_REGS, false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // P1 = ((st8) ((((ut32) Rs) > ((ut32) Rt)) ? 0xff : 0x0)); + RzILOpPure *op_GT_5 = UGT(CAST(32, IL_FALSE, Rs), CAST(32, IL_FALSE, Rt)); + RzILOpPure *cond_8 = ITE(op_GT_5, SN(32, 0xff), SN(32, 0)); + RzILOpEffect *op_ASSIGN_10 = WRITE_REG(bundle, &P1_op, CAST(8, MSB(cond_8), DUP(cond_8))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_10; + return instruction_sequence; +} + +// p1 = cmp.gtu(Rs,Rt); if (p1.new) jump:nt Ii +RzILOpEffect *hex_il_op_j4_cmpgtu_tp1_jump_nt_part1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P1_new_op = EXPLICIT2OP(1, HEX_REG_CLASS_PRED_REGS, true); + RzILOpPure *P1_new = READ_REG(pkt, &P1_new_op, true); + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + RzILOpPure *pc = U32(pkt->pkt_addr); + + // r = r; + RzILOpEffect *imm_assign_4 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_10 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_11 = SETL("r", op_AND_10); + + // jump(pc + ((ut32) r)); + RzILOpPure *op_ADD_14 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *jump_op_ADD_14_15 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", op_ADD_14)); + + // seq(r; r = (r & -0x4); jump(pc + ((ut32) r))); + RzILOpEffect *seq_then_17 = SEQN(2, op_ASSIGN_11, jump_op_ADD_14_15); + + // if ((((st32) P1_new) & 0x1)) {seq(r; r = (r & -0x4); jump(pc + ((ut32) r)))} else {{}}; + RzILOpPure *op_AND_3 = LOGAND(CAST(32, MSB(P1_new), DUP(P1_new)), SN(32, 1)); + RzILOpEffect *branch_18 = BRANCH(NON_ZERO(op_AND_3), seq_then_17, EMPTY()); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_4, branch_18); + return instruction_sequence; +} + +// p1 = cmp.gtu(Rs,Rt); if (p1.new) jump:t Ii 
+RzILOpEffect *hex_il_op_j4_cmpgtu_tp1_jump_t_part0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P1_op = EXPLICIT2OP(1, HEX_REG_CLASS_PRED_REGS, false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // P1 = ((st8) ((((ut32) Rs) > ((ut32) Rt)) ? 0xff : 0x0)); + RzILOpPure *op_GT_5 = UGT(CAST(32, IL_FALSE, Rs), CAST(32, IL_FALSE, Rt)); + RzILOpPure *cond_8 = ITE(op_GT_5, SN(32, 0xff), SN(32, 0)); + RzILOpEffect *op_ASSIGN_10 = WRITE_REG(bundle, &P1_op, CAST(8, MSB(cond_8), DUP(cond_8))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_10; + return instruction_sequence; +} + +// p1 = cmp.gtu(Rs,Rt); if (p1.new) jump:t Ii +RzILOpEffect *hex_il_op_j4_cmpgtu_tp1_jump_t_part1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P1_new_op = EXPLICIT2OP(1, HEX_REG_CLASS_PRED_REGS, true); + RzILOpPure *P1_new = READ_REG(pkt, &P1_new_op, true); + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + RzILOpPure *pc = U32(pkt->pkt_addr); + + // r = r; + RzILOpEffect *imm_assign_4 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_10 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_11 = SETL("r", op_AND_10); + + // jump(pc + ((ut32) r)); + RzILOpPure *op_ADD_14 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *jump_op_ADD_14_15 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", op_ADD_14)); + + // seq(r; r = (r & -0x4); jump(pc + ((ut32) r))); + RzILOpEffect *seq_then_17 = SEQN(2, op_ASSIGN_11, jump_op_ADD_14_15); + + // if ((((st32) P1_new) & 0x1)) {seq(r; r = (r & -0x4); jump(pc + ((ut32) r)))} else {{}}; + RzILOpPure *op_AND_3 = LOGAND(CAST(32, MSB(P1_new), DUP(P1_new)), SN(32, 1)); + RzILOpEffect *branch_18 = BRANCH(NON_ZERO(op_AND_3), seq_then_17, EMPTY()); + + 
RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_4, branch_18); + return instruction_sequence; +} + +// if (!cmp.gtu(Ns.new,II)) jump:nt Ii +RzILOpEffect *hex_il_op_j4_cmpgtui_f_jumpnv_nt(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp Ns_new_op = NREG2OP(bundle, 's'); + RzILOpPure *Ns_new = READ_REG(pkt, &Ns_new_op, true); + RzILOpPure *U = UN(32, (ut32)ISA2IMM(hi, 'U')); + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + RzILOpPure *pc = U32(pkt->pkt_addr); + + // U = U; + RzILOpEffect *imm_assign_3 = SETL("U", U); + + // r = r; + RzILOpEffect *imm_assign_7 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_13 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_14 = SETL("r", op_AND_13); + + // jump(pc + ((ut32) r)); + RzILOpPure *op_ADD_17 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *jump_op_ADD_17_18 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", op_ADD_17)); + + // seq(r; r = (r & -0x4); jump(pc + ((ut32) r))); + RzILOpEffect *seq_then_20 = SEQN(2, op_ASSIGN_14, jump_op_ADD_17_18); + + // if (! 
(((ut32) Ns_new) > U)) {seq(r; r = (r & -0x4); jump(pc + ((ut32) r)))} else {{}}; + RzILOpPure *op_GT_5 = UGT(CAST(32, IL_FALSE, Ns_new), VARL("U")); + RzILOpPure *op_INV_6 = INV(op_GT_5); + RzILOpEffect *branch_21 = BRANCH(op_INV_6, seq_then_20, EMPTY()); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_3, imm_assign_7, branch_21); + return instruction_sequence; +} + +// if (!cmp.gtu(Ns.new,II)) jump:t Ii +RzILOpEffect *hex_il_op_j4_cmpgtui_f_jumpnv_t(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp Ns_new_op = NREG2OP(bundle, 's'); + RzILOpPure *Ns_new = READ_REG(pkt, &Ns_new_op, true); + RzILOpPure *U = UN(32, (ut32)ISA2IMM(hi, 'U')); + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + RzILOpPure *pc = U32(pkt->pkt_addr); + + // U = U; + RzILOpEffect *imm_assign_3 = SETL("U", U); + + // r = r; + RzILOpEffect *imm_assign_7 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_13 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_14 = SETL("r", op_AND_13); + + // jump(pc + ((ut32) r)); + RzILOpPure *op_ADD_17 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *jump_op_ADD_17_18 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", op_ADD_17)); + + // seq(r; r = (r & -0x4); jump(pc + ((ut32) r))); + RzILOpEffect *seq_then_20 = SEQN(2, op_ASSIGN_14, jump_op_ADD_17_18); + + // if (! 
(((ut32) Ns_new) > U)) {seq(r; r = (r & -0x4); jump(pc + ((ut32) r)))} else {{}}; + RzILOpPure *op_GT_5 = UGT(CAST(32, IL_FALSE, Ns_new), VARL("U")); + RzILOpPure *op_INV_6 = INV(op_GT_5); + RzILOpEffect *branch_21 = BRANCH(op_INV_6, seq_then_20, EMPTY()); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_3, imm_assign_7, branch_21); + return instruction_sequence; +} + +// p0 = cmp.gtu(Rs,II); if (!p0.new) jump:nt Ii +RzILOpEffect *hex_il_op_j4_cmpgtui_fp0_jump_nt_part0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P0_op = EXPLICIT2OP(0, HEX_REG_CLASS_PRED_REGS, false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + RzILOpPure *U = UN(32, (ut32)ISA2IMM(hi, 'U')); + + // U = U; + RzILOpEffect *imm_assign_3 = SETL("U", U); + + // P0 = ((st8) ((((ut32) Rs) > U) ? 0xff : 0x0)); + RzILOpPure *op_GT_5 = UGT(CAST(32, IL_FALSE, Rs), VARL("U")); + RzILOpPure *cond_8 = ITE(op_GT_5, SN(32, 0xff), SN(32, 0)); + RzILOpEffect *op_ASSIGN_10 = WRITE_REG(bundle, &P0_op, CAST(8, MSB(cond_8), DUP(cond_8))); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_3, op_ASSIGN_10); + return instruction_sequence; +} + +// p0 = cmp.gtu(Rs,II); if (!p0.new) jump:nt Ii +RzILOpEffect *hex_il_op_j4_cmpgtui_fp0_jump_nt_part1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P0_new_op = EXPLICIT2OP(0, HEX_REG_CLASS_PRED_REGS, true); + RzILOpPure *P0_new = READ_REG(pkt, &P0_new_op, true); + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + RzILOpPure *pc = U32(pkt->pkt_addr); + + // r = r; + RzILOpEffect *imm_assign_5 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_11 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_12 = SETL("r", op_AND_11); + + // jump(pc + ((ut32) r)); + RzILOpPure *op_ADD_15 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *jump_op_ADD_15_16 = 
SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", op_ADD_15)); + + // seq(r; r = (r & -0x4); jump(pc + ((ut32) r))); + RzILOpEffect *seq_then_18 = SEQN(2, op_ASSIGN_12, jump_op_ADD_15_16); + + // if (! (((st32) P0_new) & 0x1)) {seq(r; r = (r & -0x4); jump(pc + ((ut32) r)))} else {{}}; + RzILOpPure *op_AND_3 = LOGAND(CAST(32, MSB(P0_new), DUP(P0_new)), SN(32, 1)); + RzILOpPure *op_INV_4 = INV(NON_ZERO(op_AND_3)); + RzILOpEffect *branch_19 = BRANCH(op_INV_4, seq_then_18, EMPTY()); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_5, branch_19); + return instruction_sequence; +} + +// p0 = cmp.gtu(Rs,II); if (!p0.new) jump:t Ii +RzILOpEffect *hex_il_op_j4_cmpgtui_fp0_jump_t_part0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P0_op = EXPLICIT2OP(0, HEX_REG_CLASS_PRED_REGS, false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + RzILOpPure *U = UN(32, (ut32)ISA2IMM(hi, 'U')); + + // U = U; + RzILOpEffect *imm_assign_3 = SETL("U", U); + + // P0 = ((st8) ((((ut32) Rs) > U) ? 
0xff : 0x0)); + RzILOpPure *op_GT_5 = UGT(CAST(32, IL_FALSE, Rs), VARL("U")); + RzILOpPure *cond_8 = ITE(op_GT_5, SN(32, 0xff), SN(32, 0)); + RzILOpEffect *op_ASSIGN_10 = WRITE_REG(bundle, &P0_op, CAST(8, MSB(cond_8), DUP(cond_8))); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_3, op_ASSIGN_10); + return instruction_sequence; +} + +// p0 = cmp.gtu(Rs,II); if (!p0.new) jump:t Ii +RzILOpEffect *hex_il_op_j4_cmpgtui_fp0_jump_t_part1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P0_new_op = EXPLICIT2OP(0, HEX_REG_CLASS_PRED_REGS, true); + RzILOpPure *P0_new = READ_REG(pkt, &P0_new_op, true); + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + RzILOpPure *pc = U32(pkt->pkt_addr); + + // r = r; + RzILOpEffect *imm_assign_5 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_11 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_12 = SETL("r", op_AND_11); + + // jump(pc + ((ut32) r)); + RzILOpPure *op_ADD_15 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *jump_op_ADD_15_16 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", op_ADD_15)); + + // seq(r; r = (r & -0x4); jump(pc + ((ut32) r))); + RzILOpEffect *seq_then_18 = SEQN(2, op_ASSIGN_12, jump_op_ADD_15_16); + + // if (! 
(((st32) P0_new) & 0x1)) {seq(r; r = (r & -0x4); jump(pc + ((ut32) r)))} else {{}}; + RzILOpPure *op_AND_3 = LOGAND(CAST(32, MSB(P0_new), DUP(P0_new)), SN(32, 1)); + RzILOpPure *op_INV_4 = INV(NON_ZERO(op_AND_3)); + RzILOpEffect *branch_19 = BRANCH(op_INV_4, seq_then_18, EMPTY()); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_5, branch_19); + return instruction_sequence; +} + +// p1 = cmp.gtu(Rs,II); if (!p1.new) jump:nt Ii +RzILOpEffect *hex_il_op_j4_cmpgtui_fp1_jump_nt_part0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P1_op = EXPLICIT2OP(1, HEX_REG_CLASS_PRED_REGS, false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + RzILOpPure *U = UN(32, (ut32)ISA2IMM(hi, 'U')); + + // U = U; + RzILOpEffect *imm_assign_3 = SETL("U", U); + + // P1 = ((st8) ((((ut32) Rs) > U) ? 0xff : 0x0)); + RzILOpPure *op_GT_5 = UGT(CAST(32, IL_FALSE, Rs), VARL("U")); + RzILOpPure *cond_8 = ITE(op_GT_5, SN(32, 0xff), SN(32, 0)); + RzILOpEffect *op_ASSIGN_10 = WRITE_REG(bundle, &P1_op, CAST(8, MSB(cond_8), DUP(cond_8))); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_3, op_ASSIGN_10); + return instruction_sequence; +} + +// p1 = cmp.gtu(Rs,II); if (!p1.new) jump:nt Ii +RzILOpEffect *hex_il_op_j4_cmpgtui_fp1_jump_nt_part1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P1_new_op = EXPLICIT2OP(1, HEX_REG_CLASS_PRED_REGS, true); + RzILOpPure *P1_new = READ_REG(pkt, &P1_new_op, true); + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + RzILOpPure *pc = U32(pkt->pkt_addr); + + // r = r; + RzILOpEffect *imm_assign_5 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_11 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_12 = SETL("r", op_AND_11); + + // jump(pc + ((ut32) r)); + RzILOpPure *op_ADD_15 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect 
*jump_op_ADD_15_16 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", op_ADD_15)); + + // seq(r; r = (r & -0x4); jump(pc + ((ut32) r))); + RzILOpEffect *seq_then_18 = SEQN(2, op_ASSIGN_12, jump_op_ADD_15_16); + + // if (! (((st32) P1_new) & 0x1)) {seq(r; r = (r & -0x4); jump(pc + ((ut32) r)))} else {{}}; + RzILOpPure *op_AND_3 = LOGAND(CAST(32, MSB(P1_new), DUP(P1_new)), SN(32, 1)); + RzILOpPure *op_INV_4 = INV(NON_ZERO(op_AND_3)); + RzILOpEffect *branch_19 = BRANCH(op_INV_4, seq_then_18, EMPTY()); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_5, branch_19); + return instruction_sequence; +} + +// p1 = cmp.gtu(Rs,II); if (!p1.new) jump:t Ii +RzILOpEffect *hex_il_op_j4_cmpgtui_fp1_jump_t_part0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P1_op = EXPLICIT2OP(1, HEX_REG_CLASS_PRED_REGS, false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + RzILOpPure *U = UN(32, (ut32)ISA2IMM(hi, 'U')); + + // U = U; + RzILOpEffect *imm_assign_3 = SETL("U", U); + + // P1 = ((st8) ((((ut32) Rs) > U) ? 
0xff : 0x0)); + RzILOpPure *op_GT_5 = UGT(CAST(32, IL_FALSE, Rs), VARL("U")); + RzILOpPure *cond_8 = ITE(op_GT_5, SN(32, 0xff), SN(32, 0)); + RzILOpEffect *op_ASSIGN_10 = WRITE_REG(bundle, &P1_op, CAST(8, MSB(cond_8), DUP(cond_8))); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_3, op_ASSIGN_10); + return instruction_sequence; +} + +// p1 = cmp.gtu(Rs,II); if (!p1.new) jump:t Ii +RzILOpEffect *hex_il_op_j4_cmpgtui_fp1_jump_t_part1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P1_new_op = EXPLICIT2OP(1, HEX_REG_CLASS_PRED_REGS, true); + RzILOpPure *P1_new = READ_REG(pkt, &P1_new_op, true); + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + RzILOpPure *pc = U32(pkt->pkt_addr); + + // r = r; + RzILOpEffect *imm_assign_5 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_11 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_12 = SETL("r", op_AND_11); + + // jump(pc + ((ut32) r)); + RzILOpPure *op_ADD_15 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *jump_op_ADD_15_16 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", op_ADD_15)); + + // seq(r; r = (r & -0x4); jump(pc + ((ut32) r))); + RzILOpEffect *seq_then_18 = SEQN(2, op_ASSIGN_12, jump_op_ADD_15_16); + + // if (! 
(((st32) P1_new) & 0x1)) {seq(r; r = (r & -0x4); jump(pc + ((ut32) r)))} else {{}}; + RzILOpPure *op_AND_3 = LOGAND(CAST(32, MSB(P1_new), DUP(P1_new)), SN(32, 1)); + RzILOpPure *op_INV_4 = INV(NON_ZERO(op_AND_3)); + RzILOpEffect *branch_19 = BRANCH(op_INV_4, seq_then_18, EMPTY()); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_5, branch_19); + return instruction_sequence; +} + +// if (cmp.gtu(Ns.new,II)) jump:nt Ii +RzILOpEffect *hex_il_op_j4_cmpgtui_t_jumpnv_nt(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp Ns_new_op = NREG2OP(bundle, 's'); + RzILOpPure *Ns_new = READ_REG(pkt, &Ns_new_op, true); + RzILOpPure *U = UN(32, (ut32)ISA2IMM(hi, 'U')); + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + RzILOpPure *pc = U32(pkt->pkt_addr); + + // U = U; + RzILOpEffect *imm_assign_3 = SETL("U", U); + + // r = r; + RzILOpEffect *imm_assign_6 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_12 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_13 = SETL("r", op_AND_12); + + // jump(pc + ((ut32) r)); + RzILOpPure *op_ADD_16 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *jump_op_ADD_16_17 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", op_ADD_16)); + + // seq(r; r = (r & -0x4); jump(pc + ((ut32) r))); + RzILOpEffect *seq_then_19 = SEQN(2, op_ASSIGN_13, jump_op_ADD_16_17); + + // if ((((ut32) Ns_new) > U)) {seq(r; r = (r & -0x4); jump(pc + ((ut32) r)))} else {{}}; + RzILOpPure *op_GT_5 = UGT(CAST(32, IL_FALSE, Ns_new), VARL("U")); + RzILOpEffect *branch_20 = BRANCH(op_GT_5, seq_then_19, EMPTY()); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_3, imm_assign_6, branch_20); + return instruction_sequence; +} + +// if (cmp.gtu(Ns.new,II)) jump:t Ii +RzILOpEffect *hex_il_op_j4_cmpgtui_t_jumpnv_t(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp Ns_new_op = NREG2OP(bundle, 's'); + 
RzILOpPure *Ns_new = READ_REG(pkt, &Ns_new_op, true); + RzILOpPure *U = UN(32, (ut32)ISA2IMM(hi, 'U')); + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + RzILOpPure *pc = U32(pkt->pkt_addr); + + // U = U; + RzILOpEffect *imm_assign_3 = SETL("U", U); + + // r = r; + RzILOpEffect *imm_assign_6 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_12 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_13 = SETL("r", op_AND_12); + + // jump(pc + ((ut32) r)); + RzILOpPure *op_ADD_16 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *jump_op_ADD_16_17 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", op_ADD_16)); + + // seq(r; r = (r & -0x4); jump(pc + ((ut32) r))); + RzILOpEffect *seq_then_19 = SEQN(2, op_ASSIGN_13, jump_op_ADD_16_17); + + // if ((((ut32) Ns_new) > U)) {seq(r; r = (r & -0x4); jump(pc + ((ut32) r)))} else {{}}; + RzILOpPure *op_GT_5 = UGT(CAST(32, IL_FALSE, Ns_new), VARL("U")); + RzILOpEffect *branch_20 = BRANCH(op_GT_5, seq_then_19, EMPTY()); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_3, imm_assign_6, branch_20); + return instruction_sequence; +} + +// p0 = cmp.gtu(Rs,II); if (p0.new) jump:nt Ii +RzILOpEffect *hex_il_op_j4_cmpgtui_tp0_jump_nt_part0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P0_op = EXPLICIT2OP(0, HEX_REG_CLASS_PRED_REGS, false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + RzILOpPure *U = UN(32, (ut32)ISA2IMM(hi, 'U')); + + // U = U; + RzILOpEffect *imm_assign_3 = SETL("U", U); + + // P0 = ((st8) ((((ut32) Rs) > U) ? 
0xff : 0x0)); + RzILOpPure *op_GT_5 = UGT(CAST(32, IL_FALSE, Rs), VARL("U")); + RzILOpPure *cond_8 = ITE(op_GT_5, SN(32, 0xff), SN(32, 0)); + RzILOpEffect *op_ASSIGN_10 = WRITE_REG(bundle, &P0_op, CAST(8, MSB(cond_8), DUP(cond_8))); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_3, op_ASSIGN_10); + return instruction_sequence; +} + +// p0 = cmp.gtu(Rs,II); if (p0.new) jump:nt Ii +RzILOpEffect *hex_il_op_j4_cmpgtui_tp0_jump_nt_part1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P0_new_op = EXPLICIT2OP(0, HEX_REG_CLASS_PRED_REGS, true); + RzILOpPure *P0_new = READ_REG(pkt, &P0_new_op, true); + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + RzILOpPure *pc = U32(pkt->pkt_addr); + + // r = r; + RzILOpEffect *imm_assign_4 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_10 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_11 = SETL("r", op_AND_10); + + // jump(pc + ((ut32) r)); + RzILOpPure *op_ADD_14 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *jump_op_ADD_14_15 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", op_ADD_14)); + + // seq(r; r = (r & -0x4); jump(pc + ((ut32) r))); + RzILOpEffect *seq_then_17 = SEQN(2, op_ASSIGN_11, jump_op_ADD_14_15); + + // if ((((st32) P0_new) & 0x1)) {seq(r; r = (r & -0x4); jump(pc + ((ut32) r)))} else {{}}; + RzILOpPure *op_AND_3 = LOGAND(CAST(32, MSB(P0_new), DUP(P0_new)), SN(32, 1)); + RzILOpEffect *branch_18 = BRANCH(NON_ZERO(op_AND_3), seq_then_17, EMPTY()); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_4, branch_18); + return instruction_sequence; +} + +// p0 = cmp.gtu(Rs,II); if (p0.new) jump:t Ii +RzILOpEffect *hex_il_op_j4_cmpgtui_tp0_jump_t_part0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P0_op = EXPLICIT2OP(0, HEX_REG_CLASS_PRED_REGS, false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = 
READ_REG(pkt, Rs_op, false); + RzILOpPure *U = UN(32, (ut32)ISA2IMM(hi, 'U')); + + // U = U; + RzILOpEffect *imm_assign_3 = SETL("U", U); + + // P0 = ((st8) ((((ut32) Rs) > U) ? 0xff : 0x0)); + RzILOpPure *op_GT_5 = UGT(CAST(32, IL_FALSE, Rs), VARL("U")); + RzILOpPure *cond_8 = ITE(op_GT_5, SN(32, 0xff), SN(32, 0)); + RzILOpEffect *op_ASSIGN_10 = WRITE_REG(bundle, &P0_op, CAST(8, MSB(cond_8), DUP(cond_8))); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_3, op_ASSIGN_10); + return instruction_sequence; +} + +// p0 = cmp.gtu(Rs,II); if (p0.new) jump:t Ii +RzILOpEffect *hex_il_op_j4_cmpgtui_tp0_jump_t_part1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P0_new_op = EXPLICIT2OP(0, HEX_REG_CLASS_PRED_REGS, true); + RzILOpPure *P0_new = READ_REG(pkt, &P0_new_op, true); + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + RzILOpPure *pc = U32(pkt->pkt_addr); + + // r = r; + RzILOpEffect *imm_assign_4 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_10 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_11 = SETL("r", op_AND_10); + + // jump(pc + ((ut32) r)); + RzILOpPure *op_ADD_14 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *jump_op_ADD_14_15 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", op_ADD_14)); + + // seq(r; r = (r & -0x4); jump(pc + ((ut32) r))); + RzILOpEffect *seq_then_17 = SEQN(2, op_ASSIGN_11, jump_op_ADD_14_15); + + // if ((((st32) P0_new) & 0x1)) {seq(r; r = (r & -0x4); jump(pc + ((ut32) r)))} else {{}}; + RzILOpPure *op_AND_3 = LOGAND(CAST(32, MSB(P0_new), DUP(P0_new)), SN(32, 1)); + RzILOpEffect *branch_18 = BRANCH(NON_ZERO(op_AND_3), seq_then_17, EMPTY()); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_4, branch_18); + return instruction_sequence; +} + +// p1 = cmp.gtu(Rs,II); if (p1.new) jump:nt Ii +RzILOpEffect *hex_il_op_j4_cmpgtui_tp1_jump_nt_part0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + 
HexPkt *pkt = bundle->pkt; + // READ + const HexOp P1_op = EXPLICIT2OP(1, HEX_REG_CLASS_PRED_REGS, false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + RzILOpPure *U = UN(32, (ut32)ISA2IMM(hi, 'U')); + + // U = U; + RzILOpEffect *imm_assign_3 = SETL("U", U); + + // P1 = ((st8) ((((ut32) Rs) > U) ? 0xff : 0x0)); + RzILOpPure *op_GT_5 = UGT(CAST(32, IL_FALSE, Rs), VARL("U")); + RzILOpPure *cond_8 = ITE(op_GT_5, SN(32, 0xff), SN(32, 0)); + RzILOpEffect *op_ASSIGN_10 = WRITE_REG(bundle, &P1_op, CAST(8, MSB(cond_8), DUP(cond_8))); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_3, op_ASSIGN_10); + return instruction_sequence; +} + +// p1 = cmp.gtu(Rs,II); if (p1.new) jump:nt Ii +RzILOpEffect *hex_il_op_j4_cmpgtui_tp1_jump_nt_part1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P1_new_op = EXPLICIT2OP(1, HEX_REG_CLASS_PRED_REGS, true); + RzILOpPure *P1_new = READ_REG(pkt, &P1_new_op, true); + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + RzILOpPure *pc = U32(pkt->pkt_addr); + + // r = r; + RzILOpEffect *imm_assign_4 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_10 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_11 = SETL("r", op_AND_10); + + // jump(pc + ((ut32) r)); + RzILOpPure *op_ADD_14 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *jump_op_ADD_14_15 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", op_ADD_14)); + + // seq(r; r = (r & -0x4); jump(pc + ((ut32) r))); + RzILOpEffect *seq_then_17 = SEQN(2, op_ASSIGN_11, jump_op_ADD_14_15); + + // if ((((st32) P1_new) & 0x1)) {seq(r; r = (r & -0x4); jump(pc + ((ut32) r)))} else {{}}; + RzILOpPure *op_AND_3 = LOGAND(CAST(32, MSB(P1_new), DUP(P1_new)), SN(32, 1)); + RzILOpEffect *branch_18 = BRANCH(NON_ZERO(op_AND_3), seq_then_17, EMPTY()); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_4, branch_18); + return instruction_sequence; +} 
+ +// p1 = cmp.gtu(Rs,II); if (p1.new) jump:t Ii +RzILOpEffect *hex_il_op_j4_cmpgtui_tp1_jump_t_part0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P1_op = EXPLICIT2OP(1, HEX_REG_CLASS_PRED_REGS, false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + RzILOpPure *U = UN(32, (ut32)ISA2IMM(hi, 'U')); + + // U = U; + RzILOpEffect *imm_assign_3 = SETL("U", U); + + // P1 = ((st8) ((((ut32) Rs) > U) ? 0xff : 0x0)); + RzILOpPure *op_GT_5 = UGT(CAST(32, IL_FALSE, Rs), VARL("U")); + RzILOpPure *cond_8 = ITE(op_GT_5, SN(32, 0xff), SN(32, 0)); + RzILOpEffect *op_ASSIGN_10 = WRITE_REG(bundle, &P1_op, CAST(8, MSB(cond_8), DUP(cond_8))); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_3, op_ASSIGN_10); + return instruction_sequence; +} + +// p1 = cmp.gtu(Rs,II); if (p1.new) jump:t Ii +RzILOpEffect *hex_il_op_j4_cmpgtui_tp1_jump_t_part1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P1_new_op = EXPLICIT2OP(1, HEX_REG_CLASS_PRED_REGS, true); + RzILOpPure *P1_new = READ_REG(pkt, &P1_new_op, true); + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + RzILOpPure *pc = U32(pkt->pkt_addr); + + // r = r; + RzILOpEffect *imm_assign_4 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_10 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_11 = SETL("r", op_AND_10); + + // jump(pc + ((ut32) r)); + RzILOpPure *op_ADD_14 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *jump_op_ADD_14_15 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", op_ADD_14)); + + // seq(r; r = (r & -0x4); jump(pc + ((ut32) r))); + RzILOpEffect *seq_then_17 = SEQN(2, op_ASSIGN_11, jump_op_ADD_14_15); + + // if ((((st32) P1_new) & 0x1)) {seq(r; r = (r & -0x4); jump(pc + ((ut32) r)))} else {{}}; + RzILOpPure *op_AND_3 = LOGAND(CAST(32, MSB(P1_new), DUP(P1_new)), SN(32, 1)); + RzILOpEffect 
*branch_18 = BRANCH(NON_ZERO(op_AND_3), seq_then_17, EMPTY()); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_4, branch_18); + return instruction_sequence; +} + +// if (!cmp.gt(Rt,Ns.new)) jump:nt Ii +RzILOpEffect *hex_il_op_j4_cmplt_f_jumpnv_nt(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + const HexOp Ns_new_op = NREG2OP(bundle, 's'); + RzILOpPure *Ns_new = READ_REG(pkt, &Ns_new_op, true); + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + RzILOpPure *pc = U32(pkt->pkt_addr); + + // r = r; + RzILOpEffect *imm_assign_5 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_11 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_12 = SETL("r", op_AND_11); + + // jump(pc + ((ut32) r)); + RzILOpPure *op_ADD_15 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *jump_op_ADD_15_16 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", op_ADD_15)); + + // seq(r; r = (r & -0x4); jump(pc + ((ut32) r))); + RzILOpEffect *seq_then_18 = SEQN(2, op_ASSIGN_12, jump_op_ADD_15_16); + + // if (! 
(Rt > Ns_new)) {seq(r; r = (r & -0x4); jump(pc + ((ut32) r)))} else {{}}; + RzILOpPure *op_GT_3 = SGT(Rt, Ns_new); + RzILOpPure *op_INV_4 = INV(op_GT_3); + RzILOpEffect *branch_19 = BRANCH(op_INV_4, seq_then_18, EMPTY()); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_5, branch_19); + return instruction_sequence; +} + +// if (!cmp.gt(Rt,Ns.new)) jump:t Ii +RzILOpEffect *hex_il_op_j4_cmplt_f_jumpnv_t(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + const HexOp Ns_new_op = NREG2OP(bundle, 's'); + RzILOpPure *Ns_new = READ_REG(pkt, &Ns_new_op, true); + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + RzILOpPure *pc = U32(pkt->pkt_addr); + + // r = r; + RzILOpEffect *imm_assign_5 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_11 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_12 = SETL("r", op_AND_11); + + // jump(pc + ((ut32) r)); + RzILOpPure *op_ADD_15 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *jump_op_ADD_15_16 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", op_ADD_15)); + + // seq(r; r = (r & -0x4); jump(pc + ((ut32) r))); + RzILOpEffect *seq_then_18 = SEQN(2, op_ASSIGN_12, jump_op_ADD_15_16); + + // if (! 
(Rt > Ns_new)) {seq(r; r = (r & -0x4); jump(pc + ((ut32) r)))} else {{}}; + RzILOpPure *op_GT_3 = SGT(Rt, Ns_new); + RzILOpPure *op_INV_4 = INV(op_GT_3); + RzILOpEffect *branch_19 = BRANCH(op_INV_4, seq_then_18, EMPTY()); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_5, branch_19); + return instruction_sequence; +} + +// if (cmp.gt(Rt,Ns.new)) jump:nt Ii +RzILOpEffect *hex_il_op_j4_cmplt_t_jumpnv_nt(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + const HexOp Ns_new_op = NREG2OP(bundle, 's'); + RzILOpPure *Ns_new = READ_REG(pkt, &Ns_new_op, true); + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + RzILOpPure *pc = U32(pkt->pkt_addr); + + // r = r; + RzILOpEffect *imm_assign_4 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_10 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_11 = SETL("r", op_AND_10); + + // jump(pc + ((ut32) r)); + RzILOpPure *op_ADD_14 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *jump_op_ADD_14_15 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", op_ADD_14)); + + // seq(r; r = (r & -0x4); jump(pc + ((ut32) r))); + RzILOpEffect *seq_then_17 = SEQN(2, op_ASSIGN_11, jump_op_ADD_14_15); + + // if ((Rt > Ns_new)) {seq(r; r = (r & -0x4); jump(pc + ((ut32) r)))} else {{}}; + RzILOpPure *op_GT_3 = SGT(Rt, Ns_new); + RzILOpEffect *branch_18 = BRANCH(op_GT_3, seq_then_17, EMPTY()); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_4, branch_18); + return instruction_sequence; +} + +// if (cmp.gt(Rt,Ns.new)) jump:t Ii +RzILOpEffect *hex_il_op_j4_cmplt_t_jumpnv_t(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + const HexOp Ns_new_op = NREG2OP(bundle, 's'); + RzILOpPure *Ns_new = READ_REG(pkt, 
&Ns_new_op, true); + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + RzILOpPure *pc = U32(pkt->pkt_addr); + + // r = r; + RzILOpEffect *imm_assign_4 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_10 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_11 = SETL("r", op_AND_10); + + // jump(pc + ((ut32) r)); + RzILOpPure *op_ADD_14 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *jump_op_ADD_14_15 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", op_ADD_14)); + + // seq(r; r = (r & -0x4); jump(pc + ((ut32) r))); + RzILOpEffect *seq_then_17 = SEQN(2, op_ASSIGN_11, jump_op_ADD_14_15); + + // if ((Rt > Ns_new)) {seq(r; r = (r & -0x4); jump(pc + ((ut32) r)))} else {{}}; + RzILOpPure *op_GT_3 = SGT(Rt, Ns_new); + RzILOpEffect *branch_18 = BRANCH(op_GT_3, seq_then_17, EMPTY()); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_4, branch_18); + return instruction_sequence; +} + +// if (!cmp.gtu(Rt,Ns.new)) jump:nt Ii +RzILOpEffect *hex_il_op_j4_cmpltu_f_jumpnv_nt(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + const HexOp Ns_new_op = NREG2OP(bundle, 's'); + RzILOpPure *Ns_new = READ_REG(pkt, &Ns_new_op, true); + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + RzILOpPure *pc = U32(pkt->pkt_addr); + + // r = r; + RzILOpEffect *imm_assign_7 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_13 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_14 = SETL("r", op_AND_13); + + // jump(pc + ((ut32) r)); + RzILOpPure *op_ADD_17 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *jump_op_ADD_17_18 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", op_ADD_17)); + + // seq(r; r = (r & -0x4); jump(pc + ((ut32) r))); + RzILOpEffect *seq_then_20 = SEQN(2, op_ASSIGN_14, jump_op_ADD_17_18); + + // if (! 
(((ut32) Rt) > ((ut32) Ns_new))) {seq(r; r = (r & -0x4); jump(pc + ((ut32) r)))} else {{}}; + RzILOpPure *op_GT_5 = UGT(CAST(32, IL_FALSE, Rt), CAST(32, IL_FALSE, Ns_new)); + RzILOpPure *op_INV_6 = INV(op_GT_5); + RzILOpEffect *branch_21 = BRANCH(op_INV_6, seq_then_20, EMPTY()); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_7, branch_21); + return instruction_sequence; +} + +// if (!cmp.gtu(Rt,Ns.new)) jump:t Ii +RzILOpEffect *hex_il_op_j4_cmpltu_f_jumpnv_t(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + const HexOp Ns_new_op = NREG2OP(bundle, 's'); + RzILOpPure *Ns_new = READ_REG(pkt, &Ns_new_op, true); + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + RzILOpPure *pc = U32(pkt->pkt_addr); + + // r = r; + RzILOpEffect *imm_assign_7 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_13 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_14 = SETL("r", op_AND_13); + + // jump(pc + ((ut32) r)); + RzILOpPure *op_ADD_17 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *jump_op_ADD_17_18 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", op_ADD_17)); + + // seq(r; r = (r & -0x4); jump(pc + ((ut32) r))); + RzILOpEffect *seq_then_20 = SEQN(2, op_ASSIGN_14, jump_op_ADD_17_18); + + // if (! 
(((ut32) Rt) > ((ut32) Ns_new))) {seq(r; r = (r & -0x4); jump(pc + ((ut32) r)))} else {{}}; + RzILOpPure *op_GT_5 = UGT(CAST(32, IL_FALSE, Rt), CAST(32, IL_FALSE, Ns_new)); + RzILOpPure *op_INV_6 = INV(op_GT_5); + RzILOpEffect *branch_21 = BRANCH(op_INV_6, seq_then_20, EMPTY()); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_7, branch_21); + return instruction_sequence; +} + +// if (cmp.gtu(Rt,Ns.new)) jump:nt Ii +RzILOpEffect *hex_il_op_j4_cmpltu_t_jumpnv_nt(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + const HexOp Ns_new_op = NREG2OP(bundle, 's'); + RzILOpPure *Ns_new = READ_REG(pkt, &Ns_new_op, true); + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + RzILOpPure *pc = U32(pkt->pkt_addr); + + // r = r; + RzILOpEffect *imm_assign_6 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_12 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_13 = SETL("r", op_AND_12); + + // jump(pc + ((ut32) r)); + RzILOpPure *op_ADD_16 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *jump_op_ADD_16_17 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", op_ADD_16)); + + // seq(r; r = (r & -0x4); jump(pc + ((ut32) r))); + RzILOpEffect *seq_then_19 = SEQN(2, op_ASSIGN_13, jump_op_ADD_16_17); + + // if ((((ut32) Rt) > ((ut32) Ns_new))) {seq(r; r = (r & -0x4); jump(pc + ((ut32) r)))} else {{}}; + RzILOpPure *op_GT_5 = UGT(CAST(32, IL_FALSE, Rt), CAST(32, IL_FALSE, Ns_new)); + RzILOpEffect *branch_20 = BRANCH(op_GT_5, seq_then_19, EMPTY()); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_6, branch_20); + return instruction_sequence; +} + +// if (cmp.gtu(Rt,Ns.new)) jump:t Ii +RzILOpEffect *hex_il_op_j4_cmpltu_t_jumpnv_t(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt 
= READ_REG(pkt, Rt_op, false); + const HexOp Ns_new_op = NREG2OP(bundle, 's'); + RzILOpPure *Ns_new = READ_REG(pkt, &Ns_new_op, true); + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + RzILOpPure *pc = U32(pkt->pkt_addr); + + // r = r; + RzILOpEffect *imm_assign_6 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_12 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_13 = SETL("r", op_AND_12); + + // jump(pc + ((ut32) r)); + RzILOpPure *op_ADD_16 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *jump_op_ADD_16_17 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", op_ADD_16)); + + // seq(r; r = (r & -0x4); jump(pc + ((ut32) r))); + RzILOpEffect *seq_then_19 = SEQN(2, op_ASSIGN_13, jump_op_ADD_16_17); + + // if ((((ut32) Rt) > ((ut32) Ns_new))) {seq(r; r = (r & -0x4); jump(pc + ((ut32) r)))} else {{}}; + RzILOpPure *op_GT_5 = UGT(CAST(32, IL_FALSE, Rt), CAST(32, IL_FALSE, Ns_new)); + RzILOpEffect *branch_20 = BRANCH(op_GT_5, seq_then_19, EMPTY()); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_6, branch_20); + return instruction_sequence; +} + +// hintjr(Rs) +RzILOpEffect *hex_il_op_j4_hintjumpr(HexInsnPktBundle *bundle) { + // READ + + RzILOpEffect *instruction_sequence = EMPTY(); + return instruction_sequence; +} + +// Rd = II ; jump Ii +RzILOpEffect *hex_il_op_j4_jumpseti(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + RzILOpPure *U = UN(32, (ut32)ISA2IMM(hi, 'U')); + RzILOpPure *pc = U32(pkt->pkt_addr); + + // r = r; + RzILOpEffect *imm_assign_0 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_6 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_7 = SETL("r", op_AND_6); + + // U = U; + RzILOpEffect *imm_assign_9 = SETL("U", U); + + // Rd = ((st32) U); + RzILOpEffect *op_ASSIGN_12 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, VARL("U"))); + + // 
jump(pc + ((ut32) r)); + RzILOpPure *op_ADD_15 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *jump_op_ADD_15_16 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", op_ADD_15)); + + RzILOpEffect *instruction_sequence = SEQN(5, imm_assign_0, imm_assign_9, op_ASSIGN_7, op_ASSIGN_12, jump_op_ADD_15_16); + return instruction_sequence; +} + +// Rd = Rs ; jump Ii +RzILOpEffect *hex_il_op_j4_jumpsetr(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + RzILOpPure *pc = U32(pkt->pkt_addr); + + // r = r; + RzILOpEffect *imm_assign_0 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_6 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_7 = SETL("r", op_AND_6); + + // Rd = Rs; + RzILOpEffect *op_ASSIGN_10 = WRITE_REG(bundle, Rd_op, Rs); + + // jump(pc + ((ut32) r)); + RzILOpPure *op_ADD_13 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *jump_op_ADD_13_14 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", op_ADD_13)); + + RzILOpEffect *instruction_sequence = SEQN(4, imm_assign_0, op_ASSIGN_7, op_ASSIGN_10, jump_op_ADD_13_14); + return instruction_sequence; +} + +// if (!tstbit(Ns.new,#0)) jump:nt Ii +RzILOpEffect *hex_il_op_j4_tstbit0_f_jumpnv_nt(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp Ns_new_op = NREG2OP(bundle, 's'); + RzILOpPure *Ns_new = READ_REG(pkt, &Ns_new_op, true); + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + RzILOpPure *pc = U32(pkt->pkt_addr); + + // r = r; + RzILOpEffect *imm_assign_5 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_11 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_12 = SETL("r", op_AND_11); + + // jump(pc + ((ut32) r)); + RzILOpPure *op_ADD_15 = ADD(pc, 
CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *jump_op_ADD_15_16 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", op_ADD_15)); + + // seq(r; r = (r & -0x4); jump(pc + ((ut32) r))); + RzILOpEffect *seq_then_18 = SEQN(2, op_ASSIGN_12, jump_op_ADD_15_16); + + // if (! (Ns_new & 0x1)) {seq(r; r = (r & -0x4); jump(pc + ((ut32) r)))} else {{}}; + RzILOpPure *op_AND_3 = LOGAND(Ns_new, SN(32, 1)); + RzILOpPure *op_INV_4 = INV(NON_ZERO(op_AND_3)); + RzILOpEffect *branch_19 = BRANCH(op_INV_4, seq_then_18, EMPTY()); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_5, branch_19); + return instruction_sequence; +} + +// if (!tstbit(Ns.new,#0)) jump:t Ii +RzILOpEffect *hex_il_op_j4_tstbit0_f_jumpnv_t(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp Ns_new_op = NREG2OP(bundle, 's'); + RzILOpPure *Ns_new = READ_REG(pkt, &Ns_new_op, true); + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + RzILOpPure *pc = U32(pkt->pkt_addr); + + // r = r; + RzILOpEffect *imm_assign_5 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_11 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_12 = SETL("r", op_AND_11); + + // jump(pc + ((ut32) r)); + RzILOpPure *op_ADD_15 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *jump_op_ADD_15_16 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", op_ADD_15)); + + // seq(r; r = (r & -0x4); jump(pc + ((ut32) r))); + RzILOpEffect *seq_then_18 = SEQN(2, op_ASSIGN_12, jump_op_ADD_15_16); + + // if (! 
(Ns_new & 0x1)) {seq(r; r = (r & -0x4); jump(pc + ((ut32) r)))} else {{}}; + RzILOpPure *op_AND_3 = LOGAND(Ns_new, SN(32, 1)); + RzILOpPure *op_INV_4 = INV(NON_ZERO(op_AND_3)); + RzILOpEffect *branch_19 = BRANCH(op_INV_4, seq_then_18, EMPTY()); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_5, branch_19); + return instruction_sequence; +} + +// p0 = tstbit(Rs,#0); if (!p0.new) jump:nt Ii +RzILOpEffect *hex_il_op_j4_tstbit0_fp0_jump_nt_part0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P0_op = EXPLICIT2OP(0, HEX_REG_CLASS_PRED_REGS, false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // P0 = ((st8) ((Rs & 0x1) ? 0xff : 0x0)); + RzILOpPure *op_AND_3 = LOGAND(Rs, SN(32, 1)); + RzILOpPure *cond_6 = ITE(NON_ZERO(op_AND_3), SN(32, 0xff), SN(32, 0)); + RzILOpEffect *op_ASSIGN_8 = WRITE_REG(bundle, &P0_op, CAST(8, MSB(cond_6), DUP(cond_6))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_8; + return instruction_sequence; +} + +// p0 = tstbit(Rs,#0); if (!p0.new) jump:nt Ii +RzILOpEffect *hex_il_op_j4_tstbit0_fp0_jump_nt_part1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P0_new_op = EXPLICIT2OP(0, HEX_REG_CLASS_PRED_REGS, true); + RzILOpPure *P0_new = READ_REG(pkt, &P0_new_op, true); + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + RzILOpPure *pc = U32(pkt->pkt_addr); + + // r = r; + RzILOpEffect *imm_assign_5 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_11 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_12 = SETL("r", op_AND_11); + + // jump(pc + ((ut32) r)); + RzILOpPure *op_ADD_15 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *jump_op_ADD_15_16 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", op_ADD_15)); + + // seq(r; r = (r & -0x4); jump(pc + ((ut32) r))); + RzILOpEffect *seq_then_18 = SEQN(2, op_ASSIGN_12, 
jump_op_ADD_15_16); + + // if (! (((st32) P0_new) & 0x1)) {seq(r; r = (r & -0x4); jump(pc + ((ut32) r)))} else {{}}; + RzILOpPure *op_AND_3 = LOGAND(CAST(32, MSB(P0_new), DUP(P0_new)), SN(32, 1)); + RzILOpPure *op_INV_4 = INV(NON_ZERO(op_AND_3)); + RzILOpEffect *branch_19 = BRANCH(op_INV_4, seq_then_18, EMPTY()); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_5, branch_19); + return instruction_sequence; +} + +// p0 = tstbit(Rs,#0); if (!p0.new) jump:t Ii +RzILOpEffect *hex_il_op_j4_tstbit0_fp0_jump_t_part0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P0_op = EXPLICIT2OP(0, HEX_REG_CLASS_PRED_REGS, false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // P0 = ((st8) ((Rs & 0x1) ? 0xff : 0x0)); + RzILOpPure *op_AND_3 = LOGAND(Rs, SN(32, 1)); + RzILOpPure *cond_6 = ITE(NON_ZERO(op_AND_3), SN(32, 0xff), SN(32, 0)); + RzILOpEffect *op_ASSIGN_8 = WRITE_REG(bundle, &P0_op, CAST(8, MSB(cond_6), DUP(cond_6))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_8; + return instruction_sequence; +} + +// p0 = tstbit(Rs,#0); if (!p0.new) jump:t Ii +RzILOpEffect *hex_il_op_j4_tstbit0_fp0_jump_t_part1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P0_new_op = EXPLICIT2OP(0, HEX_REG_CLASS_PRED_REGS, true); + RzILOpPure *P0_new = READ_REG(pkt, &P0_new_op, true); + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + RzILOpPure *pc = U32(pkt->pkt_addr); + + // r = r; + RzILOpEffect *imm_assign_5 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_11 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_12 = SETL("r", op_AND_11); + + // jump(pc + ((ut32) r)); + RzILOpPure *op_ADD_15 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *jump_op_ADD_15_16 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", op_ADD_15)); + + // seq(r; r = (r & -0x4); jump(pc + 
((ut32) r))); + RzILOpEffect *seq_then_18 = SEQN(2, op_ASSIGN_12, jump_op_ADD_15_16); + + // if (! (((st32) P0_new) & 0x1)) {seq(r; r = (r & -0x4); jump(pc + ((ut32) r)))} else {{}}; + RzILOpPure *op_AND_3 = LOGAND(CAST(32, MSB(P0_new), DUP(P0_new)), SN(32, 1)); + RzILOpPure *op_INV_4 = INV(NON_ZERO(op_AND_3)); + RzILOpEffect *branch_19 = BRANCH(op_INV_4, seq_then_18, EMPTY()); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_5, branch_19); + return instruction_sequence; +} + +// p1 = tstbit(Rs,#0); if (!p1.new) jump:nt Ii +RzILOpEffect *hex_il_op_j4_tstbit0_fp1_jump_nt_part0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P1_op = EXPLICIT2OP(1, HEX_REG_CLASS_PRED_REGS, false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // P1 = ((st8) ((Rs & 0x1) ? 0xff : 0x0)); + RzILOpPure *op_AND_3 = LOGAND(Rs, SN(32, 1)); + RzILOpPure *cond_6 = ITE(NON_ZERO(op_AND_3), SN(32, 0xff), SN(32, 0)); + RzILOpEffect *op_ASSIGN_8 = WRITE_REG(bundle, &P1_op, CAST(8, MSB(cond_6), DUP(cond_6))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_8; + return instruction_sequence; +} + +// p1 = tstbit(Rs,#0); if (!p1.new) jump:nt Ii +RzILOpEffect *hex_il_op_j4_tstbit0_fp1_jump_nt_part1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P1_new_op = EXPLICIT2OP(1, HEX_REG_CLASS_PRED_REGS, true); + RzILOpPure *P1_new = READ_REG(pkt, &P1_new_op, true); + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + RzILOpPure *pc = U32(pkt->pkt_addr); + + // r = r; + RzILOpEffect *imm_assign_5 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_11 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_12 = SETL("r", op_AND_11); + + // jump(pc + ((ut32) r)); + RzILOpPure *op_ADD_15 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *jump_op_ADD_15_16 = SEQ2(SETL("jump_flag", IL_TRUE), 
SETL("jump_target", op_ADD_15)); + + // seq(r; r = (r & -0x4); jump(pc + ((ut32) r))); + RzILOpEffect *seq_then_18 = SEQN(2, op_ASSIGN_12, jump_op_ADD_15_16); + + // if (! (((st32) P1_new) & 0x1)) {seq(r; r = (r & -0x4); jump(pc + ((ut32) r)))} else {{}}; + RzILOpPure *op_AND_3 = LOGAND(CAST(32, MSB(P1_new), DUP(P1_new)), SN(32, 1)); + RzILOpPure *op_INV_4 = INV(NON_ZERO(op_AND_3)); + RzILOpEffect *branch_19 = BRANCH(op_INV_4, seq_then_18, EMPTY()); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_5, branch_19); + return instruction_sequence; +} + +// p1 = tstbit(Rs,#0); if (!p1.new) jump:t Ii +RzILOpEffect *hex_il_op_j4_tstbit0_fp1_jump_t_part0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P1_op = EXPLICIT2OP(1, HEX_REG_CLASS_PRED_REGS, false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // P1 = ((st8) ((Rs & 0x1) ? 0xff : 0x0)); + RzILOpPure *op_AND_3 = LOGAND(Rs, SN(32, 1)); + RzILOpPure *cond_6 = ITE(NON_ZERO(op_AND_3), SN(32, 0xff), SN(32, 0)); + RzILOpEffect *op_ASSIGN_8 = WRITE_REG(bundle, &P1_op, CAST(8, MSB(cond_6), DUP(cond_6))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_8; + return instruction_sequence; +} + +// p1 = tstbit(Rs,#0); if (!p1.new) jump:t Ii +RzILOpEffect *hex_il_op_j4_tstbit0_fp1_jump_t_part1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P1_new_op = EXPLICIT2OP(1, HEX_REG_CLASS_PRED_REGS, true); + RzILOpPure *P1_new = READ_REG(pkt, &P1_new_op, true); + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + RzILOpPure *pc = U32(pkt->pkt_addr); + + // r = r; + RzILOpEffect *imm_assign_5 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_11 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_12 = SETL("r", op_AND_11); + + // jump(pc + ((ut32) r)); + RzILOpPure *op_ADD_15 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + 
RzILOpEffect *jump_op_ADD_15_16 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", op_ADD_15)); + + // seq(r; r = (r & -0x4); jump(pc + ((ut32) r))); + RzILOpEffect *seq_then_18 = SEQN(2, op_ASSIGN_12, jump_op_ADD_15_16); + + // if (! (((st32) P1_new) & 0x1)) {seq(r; r = (r & -0x4); jump(pc + ((ut32) r)))} else {{}}; + RzILOpPure *op_AND_3 = LOGAND(CAST(32, MSB(P1_new), DUP(P1_new)), SN(32, 1)); + RzILOpPure *op_INV_4 = INV(NON_ZERO(op_AND_3)); + RzILOpEffect *branch_19 = BRANCH(op_INV_4, seq_then_18, EMPTY()); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_5, branch_19); + return instruction_sequence; +} + +// if (tstbit(Ns.new,#0)) jump:nt Ii +RzILOpEffect *hex_il_op_j4_tstbit0_t_jumpnv_nt(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp Ns_new_op = NREG2OP(bundle, 's'); + RzILOpPure *Ns_new = READ_REG(pkt, &Ns_new_op, true); + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + RzILOpPure *pc = U32(pkt->pkt_addr); + + // r = r; + RzILOpEffect *imm_assign_4 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_10 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_11 = SETL("r", op_AND_10); + + // jump(pc + ((ut32) r)); + RzILOpPure *op_ADD_14 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *jump_op_ADD_14_15 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", op_ADD_14)); + + // seq(r; r = (r & -0x4); jump(pc + ((ut32) r))); + RzILOpEffect *seq_then_17 = SEQN(2, op_ASSIGN_11, jump_op_ADD_14_15); + + // if ((Ns_new & 0x1)) {seq(r; r = (r & -0x4); jump(pc + ((ut32) r)))} else {{}}; + RzILOpPure *op_AND_3 = LOGAND(Ns_new, SN(32, 1)); + RzILOpEffect *branch_18 = BRANCH(NON_ZERO(op_AND_3), seq_then_17, EMPTY()); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_4, branch_18); + return instruction_sequence; +} + +// if (tstbit(Ns.new,#0)) jump:t Ii +RzILOpEffect *hex_il_op_j4_tstbit0_t_jumpnv_t(HexInsnPktBundle *bundle) { + const HexInsn *hi = 
bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp Ns_new_op = NREG2OP(bundle, 's'); + RzILOpPure *Ns_new = READ_REG(pkt, &Ns_new_op, true); + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + RzILOpPure *pc = U32(pkt->pkt_addr); + + // r = r; + RzILOpEffect *imm_assign_4 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_10 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_11 = SETL("r", op_AND_10); + + // jump(pc + ((ut32) r)); + RzILOpPure *op_ADD_14 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *jump_op_ADD_14_15 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", op_ADD_14)); + + // seq(r; r = (r & -0x4); jump(pc + ((ut32) r))); + RzILOpEffect *seq_then_17 = SEQN(2, op_ASSIGN_11, jump_op_ADD_14_15); + + // if ((Ns_new & 0x1)) {seq(r; r = (r & -0x4); jump(pc + ((ut32) r)))} else {{}}; + RzILOpPure *op_AND_3 = LOGAND(Ns_new, SN(32, 1)); + RzILOpEffect *branch_18 = BRANCH(NON_ZERO(op_AND_3), seq_then_17, EMPTY()); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_4, branch_18); + return instruction_sequence; +} + +// p0 = tstbit(Rs,#0); if (p0.new) jump:nt Ii +RzILOpEffect *hex_il_op_j4_tstbit0_tp0_jump_nt_part0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P0_op = EXPLICIT2OP(0, HEX_REG_CLASS_PRED_REGS, false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // P0 = ((st8) ((Rs & 0x1) ? 
0xff : 0x0)); + RzILOpPure *op_AND_3 = LOGAND(Rs, SN(32, 1)); + RzILOpPure *cond_6 = ITE(NON_ZERO(op_AND_3), SN(32, 0xff), SN(32, 0)); + RzILOpEffect *op_ASSIGN_8 = WRITE_REG(bundle, &P0_op, CAST(8, MSB(cond_6), DUP(cond_6))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_8; + return instruction_sequence; +} + +// p0 = tstbit(Rs,#0); if (p0.new) jump:nt Ii +RzILOpEffect *hex_il_op_j4_tstbit0_tp0_jump_nt_part1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P0_new_op = EXPLICIT2OP(0, HEX_REG_CLASS_PRED_REGS, true); + RzILOpPure *P0_new = READ_REG(pkt, &P0_new_op, true); + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + RzILOpPure *pc = U32(pkt->pkt_addr); + + // r = r; + RzILOpEffect *imm_assign_4 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_10 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_11 = SETL("r", op_AND_10); + + // jump(pc + ((ut32) r)); + RzILOpPure *op_ADD_14 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *jump_op_ADD_14_15 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", op_ADD_14)); + + // seq(r; r = (r & -0x4); jump(pc + ((ut32) r))); + RzILOpEffect *seq_then_17 = SEQN(2, op_ASSIGN_11, jump_op_ADD_14_15); + + // if ((((st32) P0_new) & 0x1)) {seq(r; r = (r & -0x4); jump(pc + ((ut32) r)))} else {{}}; + RzILOpPure *op_AND_3 = LOGAND(CAST(32, MSB(P0_new), DUP(P0_new)), SN(32, 1)); + RzILOpEffect *branch_18 = BRANCH(NON_ZERO(op_AND_3), seq_then_17, EMPTY()); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_4, branch_18); + return instruction_sequence; +} + +// p0 = tstbit(Rs,#0); if (p0.new) jump:t Ii +RzILOpEffect *hex_il_op_j4_tstbit0_tp0_jump_t_part0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P0_op = EXPLICIT2OP(0, HEX_REG_CLASS_PRED_REGS, false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // P0 = 
((st8) ((Rs & 0x1) ? 0xff : 0x0)); + RzILOpPure *op_AND_3 = LOGAND(Rs, SN(32, 1)); + RzILOpPure *cond_6 = ITE(NON_ZERO(op_AND_3), SN(32, 0xff), SN(32, 0)); + RzILOpEffect *op_ASSIGN_8 = WRITE_REG(bundle, &P0_op, CAST(8, MSB(cond_6), DUP(cond_6))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_8; + return instruction_sequence; +} + +// p0 = tstbit(Rs,#0); if (p0.new) jump:t Ii +RzILOpEffect *hex_il_op_j4_tstbit0_tp0_jump_t_part1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P0_new_op = EXPLICIT2OP(0, HEX_REG_CLASS_PRED_REGS, true); + RzILOpPure *P0_new = READ_REG(pkt, &P0_new_op, true); + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + RzILOpPure *pc = U32(pkt->pkt_addr); + + // r = r; + RzILOpEffect *imm_assign_4 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_10 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_11 = SETL("r", op_AND_10); + + // jump(pc + ((ut32) r)); + RzILOpPure *op_ADD_14 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *jump_op_ADD_14_15 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", op_ADD_14)); + + // seq(r; r = (r & -0x4); jump(pc + ((ut32) r))); + RzILOpEffect *seq_then_17 = SEQN(2, op_ASSIGN_11, jump_op_ADD_14_15); + + // if ((((st32) P0_new) & 0x1)) {seq(r; r = (r & -0x4); jump(pc + ((ut32) r)))} else {{}}; + RzILOpPure *op_AND_3 = LOGAND(CAST(32, MSB(P0_new), DUP(P0_new)), SN(32, 1)); + RzILOpEffect *branch_18 = BRANCH(NON_ZERO(op_AND_3), seq_then_17, EMPTY()); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_4, branch_18); + return instruction_sequence; +} + +// p1 = tstbit(Rs,#0); if (p1.new) jump:nt Ii +RzILOpEffect *hex_il_op_j4_tstbit0_tp1_jump_nt_part0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P1_op = EXPLICIT2OP(1, HEX_REG_CLASS_PRED_REGS, false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, 
false); + + // P1 = ((st8) ((Rs & 0x1) ? 0xff : 0x0)); + RzILOpPure *op_AND_3 = LOGAND(Rs, SN(32, 1)); + RzILOpPure *cond_6 = ITE(NON_ZERO(op_AND_3), SN(32, 0xff), SN(32, 0)); + RzILOpEffect *op_ASSIGN_8 = WRITE_REG(bundle, &P1_op, CAST(8, MSB(cond_6), DUP(cond_6))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_8; + return instruction_sequence; +} + +// p1 = tstbit(Rs,#0); if (p1.new) jump:nt Ii +RzILOpEffect *hex_il_op_j4_tstbit0_tp1_jump_nt_part1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P1_new_op = EXPLICIT2OP(1, HEX_REG_CLASS_PRED_REGS, true); + RzILOpPure *P1_new = READ_REG(pkt, &P1_new_op, true); + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + RzILOpPure *pc = U32(pkt->pkt_addr); + + // r = r; + RzILOpEffect *imm_assign_4 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_10 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_11 = SETL("r", op_AND_10); + + // jump(pc + ((ut32) r)); + RzILOpPure *op_ADD_14 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *jump_op_ADD_14_15 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", op_ADD_14)); + + // seq(r; r = (r & -0x4); jump(pc + ((ut32) r))); + RzILOpEffect *seq_then_17 = SEQN(2, op_ASSIGN_11, jump_op_ADD_14_15); + + // if ((((st32) P1_new) & 0x1)) {seq(r; r = (r & -0x4); jump(pc + ((ut32) r)))} else {{}}; + RzILOpPure *op_AND_3 = LOGAND(CAST(32, MSB(P1_new), DUP(P1_new)), SN(32, 1)); + RzILOpEffect *branch_18 = BRANCH(NON_ZERO(op_AND_3), seq_then_17, EMPTY()); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_4, branch_18); + return instruction_sequence; +} + +// p1 = tstbit(Rs,#0); if (p1.new) jump:t Ii +RzILOpEffect *hex_il_op_j4_tstbit0_tp1_jump_t_part0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P1_op = EXPLICIT2OP(1, HEX_REG_CLASS_PRED_REGS, false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = 
READ_REG(pkt, Rs_op, false); + + // P1 = ((st8) ((Rs & 0x1) ? 0xff : 0x0)); + RzILOpPure *op_AND_3 = LOGAND(Rs, SN(32, 1)); + RzILOpPure *cond_6 = ITE(NON_ZERO(op_AND_3), SN(32, 0xff), SN(32, 0)); + RzILOpEffect *op_ASSIGN_8 = WRITE_REG(bundle, &P1_op, CAST(8, MSB(cond_6), DUP(cond_6))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_8; + return instruction_sequence; +} + +// p1 = tstbit(Rs,#0); if (p1.new) jump:t Ii +RzILOpEffect *hex_il_op_j4_tstbit0_tp1_jump_t_part1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P1_new_op = EXPLICIT2OP(1, HEX_REG_CLASS_PRED_REGS, true); + RzILOpPure *P1_new = READ_REG(pkt, &P1_new_op, true); + RzILOpPure *r = SN(32, (st32)ISA2IMM(hi, 'r')); + RzILOpPure *pc = U32(pkt->pkt_addr); + + // r = r; + RzILOpEffect *imm_assign_4 = SETL("r", r); + + // r = (r & -0x4); + RzILOpPure *op_AND_10 = LOGAND(VARL("r"), SN(32, -4)); + RzILOpEffect *op_ASSIGN_11 = SETL("r", op_AND_10); + + // jump(pc + ((ut32) r)); + RzILOpPure *op_ADD_14 = ADD(pc, CAST(32, IL_FALSE, VARL("r"))); + RzILOpEffect *jump_op_ADD_14_15 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", op_ADD_14)); + + // seq(r; r = (r & -0x4); jump(pc + ((ut32) r))); + RzILOpEffect *seq_then_17 = SEQN(2, op_ASSIGN_11, jump_op_ADD_14_15); + + // if ((((st32) P1_new) & 0x1)) {seq(r; r = (r & -0x4); jump(pc + ((ut32) r)))} else {{}}; + RzILOpPure *op_AND_3 = LOGAND(CAST(32, MSB(P1_new), DUP(P1_new)), SN(32, 1)); + RzILOpEffect *branch_18 = BRANCH(NON_ZERO(op_AND_3), seq_then_17, EMPTY()); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_4, branch_18); + return instruction_sequence; +} + +#include \ No newline at end of file diff --git a/librz/arch/isa/hexagon/il_ops/hexagon_il_L2_ops.c b/librz/arch/isa/hexagon/il_ops/hexagon_il_L2_ops.c new file mode 100644 index 00000000000..322f5d7ae16 --- /dev/null +++ b/librz/arch/isa/hexagon/il_ops/hexagon_il_L2_ops.c @@ -0,0 +1,5717 @@ +// 
SPDX-FileCopyrightText: 2021 Rot127 +// SPDX-License-Identifier: LGPL-3.0-only + +// LLVM commit: b6f51787f6c8e77143f0aef6b58ddc7c55741d5c +// LLVM commit date: 2023-11-15 07:10:59 -0800 (ISO 8601 format) +// Date of code generation: 2024-03-16 06:22:39-05:00 +//======================================== +// The following code is generated. +// Do not edit. Repository of code generator: +// https://github.com/rizinorg/rz-hexagon + +#include +#include "../hexagon_il.h" +#include +#include + +// Rdd = deallocframe(Rs):raw +RzILOpEffect *hex_il_op_l2_deallocframe(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut64 tmp; + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp framekey_op = ALIAS2OP(HEX_REG_ALIAS_FRAMEKEY, false); + RzILOpPure *framekey = READ_REG(pkt, &framekey_op, false); + const HexOp sp_op = ALIAS2OP(HEX_REG_ALIAS_SP, false); + + // EA = ((ut32) Rs); + RzILOpEffect *op_ASSIGN_4 = SETL("EA", CAST(32, IL_FALSE, Rs)); + + // tmp = ((ut64) mem_load_64(EA)); + RzILOpPure *ml_EA_6 = LOADW(64, VARL("EA")); + RzILOpEffect *op_ASSIGN_8 = SETL("tmp", CAST(64, IL_FALSE, ml_EA_6)); + + // Rdd = ((st64) (tmp ^ (((ut64) framekey) << 0x20))); + RzILOpPure *op_LSHIFT_13 = SHIFTL0(CAST(64, IL_FALSE, framekey), SN(32, 0x20)); + RzILOpPure *op_XOR_14 = LOGXOR(VARL("tmp"), op_LSHIFT_13); + RzILOpEffect *op_ASSIGN_16 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_XOR_14)); + + // sp = EA + ((ut32) 0x8); + RzILOpPure *op_ADD_20 = ADD(VARL("EA"), CAST(32, IL_FALSE, SN(32, 8))); + RzILOpEffect *op_ASSIGN_21 = WRITE_REG(bundle, &sp_op, op_ADD_20); + + RzILOpEffect *instruction_sequence = SEQN(4, op_ASSIGN_4, op_ASSIGN_8, op_ASSIGN_16, op_ASSIGN_21); + return instruction_sequence; +} + +// Ryy = memb_fifo(Rs+Ii) +RzILOpEffect *hex_il_op_l2_loadalignb_io(HexInsnPktBundle *bundle) { + 
const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + // Declare: ut64 tmpV; + const HexOp *Ryy_op = ISA2REG(hi, 'y', false); + RzILOpPure *Ryy = READ_REG(pkt, Ryy_op, false); + + // s = s; + RzILOpEffect *imm_assign_0 = SETL("s", s); + + // EA = ((ut32) Rs + s); + RzILOpPure *op_ADD_4 = ADD(Rs, VARL("s")); + RzILOpEffect *op_ASSIGN_6 = SETL("EA", CAST(32, IL_FALSE, op_ADD_4)); + + // tmpV = ((ut64) ((ut8) mem_load_8(EA))); + RzILOpPure *ml_EA_9 = LOADW(8, VARL("EA")); + RzILOpEffect *op_ASSIGN_12 = SETL("tmpV", CAST(64, IL_FALSE, CAST(8, IL_FALSE, ml_EA_9))); + + // Ryy = ((st64) ((((ut64) Ryy) >> 0x8) | (tmpV << 0x38))); + RzILOpPure *op_RSHIFT_16 = SHIFTR0(CAST(64, IL_FALSE, Ryy), SN(32, 8)); + RzILOpPure *op_LSHIFT_18 = SHIFTL0(VARL("tmpV"), SN(32, 0x38)); + RzILOpPure *op_OR_19 = LOGOR(op_RSHIFT_16, op_LSHIFT_18); + RzILOpEffect *op_ASSIGN_21 = WRITE_REG(bundle, Ryy_op, CAST(64, IL_FALSE, op_OR_19)); + + RzILOpEffect *instruction_sequence = SEQN(4, imm_assign_0, op_ASSIGN_6, op_ASSIGN_12, op_ASSIGN_21); + return instruction_sequence; +} + +// Ryy = memb_fifo(Rx++Mu:brev) +RzILOpEffect *hex_il_op_l2_loadalignb_pbr(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Mu_op = ISA2REG(hi, 'u', false); + RzILOpPure *Mu = READ_REG(pkt, Mu_op, false); + // Declare: ut64 tmpV; + const HexOp *Ryy_op = ISA2REG(hi, 'y', false); + RzILOpPure *Ryy = READ_REG(pkt, Ryy_op, false); + + // fbrev(((ut32) Rx)); + RzILOpEffect *fbrev_call_3 = hex_fbrev(CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // h_tmp164 = fbrev(((ut32) Rx)); + RzILOpEffect *op_ASSIGN_hybrid_tmp_5 = SETL("h_tmp164", UNSIGNED(32, VARL("ret_val"))); + + // seq(fbrev(((ut32) Rx)); 
h_tmp164 = fbrev(((ut32) Rx))); + RzILOpEffect *seq_6 = SEQN(2, fbrev_call_3, op_ASSIGN_hybrid_tmp_5); + + // EA = h_tmp164; + RzILOpEffect *op_ASSIGN_7 = SETL("EA", VARL("h_tmp164")); + + // seq(seq(fbrev(((ut32) Rx)); h_tmp164 = fbrev(((ut32) Rx))); EA = ...; + RzILOpEffect *seq_8 = SEQN(2, seq_6, op_ASSIGN_7); + + // Rx = Rx + Mu; + RzILOpPure *op_ADD_10 = ADD(READ_REG(pkt, Rx_op, false), Mu); + RzILOpEffect *op_ASSIGN_11 = WRITE_REG(bundle, Rx_op, op_ADD_10); + + // tmpV = ((ut64) ((ut8) mem_load_8(EA))); + RzILOpPure *ml_EA_14 = LOADW(8, VARL("EA")); + RzILOpEffect *op_ASSIGN_17 = SETL("tmpV", CAST(64, IL_FALSE, CAST(8, IL_FALSE, ml_EA_14))); + + // Ryy = ((st64) ((((ut64) Ryy) >> 0x8) | (tmpV << 0x38))); + RzILOpPure *op_RSHIFT_21 = SHIFTR0(CAST(64, IL_FALSE, Ryy), SN(32, 8)); + RzILOpPure *op_LSHIFT_23 = SHIFTL0(VARL("tmpV"), SN(32, 0x38)); + RzILOpPure *op_OR_24 = LOGOR(op_RSHIFT_21, op_LSHIFT_23); + RzILOpEffect *op_ASSIGN_26 = WRITE_REG(bundle, Ryy_op, CAST(64, IL_FALSE, op_OR_24)); + + RzILOpEffect *instruction_sequence = SEQN(4, seq_8, op_ASSIGN_11, op_ASSIGN_17, op_ASSIGN_26); + return instruction_sequence; +} + +// Ryy = memb_fifo(Rx++Ii:circ(Mu)) +RzILOpEffect *hex_il_op_l2_loadalignb_pci(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + const HexOp *Mu_op = ISA2REG(hi, 'u', false); + RzILOpPure *Mu = READ_REG(pkt, Mu_op, false); + // Declare: ut64 tmpV; + const HexOp *Ryy_op = ISA2REG(hi, 'y', false); + RzILOpPure *Ryy = READ_REG(pkt, Ryy_op, false); + + // EA = ((ut32) Rx); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // s = s; + RzILOpEffect *imm_assign_5 = SETL("s", s); + + // fcirc_add(bundle, Rx, s, Mu, get_corresponding_CS(pkt, Mu)); + RzILOpEffect *fcirc_add_call_9 = hex_fcirc_add(bundle, Rx_op, VARL("s"), Mu, 
HEX_GET_CORRESPONDING_CS(pkt, Mu_op)); + + // h_tmp165 = fcirc_add(bundle, Rx, s, Mu, get_corresponding_CS(pkt, Mu)); + RzILOpEffect *op_ASSIGN_hybrid_tmp_11 = SETL("h_tmp165", SIGNED(32, VARL("ret_val"))); + + // seq(fcirc_add(bundle, Rx, s, Mu, get_corresponding_CS(pkt, Mu)); ...; + RzILOpEffect *seq_12 = SEQN(2, fcirc_add_call_9, op_ASSIGN_hybrid_tmp_11); + + // tmpV = ((ut64) ((ut8) mem_load_8(EA))); + RzILOpPure *ml_EA_15 = LOADW(8, VARL("EA")); + RzILOpEffect *op_ASSIGN_18 = SETL("tmpV", CAST(64, IL_FALSE, CAST(8, IL_FALSE, ml_EA_15))); + + // Ryy = ((st64) ((((ut64) Ryy) >> 0x8) | (tmpV << 0x38))); + RzILOpPure *op_RSHIFT_22 = SHIFTR0(CAST(64, IL_FALSE, Ryy), SN(32, 8)); + RzILOpPure *op_LSHIFT_24 = SHIFTL0(VARL("tmpV"), SN(32, 0x38)); + RzILOpPure *op_OR_25 = LOGOR(op_RSHIFT_22, op_LSHIFT_24); + RzILOpEffect *op_ASSIGN_27 = WRITE_REG(bundle, Ryy_op, CAST(64, IL_FALSE, op_OR_25)); + + RzILOpEffect *instruction_sequence = SEQN(5, imm_assign_5, seq_12, op_ASSIGN_3, op_ASSIGN_18, op_ASSIGN_27); + return instruction_sequence; +} + +// Ryy = memb_fifo(Rx++I:circ(Mu)) +RzILOpEffect *hex_il_op_l2_loadalignb_pcr(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Mu_op = ISA2REG(hi, 'u', false); + RzILOpPure *Mu = READ_REG(pkt, Mu_op, false); + // Declare: ut64 tmpV; + const HexOp *Ryy_op = ISA2REG(hi, 'y', false); + RzILOpPure *Ryy = READ_REG(pkt, Ryy_op, false); + + // EA = ((ut32) Rx); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // fcirc_add(bundle, Rx, ((st32) (sextract64(((ut64) (((Mu & 0xf0000000) >> 0x15) | ((Mu >> 0x11) & 0x7f))), 0x0, 0xb) << 0x0)), Mu, get_corresponding_CS(pkt, Mu)); + RzILOpPure *op_AND_10 = LOGAND(DUP(Mu), SN(32, 0xf0000000)); + RzILOpPure *op_RSHIFT_12 = SHIFTRA(op_AND_10, SN(32, 21)); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(DUP(Mu), SN(32, 17)); + 
RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0x7f)); + RzILOpPure *op_OR_17 = LOGOR(op_RSHIFT_12, op_AND_16); + RzILOpPure *op_LSHIFT_24 = SHIFTL0(SEXTRACT64(CAST(64, IL_FALSE, op_OR_17), SN(32, 0), SN(32, 11)), SN(32, 0)); + RzILOpEffect *fcirc_add_call_27 = hex_fcirc_add(bundle, Rx_op, CAST(32, MSB(op_LSHIFT_24), DUP(op_LSHIFT_24)), Mu, HEX_GET_CORRESPONDING_CS(pkt, Mu_op)); + + // h_tmp166 = fcirc_add(bundle, Rx, ((st32) (sextract64(((ut64) (((Mu & 0xf0000000) >> 0x15) | ((Mu >> 0x11) & 0x7f))), 0x0, 0xb) << 0x0)), Mu, get_corresponding_CS(pkt, Mu)); + RzILOpEffect *op_ASSIGN_hybrid_tmp_29 = SETL("h_tmp166", SIGNED(32, VARL("ret_val"))); + + // seq(fcirc_add(bundle, Rx, ((st32) (sextract64(((ut64) (((Mu & 0x ...; + RzILOpEffect *seq_30 = SEQN(2, fcirc_add_call_27, op_ASSIGN_hybrid_tmp_29); + + // tmpV = ((ut64) ((ut8) mem_load_8(EA))); + RzILOpPure *ml_EA_33 = LOADW(8, VARL("EA")); + RzILOpEffect *op_ASSIGN_36 = SETL("tmpV", CAST(64, IL_FALSE, CAST(8, IL_FALSE, ml_EA_33))); + + // Ryy = ((st64) ((((ut64) Ryy) >> 0x8) | (tmpV << 0x38))); + RzILOpPure *op_RSHIFT_40 = SHIFTR0(CAST(64, IL_FALSE, Ryy), SN(32, 8)); + RzILOpPure *op_LSHIFT_42 = SHIFTL0(VARL("tmpV"), SN(32, 0x38)); + RzILOpPure *op_OR_43 = LOGOR(op_RSHIFT_40, op_LSHIFT_42); + RzILOpEffect *op_ASSIGN_45 = WRITE_REG(bundle, Ryy_op, CAST(64, IL_FALSE, op_OR_43)); + + RzILOpEffect *instruction_sequence = SEQN(4, seq_30, op_ASSIGN_3, op_ASSIGN_36, op_ASSIGN_45); + return instruction_sequence; +} + +// Ryy = memb_fifo(Rx++Ii) +RzILOpEffect *hex_il_op_l2_loadalignb_pi(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + // Declare: ut64 tmpV; + const HexOp *Ryy_op = ISA2REG(hi, 'y', false); + RzILOpPure *Ryy = READ_REG(pkt, Ryy_op, false); + + // EA = ((ut32) Rx); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, 
READ_REG(pkt, Rx_op, false))); + + // s = s; + RzILOpEffect *imm_assign_5 = SETL("s", s); + + // Rx = Rx + s; + RzILOpPure *op_ADD_7 = ADD(READ_REG(pkt, Rx_op, false), VARL("s")); + RzILOpEffect *op_ASSIGN_8 = WRITE_REG(bundle, Rx_op, op_ADD_7); + + // tmpV = ((ut64) ((ut8) mem_load_8(EA))); + RzILOpPure *ml_EA_11 = LOADW(8, VARL("EA")); + RzILOpEffect *op_ASSIGN_14 = SETL("tmpV", CAST(64, IL_FALSE, CAST(8, IL_FALSE, ml_EA_11))); + + // Ryy = ((st64) ((((ut64) Ryy) >> 0x8) | (tmpV << 0x38))); + RzILOpPure *op_RSHIFT_18 = SHIFTR0(CAST(64, IL_FALSE, Ryy), SN(32, 8)); + RzILOpPure *op_LSHIFT_20 = SHIFTL0(VARL("tmpV"), SN(32, 0x38)); + RzILOpPure *op_OR_21 = LOGOR(op_RSHIFT_18, op_LSHIFT_20); + RzILOpEffect *op_ASSIGN_23 = WRITE_REG(bundle, Ryy_op, CAST(64, IL_FALSE, op_OR_21)); + + RzILOpEffect *instruction_sequence = SEQN(5, imm_assign_5, op_ASSIGN_3, op_ASSIGN_8, op_ASSIGN_14, op_ASSIGN_23); + return instruction_sequence; +} + +// Ryy = memb_fifo(Rx++Mu) +RzILOpEffect *hex_il_op_l2_loadalignb_pr(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Mu_op = ISA2REG(hi, 'u', false); + RzILOpPure *Mu = READ_REG(pkt, Mu_op, false); + // Declare: ut64 tmpV; + const HexOp *Ryy_op = ISA2REG(hi, 'y', false); + RzILOpPure *Ryy = READ_REG(pkt, Ryy_op, false); + + // EA = ((ut32) Rx); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // Rx = Rx + Mu; + RzILOpPure *op_ADD_6 = ADD(READ_REG(pkt, Rx_op, false), Mu); + RzILOpEffect *op_ASSIGN_7 = WRITE_REG(bundle, Rx_op, op_ADD_6); + + // tmpV = ((ut64) ((ut8) mem_load_8(EA))); + RzILOpPure *ml_EA_10 = LOADW(8, VARL("EA")); + RzILOpEffect *op_ASSIGN_13 = SETL("tmpV", CAST(64, IL_FALSE, CAST(8, IL_FALSE, ml_EA_10))); + + // Ryy = ((st64) ((((ut64) Ryy) >> 0x8) | (tmpV << 0x38))); + RzILOpPure *op_RSHIFT_17 = SHIFTR0(CAST(64, IL_FALSE, Ryy), SN(32, 8)); + 
RzILOpPure *op_LSHIFT_19 = SHIFTL0(VARL("tmpV"), SN(32, 0x38)); + RzILOpPure *op_OR_20 = LOGOR(op_RSHIFT_17, op_LSHIFT_19); + RzILOpEffect *op_ASSIGN_22 = WRITE_REG(bundle, Ryy_op, CAST(64, IL_FALSE, op_OR_20)); + + RzILOpEffect *instruction_sequence = SEQN(4, op_ASSIGN_3, op_ASSIGN_7, op_ASSIGN_13, op_ASSIGN_22); + return instruction_sequence; +} + +// Ryy = memh_fifo(Rs+Ii) +RzILOpEffect *hex_il_op_l2_loadalignh_io(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + // Declare: ut64 tmpV; + const HexOp *Ryy_op = ISA2REG(hi, 'y', false); + RzILOpPure *Ryy = READ_REG(pkt, Ryy_op, false); + + // s = s; + RzILOpEffect *imm_assign_0 = SETL("s", s); + + // EA = ((ut32) Rs + s); + RzILOpPure *op_ADD_4 = ADD(Rs, VARL("s")); + RzILOpEffect *op_ASSIGN_6 = SETL("EA", CAST(32, IL_FALSE, op_ADD_4)); + + // tmpV = ((ut64) ((ut16) mem_load_16(EA))); + RzILOpPure *ml_EA_9 = LOADW(16, VARL("EA")); + RzILOpEffect *op_ASSIGN_12 = SETL("tmpV", CAST(64, IL_FALSE, CAST(16, IL_FALSE, ml_EA_9))); + + // Ryy = ((st64) ((((ut64) Ryy) >> 0x10) | (tmpV << 0x30))); + RzILOpPure *op_RSHIFT_16 = SHIFTR0(CAST(64, IL_FALSE, Ryy), SN(32, 16)); + RzILOpPure *op_LSHIFT_18 = SHIFTL0(VARL("tmpV"), SN(32, 0x30)); + RzILOpPure *op_OR_19 = LOGOR(op_RSHIFT_16, op_LSHIFT_18); + RzILOpEffect *op_ASSIGN_21 = WRITE_REG(bundle, Ryy_op, CAST(64, IL_FALSE, op_OR_19)); + + RzILOpEffect *instruction_sequence = SEQN(4, imm_assign_0, op_ASSIGN_6, op_ASSIGN_12, op_ASSIGN_21); + return instruction_sequence; +} + +// Ryy = memh_fifo(Rx++Mu:brev) +RzILOpEffect *hex_il_op_l2_loadalignh_pbr(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Mu_op = ISA2REG(hi, 'u', 
false); + RzILOpPure *Mu = READ_REG(pkt, Mu_op, false); + // Declare: ut64 tmpV; + const HexOp *Ryy_op = ISA2REG(hi, 'y', false); + RzILOpPure *Ryy = READ_REG(pkt, Ryy_op, false); + + // fbrev(((ut32) Rx)); + RzILOpEffect *fbrev_call_3 = hex_fbrev(CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // h_tmp167 = fbrev(((ut32) Rx)); + RzILOpEffect *op_ASSIGN_hybrid_tmp_5 = SETL("h_tmp167", UNSIGNED(32, VARL("ret_val"))); + + // seq(fbrev(((ut32) Rx)); h_tmp167 = fbrev(((ut32) Rx))); + RzILOpEffect *seq_6 = SEQN(2, fbrev_call_3, op_ASSIGN_hybrid_tmp_5); + + // EA = h_tmp167; + RzILOpEffect *op_ASSIGN_7 = SETL("EA", VARL("h_tmp167")); + + // seq(seq(fbrev(((ut32) Rx)); h_tmp167 = fbrev(((ut32) Rx))); EA = ...; + RzILOpEffect *seq_8 = SEQN(2, seq_6, op_ASSIGN_7); + + // Rx = Rx + Mu; + RzILOpPure *op_ADD_10 = ADD(READ_REG(pkt, Rx_op, false), Mu); + RzILOpEffect *op_ASSIGN_11 = WRITE_REG(bundle, Rx_op, op_ADD_10); + + // tmpV = ((ut64) ((ut16) mem_load_16(EA))); + RzILOpPure *ml_EA_14 = LOADW(16, VARL("EA")); + RzILOpEffect *op_ASSIGN_17 = SETL("tmpV", CAST(64, IL_FALSE, CAST(16, IL_FALSE, ml_EA_14))); + + // Ryy = ((st64) ((((ut64) Ryy) >> 0x10) | (tmpV << 0x30))); + RzILOpPure *op_RSHIFT_21 = SHIFTR0(CAST(64, IL_FALSE, Ryy), SN(32, 16)); + RzILOpPure *op_LSHIFT_23 = SHIFTL0(VARL("tmpV"), SN(32, 0x30)); + RzILOpPure *op_OR_24 = LOGOR(op_RSHIFT_21, op_LSHIFT_23); + RzILOpEffect *op_ASSIGN_26 = WRITE_REG(bundle, Ryy_op, CAST(64, IL_FALSE, op_OR_24)); + + RzILOpEffect *instruction_sequence = SEQN(4, seq_8, op_ASSIGN_11, op_ASSIGN_17, op_ASSIGN_26); + return instruction_sequence; +} + +// Ryy = memh_fifo(Rx++Ii:circ(Mu)) +RzILOpEffect *hex_il_op_l2_loadalignh_pci(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + const HexOp *Mu_op = ISA2REG(hi, 'u', false); + RzILOpPure *Mu = READ_REG(pkt, Mu_op, 
false); + // Declare: ut64 tmpV; + const HexOp *Ryy_op = ISA2REG(hi, 'y', false); + RzILOpPure *Ryy = READ_REG(pkt, Ryy_op, false); + + // EA = ((ut32) Rx); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // s = s; + RzILOpEffect *imm_assign_5 = SETL("s", s); + + // fcirc_add(bundle, Rx, s, Mu, get_corresponding_CS(pkt, Mu)); + RzILOpEffect *fcirc_add_call_9 = hex_fcirc_add(bundle, Rx_op, VARL("s"), Mu, HEX_GET_CORRESPONDING_CS(pkt, Mu_op)); + + // h_tmp168 = fcirc_add(bundle, Rx, s, Mu, get_corresponding_CS(pkt, Mu)); + RzILOpEffect *op_ASSIGN_hybrid_tmp_11 = SETL("h_tmp168", SIGNED(32, VARL("ret_val"))); + + // seq(fcirc_add(bundle, Rx, s, Mu, get_corresponding_CS(pkt, Mu)); ...; + RzILOpEffect *seq_12 = SEQN(2, fcirc_add_call_9, op_ASSIGN_hybrid_tmp_11); + + // tmpV = ((ut64) ((ut16) mem_load_16(EA))); + RzILOpPure *ml_EA_15 = LOADW(16, VARL("EA")); + RzILOpEffect *op_ASSIGN_18 = SETL("tmpV", CAST(64, IL_FALSE, CAST(16, IL_FALSE, ml_EA_15))); + + // Ryy = ((st64) ((((ut64) Ryy) >> 0x10) | (tmpV << 0x30))); + RzILOpPure *op_RSHIFT_22 = SHIFTR0(CAST(64, IL_FALSE, Ryy), SN(32, 16)); + RzILOpPure *op_LSHIFT_24 = SHIFTL0(VARL("tmpV"), SN(32, 0x30)); + RzILOpPure *op_OR_25 = LOGOR(op_RSHIFT_22, op_LSHIFT_24); + RzILOpEffect *op_ASSIGN_27 = WRITE_REG(bundle, Ryy_op, CAST(64, IL_FALSE, op_OR_25)); + + RzILOpEffect *instruction_sequence = SEQN(5, imm_assign_5, seq_12, op_ASSIGN_3, op_ASSIGN_18, op_ASSIGN_27); + return instruction_sequence; +} + +// Ryy = memh_fifo(Rx++I:circ(Mu)) +RzILOpEffect *hex_il_op_l2_loadalignh_pcr(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Mu_op = ISA2REG(hi, 'u', false); + RzILOpPure *Mu = READ_REG(pkt, Mu_op, false); + // Declare: ut64 tmpV; + const HexOp *Ryy_op = ISA2REG(hi, 'y', false); + RzILOpPure *Ryy = READ_REG(pkt, Ryy_op, false); + + // EA = 
((ut32) Rx); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // fcirc_add(bundle, Rx, ((st32) (sextract64(((ut64) (((Mu & 0xf0000000) >> 0x15) | ((Mu >> 0x11) & 0x7f))), 0x0, 0xb) << 0x1)), Mu, get_corresponding_CS(pkt, Mu)); + RzILOpPure *op_AND_10 = LOGAND(DUP(Mu), SN(32, 0xf0000000)); + RzILOpPure *op_RSHIFT_12 = SHIFTRA(op_AND_10, SN(32, 21)); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(DUP(Mu), SN(32, 17)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0x7f)); + RzILOpPure *op_OR_17 = LOGOR(op_RSHIFT_12, op_AND_16); + RzILOpPure *op_LSHIFT_24 = SHIFTL0(SEXTRACT64(CAST(64, IL_FALSE, op_OR_17), SN(32, 0), SN(32, 11)), SN(32, 1)); + RzILOpEffect *fcirc_add_call_27 = hex_fcirc_add(bundle, Rx_op, CAST(32, MSB(op_LSHIFT_24), DUP(op_LSHIFT_24)), Mu, HEX_GET_CORRESPONDING_CS(pkt, Mu_op)); + + // h_tmp169 = fcirc_add(bundle, Rx, ((st32) (sextract64(((ut64) (((Mu & 0xf0000000) >> 0x15) | ((Mu >> 0x11) & 0x7f))), 0x0, 0xb) << 0x1)), Mu, get_corresponding_CS(pkt, Mu)); + RzILOpEffect *op_ASSIGN_hybrid_tmp_29 = SETL("h_tmp169", SIGNED(32, VARL("ret_val"))); + + // seq(fcirc_add(bundle, Rx, ((st32) (sextract64(((ut64) (((Mu & 0x ...; + RzILOpEffect *seq_30 = SEQN(2, fcirc_add_call_27, op_ASSIGN_hybrid_tmp_29); + + // tmpV = ((ut64) ((ut16) mem_load_16(EA))); + RzILOpPure *ml_EA_33 = LOADW(16, VARL("EA")); + RzILOpEffect *op_ASSIGN_36 = SETL("tmpV", CAST(64, IL_FALSE, CAST(16, IL_FALSE, ml_EA_33))); + + // Ryy = ((st64) ((((ut64) Ryy) >> 0x10) | (tmpV << 0x30))); + RzILOpPure *op_RSHIFT_40 = SHIFTR0(CAST(64, IL_FALSE, Ryy), SN(32, 16)); + RzILOpPure *op_LSHIFT_42 = SHIFTL0(VARL("tmpV"), SN(32, 0x30)); + RzILOpPure *op_OR_43 = LOGOR(op_RSHIFT_40, op_LSHIFT_42); + RzILOpEffect *op_ASSIGN_45 = WRITE_REG(bundle, Ryy_op, CAST(64, IL_FALSE, op_OR_43)); + + RzILOpEffect *instruction_sequence = SEQN(4, seq_30, op_ASSIGN_3, op_ASSIGN_36, op_ASSIGN_45); + return instruction_sequence; +} + +// Ryy = memh_fifo(Rx++Ii) +RzILOpEffect 
*hex_il_op_l2_loadalignh_pi(HexInsnPktBundle *bundle) {
	// L2_loadalignh_pi (generated RzIL lifter): EA = Rx, post-increment Rx += s,
	// then shift the zero-extended 16-bit load into the top halfword of Ryy:
	// Ryy = (Ryy >> 16) | (tmpV << 48).
	const HexInsn *hi = bundle->insn;
	HexPkt *pkt = bundle->pkt;
	// READ
	// Declare: ut32 EA;
	const HexOp *Rx_op = ISA2REG(hi, 'x', false);

	RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's'));
	// Declare: ut64 tmpV;
	const HexOp *Ryy_op = ISA2REG(hi, 'y', false);
	RzILOpPure *Ryy = READ_REG(pkt, Ryy_op, false);

	// EA = ((ut32) Rx);
	RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false)));

	// s = s;
	RzILOpEffect *imm_assign_5 = SETL("s", s);

	// Rx = Rx + s;
	RzILOpPure *op_ADD_7 = ADD(READ_REG(pkt, Rx_op, false), VARL("s"));
	RzILOpEffect *op_ASSIGN_8 = WRITE_REG(bundle, Rx_op, op_ADD_7);

	// tmpV = ((ut64) ((ut16) mem_load_16(EA)));
	RzILOpPure *ml_EA_11 = LOADW(16, VARL("EA"));
	RzILOpEffect *op_ASSIGN_14 = SETL("tmpV", CAST(64, IL_FALSE, CAST(16, IL_FALSE, ml_EA_11)));

	// Ryy = ((st64) ((((ut64) Ryy) >> 0x10) | (tmpV << 0x30)));
	RzILOpPure *op_RSHIFT_18 = SHIFTR0(CAST(64, IL_FALSE, Ryy), SN(32, 16));
	RzILOpPure *op_LSHIFT_20 = SHIFTL0(VARL("tmpV"), SN(32, 0x30));
	RzILOpPure *op_OR_21 = LOGOR(op_RSHIFT_18, op_LSHIFT_20);
	RzILOpEffect *op_ASSIGN_23 = WRITE_REG(bundle, Ryy_op, CAST(64, IL_FALSE, op_OR_21));

	RzILOpEffect *instruction_sequence = SEQN(5, imm_assign_5, op_ASSIGN_3, op_ASSIGN_8, op_ASSIGN_14, op_ASSIGN_23);
	return instruction_sequence;
}

// Ryy = memh_fifo(Rx++Mu)
// L2_loadalignh_pr: EA = Rx, post-increment Rx += Mu (modifier register), then
// the same 16-bit FIFO shift into Ryy as the other loadalignh variants.
RzILOpEffect *hex_il_op_l2_loadalignh_pr(HexInsnPktBundle *bundle) {
	const HexInsn *hi = bundle->insn;
	HexPkt *pkt = bundle->pkt;
	// READ
	// Declare: ut32 EA;
	const HexOp *Rx_op = ISA2REG(hi, 'x', false);

	const HexOp *Mu_op = ISA2REG(hi, 'u', false);
	RzILOpPure *Mu = READ_REG(pkt, Mu_op, false);
	// Declare: ut64 tmpV;
	const HexOp *Ryy_op = ISA2REG(hi, 'y', false);
	RzILOpPure *Ryy = READ_REG(pkt, Ryy_op, false);

	// EA = ((ut32) Rx);
	RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false)));

	// Rx = Rx + Mu;
	RzILOpPure *op_ADD_6 = ADD(READ_REG(pkt, Rx_op, false), Mu);
	RzILOpEffect *op_ASSIGN_7 = WRITE_REG(bundle, Rx_op, op_ADD_6);

	// tmpV = ((ut64) ((ut16) mem_load_16(EA)));
	RzILOpPure *ml_EA_10 = LOADW(16, VARL("EA"));
	RzILOpEffect *op_ASSIGN_13 = SETL("tmpV", CAST(64, IL_FALSE, CAST(16, IL_FALSE, ml_EA_10)));

	// Ryy = ((st64) ((((ut64) Ryy) >> 0x10) | (tmpV << 0x30)));
	RzILOpPure *op_RSHIFT_17 = SHIFTR0(CAST(64, IL_FALSE, Ryy), SN(32, 16));
	RzILOpPure *op_LSHIFT_19 = SHIFTL0(VARL("tmpV"), SN(32, 0x30));
	RzILOpPure *op_OR_20 = LOGOR(op_RSHIFT_17, op_LSHIFT_19);
	RzILOpEffect *op_ASSIGN_22 = WRITE_REG(bundle, Ryy_op, CAST(64, IL_FALSE, op_OR_20));

	RzILOpEffect *instruction_sequence = SEQN(4, op_ASSIGN_3, op_ASSIGN_7, op_ASSIGN_13, op_ASSIGN_22);
	return instruction_sequence;
}

// Rd = membh(Rs+Ii)
// L2_loadbsw2_io: loads 16 bits from EA = Rs + s, then (generated 2-iteration
// loop over i) sign-extends each of the two bytes of tmpV into the two
// halfwords of Rd. The HYB(++i)/h_tmp pattern is the generator's loop counter.
RzILOpEffect *hex_il_op_l2_loadbsw2_io(HexInsnPktBundle *bundle) {
	const HexInsn *hi = bundle->insn;
	HexPkt *pkt = bundle->pkt;
	// READ
	RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's'));
	// Declare: ut32 EA;
	const HexOp *Rs_op = ISA2REG(hi, 's', false);
	RzILOpPure *Rs = READ_REG(pkt, Rs_op, false);
	// Declare: ut16 tmpV;
	// Declare: st32 i;
	const HexOp *Rd_op = ISA2REG(hi, 'd', false);

	// s = s;
	RzILOpEffect *imm_assign_0 = SETL("s", s);

	// EA = ((ut32) Rs + s);
	RzILOpPure *op_ADD_4 = ADD(Rs, VARL("s"));
	RzILOpEffect *op_ASSIGN_6 = SETL("EA", CAST(32, IL_FALSE, op_ADD_4));

	// tmpV = ((ut16) mem_load_16(EA));
	RzILOpPure *ml_EA_10 = LOADW(16, VARL("EA"));
	RzILOpEffect *op_ASSIGN_12 = SETL("tmpV", CAST(16, IL_FALSE, ml_EA_10));

	// i = 0x0;
	RzILOpEffect *op_ASSIGN_14 = SETL("i", SN(32, 0));

	// HYB(++i);
	RzILOpEffect *op_INC_17 = SETL("i", INC(VARL("i"), 32));

	// h_tmp170 = HYB(++i);
	RzILOpEffect *op_ASSIGN_hybrid_tmp_19 = SETL("h_tmp170", VARL("i"));

	// seq(h_tmp170 = HYB(++i); HYB(++i));
	RzILOpEffect *seq_20 = SEQN(2, op_ASSIGN_hybrid_tmp_19, op_INC_17);

	// Rd = 
((st32) (((ut64) (((st64) Rd) & (~(0xffff << i * 0x10)))) | (((ut64) (((st32) ((st8) (((st32) (tmpV >> i * 0x8)) & 0xff))) & 0xffff)) << i * 0x10))); + RzILOpPure *op_MUL_24 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_25 = SHIFTL0(SN(64, 0xffff), op_MUL_24); + RzILOpPure *op_NOT_26 = LOGNOT(op_LSHIFT_25); + RzILOpPure *op_AND_28 = LOGAND(CAST(64, MSB(READ_REG(pkt, Rd_op, true)), READ_REG(pkt, Rd_op, true)), op_NOT_26); + RzILOpPure *op_MUL_30 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_RSHIFT_31 = SHIFTR0(VARL("tmpV"), op_MUL_30); + RzILOpPure *op_AND_34 = LOGAND(CAST(32, IL_FALSE, op_RSHIFT_31), SN(32, 0xff)); + RzILOpPure *op_AND_38 = LOGAND(CAST(32, MSB(CAST(8, MSB(op_AND_34), DUP(op_AND_34))), CAST(8, MSB(DUP(op_AND_34)), DUP(op_AND_34))), SN(32, 0xffff)); + RzILOpPure *op_MUL_41 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_42 = SHIFTL0(CAST(64, IL_FALSE, op_AND_38), op_MUL_41); + RzILOpPure *op_OR_44 = LOGOR(CAST(64, IL_FALSE, op_AND_28), op_LSHIFT_42); + RzILOpEffect *op_ASSIGN_46 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, op_OR_44)); + + // seq(h_tmp170; Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xffff << ...; + RzILOpEffect *seq_48 = op_ASSIGN_46; + + // seq(seq(h_tmp170; Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xffff ...; + RzILOpEffect *seq_49 = SEQN(2, seq_48, seq_20); + + // while ((i < 0x2)) { seq(seq(h_tmp170; Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xffff ... 
}; + RzILOpPure *op_LT_16 = SLT(VARL("i"), SN(32, 2)); + RzILOpEffect *for_50 = REPEAT(op_LT_16, seq_49); + + // seq(i = 0x0; while ((i < 0x2)) { seq(seq(h_tmp170; Rd = ((st32) ...; + RzILOpEffect *seq_51 = SEQN(2, op_ASSIGN_14, for_50); + + RzILOpEffect *instruction_sequence = SEQN(4, imm_assign_0, op_ASSIGN_6, op_ASSIGN_12, seq_51); + return instruction_sequence; +} + +// Rd = membh(Rx++Mu:brev) +RzILOpEffect *hex_il_op_l2_loadbsw2_pbr(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Mu_op = ISA2REG(hi, 'u', false); + RzILOpPure *Mu = READ_REG(pkt, Mu_op, false); + // Declare: ut16 tmpV; + // Declare: st32 i; + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // fbrev(((ut32) Rx)); + RzILOpEffect *fbrev_call_3 = hex_fbrev(CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // h_tmp171 = fbrev(((ut32) Rx)); + RzILOpEffect *op_ASSIGN_hybrid_tmp_5 = SETL("h_tmp171", UNSIGNED(32, VARL("ret_val"))); + + // seq(fbrev(((ut32) Rx)); h_tmp171 = fbrev(((ut32) Rx))); + RzILOpEffect *seq_6 = SEQN(2, fbrev_call_3, op_ASSIGN_hybrid_tmp_5); + + // EA = h_tmp171; + RzILOpEffect *op_ASSIGN_7 = SETL("EA", VARL("h_tmp171")); + + // seq(seq(fbrev(((ut32) Rx)); h_tmp171 = fbrev(((ut32) Rx))); EA = ...; + RzILOpEffect *seq_8 = SEQN(2, seq_6, op_ASSIGN_7); + + // Rx = Rx + Mu; + RzILOpPure *op_ADD_10 = ADD(READ_REG(pkt, Rx_op, false), Mu); + RzILOpEffect *op_ASSIGN_11 = WRITE_REG(bundle, Rx_op, op_ADD_10); + + // tmpV = ((ut16) mem_load_16(EA)); + RzILOpPure *ml_EA_15 = LOADW(16, VARL("EA")); + RzILOpEffect *op_ASSIGN_17 = SETL("tmpV", CAST(16, IL_FALSE, ml_EA_15)); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_19 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_22 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp172 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_24 = SETL("h_tmp172", VARL("i")); + + // seq(h_tmp172 = HYB(++i); 
HYB(++i)); + RzILOpEffect *seq_25 = SEQN(2, op_ASSIGN_hybrid_tmp_24, op_INC_22); + + // Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xffff << i * 0x10)))) | (((ut64) (((st32) ((st8) (((st32) (tmpV >> i * 0x8)) & 0xff))) & 0xffff)) << i * 0x10))); + RzILOpPure *op_MUL_29 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_30 = SHIFTL0(SN(64, 0xffff), op_MUL_29); + RzILOpPure *op_NOT_31 = LOGNOT(op_LSHIFT_30); + RzILOpPure *op_AND_33 = LOGAND(CAST(64, MSB(READ_REG(pkt, Rd_op, true)), READ_REG(pkt, Rd_op, true)), op_NOT_31); + RzILOpPure *op_MUL_35 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_RSHIFT_36 = SHIFTR0(VARL("tmpV"), op_MUL_35); + RzILOpPure *op_AND_39 = LOGAND(CAST(32, IL_FALSE, op_RSHIFT_36), SN(32, 0xff)); + RzILOpPure *op_AND_43 = LOGAND(CAST(32, MSB(CAST(8, MSB(op_AND_39), DUP(op_AND_39))), CAST(8, MSB(DUP(op_AND_39)), DUP(op_AND_39))), SN(32, 0xffff)); + RzILOpPure *op_MUL_46 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_47 = SHIFTL0(CAST(64, IL_FALSE, op_AND_43), op_MUL_46); + RzILOpPure *op_OR_49 = LOGOR(CAST(64, IL_FALSE, op_AND_33), op_LSHIFT_47); + RzILOpEffect *op_ASSIGN_51 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, op_OR_49)); + + // seq(h_tmp172; Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xffff << ...; + RzILOpEffect *seq_53 = op_ASSIGN_51; + + // seq(seq(h_tmp172; Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xffff ...; + RzILOpEffect *seq_54 = SEQN(2, seq_53, seq_25); + + // while ((i < 0x2)) { seq(seq(h_tmp172; Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xffff ... 
}; + RzILOpPure *op_LT_21 = SLT(VARL("i"), SN(32, 2)); + RzILOpEffect *for_55 = REPEAT(op_LT_21, seq_54); + + // seq(i = 0x0; while ((i < 0x2)) { seq(seq(h_tmp172; Rd = ((st32) ...; + RzILOpEffect *seq_56 = SEQN(2, op_ASSIGN_19, for_55); + + RzILOpEffect *instruction_sequence = SEQN(4, seq_8, op_ASSIGN_11, op_ASSIGN_17, seq_56); + return instruction_sequence; +} + +// Rd = membh(Rx++Ii:circ(Mu)) +RzILOpEffect *hex_il_op_l2_loadbsw2_pci(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + const HexOp *Mu_op = ISA2REG(hi, 'u', false); + RzILOpPure *Mu = READ_REG(pkt, Mu_op, false); + // Declare: ut16 tmpV; + // Declare: st32 i; + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // EA = ((ut32) Rx); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // s = s; + RzILOpEffect *imm_assign_5 = SETL("s", s); + + // fcirc_add(bundle, Rx, s, Mu, get_corresponding_CS(pkt, Mu)); + RzILOpEffect *fcirc_add_call_9 = hex_fcirc_add(bundle, Rx_op, VARL("s"), Mu, HEX_GET_CORRESPONDING_CS(pkt, Mu_op)); + + // h_tmp173 = fcirc_add(bundle, Rx, s, Mu, get_corresponding_CS(pkt, Mu)); + RzILOpEffect *op_ASSIGN_hybrid_tmp_11 = SETL("h_tmp173", SIGNED(32, VARL("ret_val"))); + + // seq(fcirc_add(bundle, Rx, s, Mu, get_corresponding_CS(pkt, Mu)); ...; + RzILOpEffect *seq_12 = SEQN(2, fcirc_add_call_9, op_ASSIGN_hybrid_tmp_11); + + // tmpV = ((ut16) mem_load_16(EA)); + RzILOpPure *ml_EA_16 = LOADW(16, VARL("EA")); + RzILOpEffect *op_ASSIGN_18 = SETL("tmpV", CAST(16, IL_FALSE, ml_EA_16)); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_20 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_23 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp174 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_25 = SETL("h_tmp174", VARL("i")); + + // seq(h_tmp174 = HYB(++i); HYB(++i)); + 
RzILOpEffect *seq_26 = SEQN(2, op_ASSIGN_hybrid_tmp_25, op_INC_23); + + // Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xffff << i * 0x10)))) | (((ut64) (((st32) ((st8) (((st32) (tmpV >> i * 0x8)) & 0xff))) & 0xffff)) << i * 0x10))); + RzILOpPure *op_MUL_30 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_31 = SHIFTL0(SN(64, 0xffff), op_MUL_30); + RzILOpPure *op_NOT_32 = LOGNOT(op_LSHIFT_31); + RzILOpPure *op_AND_34 = LOGAND(CAST(64, MSB(READ_REG(pkt, Rd_op, true)), READ_REG(pkt, Rd_op, true)), op_NOT_32); + RzILOpPure *op_MUL_36 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_RSHIFT_37 = SHIFTR0(VARL("tmpV"), op_MUL_36); + RzILOpPure *op_AND_40 = LOGAND(CAST(32, IL_FALSE, op_RSHIFT_37), SN(32, 0xff)); + RzILOpPure *op_AND_44 = LOGAND(CAST(32, MSB(CAST(8, MSB(op_AND_40), DUP(op_AND_40))), CAST(8, MSB(DUP(op_AND_40)), DUP(op_AND_40))), SN(32, 0xffff)); + RzILOpPure *op_MUL_47 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_48 = SHIFTL0(CAST(64, IL_FALSE, op_AND_44), op_MUL_47); + RzILOpPure *op_OR_50 = LOGOR(CAST(64, IL_FALSE, op_AND_34), op_LSHIFT_48); + RzILOpEffect *op_ASSIGN_52 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, op_OR_50)); + + // seq(h_tmp174; Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xffff << ...; + RzILOpEffect *seq_54 = op_ASSIGN_52; + + // seq(seq(h_tmp174; Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xffff ...; + RzILOpEffect *seq_55 = SEQN(2, seq_54, seq_26); + + // while ((i < 0x2)) { seq(seq(h_tmp174; Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xffff ... 
}; + RzILOpPure *op_LT_22 = SLT(VARL("i"), SN(32, 2)); + RzILOpEffect *for_56 = REPEAT(op_LT_22, seq_55); + + // seq(i = 0x0; while ((i < 0x2)) { seq(seq(h_tmp174; Rd = ((st32) ...; + RzILOpEffect *seq_57 = SEQN(2, op_ASSIGN_20, for_56); + + RzILOpEffect *instruction_sequence = SEQN(5, imm_assign_5, seq_12, op_ASSIGN_3, op_ASSIGN_18, seq_57); + return instruction_sequence; +} + +// Rd = membh(Rx++I:circ(Mu)) +RzILOpEffect *hex_il_op_l2_loadbsw2_pcr(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Mu_op = ISA2REG(hi, 'u', false); + RzILOpPure *Mu = READ_REG(pkt, Mu_op, false); + // Declare: ut16 tmpV; + // Declare: st32 i; + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // EA = ((ut32) Rx); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // fcirc_add(bundle, Rx, ((st32) (sextract64(((ut64) (((Mu & 0xf0000000) >> 0x15) | ((Mu >> 0x11) & 0x7f))), 0x0, 0xb) << 0x1)), Mu, get_corresponding_CS(pkt, Mu)); + RzILOpPure *op_AND_10 = LOGAND(DUP(Mu), SN(32, 0xf0000000)); + RzILOpPure *op_RSHIFT_12 = SHIFTRA(op_AND_10, SN(32, 21)); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(DUP(Mu), SN(32, 17)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0x7f)); + RzILOpPure *op_OR_17 = LOGOR(op_RSHIFT_12, op_AND_16); + RzILOpPure *op_LSHIFT_24 = SHIFTL0(SEXTRACT64(CAST(64, IL_FALSE, op_OR_17), SN(32, 0), SN(32, 11)), SN(32, 1)); + RzILOpEffect *fcirc_add_call_27 = hex_fcirc_add(bundle, Rx_op, CAST(32, MSB(op_LSHIFT_24), DUP(op_LSHIFT_24)), Mu, HEX_GET_CORRESPONDING_CS(pkt, Mu_op)); + + // h_tmp175 = fcirc_add(bundle, Rx, ((st32) (sextract64(((ut64) (((Mu & 0xf0000000) >> 0x15) | ((Mu >> 0x11) & 0x7f))), 0x0, 0xb) << 0x1)), Mu, get_corresponding_CS(pkt, Mu)); + RzILOpEffect *op_ASSIGN_hybrid_tmp_29 = SETL("h_tmp175", SIGNED(32, VARL("ret_val"))); + + // seq(fcirc_add(bundle, Rx, ((st32) 
(sextract64(((ut64) (((Mu & 0x ...; + RzILOpEffect *seq_30 = SEQN(2, fcirc_add_call_27, op_ASSIGN_hybrid_tmp_29); + + // tmpV = ((ut16) mem_load_16(EA)); + RzILOpPure *ml_EA_34 = LOADW(16, VARL("EA")); + RzILOpEffect *op_ASSIGN_36 = SETL("tmpV", CAST(16, IL_FALSE, ml_EA_34)); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_38 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_41 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp176 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_43 = SETL("h_tmp176", VARL("i")); + + // seq(h_tmp176 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_44 = SEQN(2, op_ASSIGN_hybrid_tmp_43, op_INC_41); + + // Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xffff << i * 0x10)))) | (((ut64) (((st32) ((st8) (((st32) (tmpV >> i * 0x8)) & 0xff))) & 0xffff)) << i * 0x10))); + RzILOpPure *op_MUL_48 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_49 = SHIFTL0(SN(64, 0xffff), op_MUL_48); + RzILOpPure *op_NOT_50 = LOGNOT(op_LSHIFT_49); + RzILOpPure *op_AND_52 = LOGAND(CAST(64, MSB(READ_REG(pkt, Rd_op, true)), READ_REG(pkt, Rd_op, true)), op_NOT_50); + RzILOpPure *op_MUL_54 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_RSHIFT_55 = SHIFTR0(VARL("tmpV"), op_MUL_54); + RzILOpPure *op_AND_58 = LOGAND(CAST(32, IL_FALSE, op_RSHIFT_55), SN(32, 0xff)); + RzILOpPure *op_AND_62 = LOGAND(CAST(32, MSB(CAST(8, MSB(op_AND_58), DUP(op_AND_58))), CAST(8, MSB(DUP(op_AND_58)), DUP(op_AND_58))), SN(32, 0xffff)); + RzILOpPure *op_MUL_65 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_66 = SHIFTL0(CAST(64, IL_FALSE, op_AND_62), op_MUL_65); + RzILOpPure *op_OR_68 = LOGOR(CAST(64, IL_FALSE, op_AND_52), op_LSHIFT_66); + RzILOpEffect *op_ASSIGN_70 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, op_OR_68)); + + // seq(h_tmp176; Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xffff << ...; + RzILOpEffect *seq_72 = op_ASSIGN_70; + + // seq(seq(h_tmp176; Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xffff ...; + RzILOpEffect *seq_73 = SEQN(2, seq_72, seq_44); + + // while ((i < 0x2)) { 
seq(seq(h_tmp176; Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xffff ... }; + RzILOpPure *op_LT_40 = SLT(VARL("i"), SN(32, 2)); + RzILOpEffect *for_74 = REPEAT(op_LT_40, seq_73); + + // seq(i = 0x0; while ((i < 0x2)) { seq(seq(h_tmp176; Rd = ((st32) ...; + RzILOpEffect *seq_75 = SEQN(2, op_ASSIGN_38, for_74); + + RzILOpEffect *instruction_sequence = SEQN(4, seq_30, op_ASSIGN_3, op_ASSIGN_36, seq_75); + return instruction_sequence; +} + +// Rd = membh(Rx++Ii) +RzILOpEffect *hex_il_op_l2_loadbsw2_pi(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + // Declare: ut16 tmpV; + // Declare: st32 i; + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // EA = ((ut32) Rx); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // s = s; + RzILOpEffect *imm_assign_5 = SETL("s", s); + + // Rx = Rx + s; + RzILOpPure *op_ADD_7 = ADD(READ_REG(pkt, Rx_op, false), VARL("s")); + RzILOpEffect *op_ASSIGN_8 = WRITE_REG(bundle, Rx_op, op_ADD_7); + + // tmpV = ((ut16) mem_load_16(EA)); + RzILOpPure *ml_EA_12 = LOADW(16, VARL("EA")); + RzILOpEffect *op_ASSIGN_14 = SETL("tmpV", CAST(16, IL_FALSE, ml_EA_12)); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_16 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_19 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp177 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_21 = SETL("h_tmp177", VARL("i")); + + // seq(h_tmp177 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_22 = SEQN(2, op_ASSIGN_hybrid_tmp_21, op_INC_19); + + // Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xffff << i * 0x10)))) | (((ut64) (((st32) ((st8) (((st32) (tmpV >> i * 0x8)) & 0xff))) & 0xffff)) << i * 0x10))); + RzILOpPure *op_MUL_26 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_27 = SHIFTL0(SN(64, 0xffff), op_MUL_26); + RzILOpPure *op_NOT_28 = LOGNOT(op_LSHIFT_27); 
+ RzILOpPure *op_AND_30 = LOGAND(CAST(64, MSB(READ_REG(pkt, Rd_op, true)), READ_REG(pkt, Rd_op, true)), op_NOT_28); + RzILOpPure *op_MUL_32 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_RSHIFT_33 = SHIFTR0(VARL("tmpV"), op_MUL_32); + RzILOpPure *op_AND_36 = LOGAND(CAST(32, IL_FALSE, op_RSHIFT_33), SN(32, 0xff)); + RzILOpPure *op_AND_40 = LOGAND(CAST(32, MSB(CAST(8, MSB(op_AND_36), DUP(op_AND_36))), CAST(8, MSB(DUP(op_AND_36)), DUP(op_AND_36))), SN(32, 0xffff)); + RzILOpPure *op_MUL_43 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_44 = SHIFTL0(CAST(64, IL_FALSE, op_AND_40), op_MUL_43); + RzILOpPure *op_OR_46 = LOGOR(CAST(64, IL_FALSE, op_AND_30), op_LSHIFT_44); + RzILOpEffect *op_ASSIGN_48 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, op_OR_46)); + + // seq(h_tmp177; Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xffff << ...; + RzILOpEffect *seq_50 = op_ASSIGN_48; + + // seq(seq(h_tmp177; Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xffff ...; + RzILOpEffect *seq_51 = SEQN(2, seq_50, seq_22); + + // while ((i < 0x2)) { seq(seq(h_tmp177; Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xffff ... 
}; + RzILOpPure *op_LT_18 = SLT(VARL("i"), SN(32, 2)); + RzILOpEffect *for_52 = REPEAT(op_LT_18, seq_51); + + // seq(i = 0x0; while ((i < 0x2)) { seq(seq(h_tmp177; Rd = ((st32) ...; + RzILOpEffect *seq_53 = SEQN(2, op_ASSIGN_16, for_52); + + RzILOpEffect *instruction_sequence = SEQN(5, imm_assign_5, op_ASSIGN_3, op_ASSIGN_8, op_ASSIGN_14, seq_53); + return instruction_sequence; +} + +// Rd = membh(Rx++Mu) +RzILOpEffect *hex_il_op_l2_loadbsw2_pr(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Mu_op = ISA2REG(hi, 'u', false); + RzILOpPure *Mu = READ_REG(pkt, Mu_op, false); + // Declare: ut16 tmpV; + // Declare: st32 i; + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // EA = ((ut32) Rx); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // Rx = Rx + Mu; + RzILOpPure *op_ADD_6 = ADD(READ_REG(pkt, Rx_op, false), Mu); + RzILOpEffect *op_ASSIGN_7 = WRITE_REG(bundle, Rx_op, op_ADD_6); + + // tmpV = ((ut16) mem_load_16(EA)); + RzILOpPure *ml_EA_11 = LOADW(16, VARL("EA")); + RzILOpEffect *op_ASSIGN_13 = SETL("tmpV", CAST(16, IL_FALSE, ml_EA_11)); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_15 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_18 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp178 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_20 = SETL("h_tmp178", VARL("i")); + + // seq(h_tmp178 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_21 = SEQN(2, op_ASSIGN_hybrid_tmp_20, op_INC_18); + + // Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xffff << i * 0x10)))) | (((ut64) (((st32) ((st8) (((st32) (tmpV >> i * 0x8)) & 0xff))) & 0xffff)) << i * 0x10))); + RzILOpPure *op_MUL_25 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_26 = SHIFTL0(SN(64, 0xffff), op_MUL_25); + RzILOpPure *op_NOT_27 = LOGNOT(op_LSHIFT_26); + RzILOpPure *op_AND_29 = LOGAND(CAST(64, MSB(READ_REG(pkt, Rd_op, 
true)), READ_REG(pkt, Rd_op, true)), op_NOT_27); + RzILOpPure *op_MUL_31 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_RSHIFT_32 = SHIFTR0(VARL("tmpV"), op_MUL_31); + RzILOpPure *op_AND_35 = LOGAND(CAST(32, IL_FALSE, op_RSHIFT_32), SN(32, 0xff)); + RzILOpPure *op_AND_39 = LOGAND(CAST(32, MSB(CAST(8, MSB(op_AND_35), DUP(op_AND_35))), CAST(8, MSB(DUP(op_AND_35)), DUP(op_AND_35))), SN(32, 0xffff)); + RzILOpPure *op_MUL_42 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_43 = SHIFTL0(CAST(64, IL_FALSE, op_AND_39), op_MUL_42); + RzILOpPure *op_OR_45 = LOGOR(CAST(64, IL_FALSE, op_AND_29), op_LSHIFT_43); + RzILOpEffect *op_ASSIGN_47 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, op_OR_45)); + + // seq(h_tmp178; Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xffff << ...; + RzILOpEffect *seq_49 = op_ASSIGN_47; + + // seq(seq(h_tmp178; Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xffff ...; + RzILOpEffect *seq_50 = SEQN(2, seq_49, seq_21); + + // while ((i < 0x2)) { seq(seq(h_tmp178; Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xffff ... 
}; + RzILOpPure *op_LT_17 = SLT(VARL("i"), SN(32, 2)); + RzILOpEffect *for_51 = REPEAT(op_LT_17, seq_50); + + // seq(i = 0x0; while ((i < 0x2)) { seq(seq(h_tmp178; Rd = ((st32) ...; + RzILOpEffect *seq_52 = SEQN(2, op_ASSIGN_15, for_51); + + RzILOpEffect *instruction_sequence = SEQN(4, op_ASSIGN_3, op_ASSIGN_7, op_ASSIGN_13, seq_52); + return instruction_sequence; +} + +// Rdd = membh(Rs+Ii) +RzILOpEffect *hex_il_op_l2_loadbsw4_io(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + // Declare: ut32 tmpV; + // Declare: st32 i; + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + + // s = s; + RzILOpEffect *imm_assign_0 = SETL("s", s); + + // EA = ((ut32) Rs + s); + RzILOpPure *op_ADD_4 = ADD(Rs, VARL("s")); + RzILOpEffect *op_ASSIGN_6 = SETL("EA", CAST(32, IL_FALSE, op_ADD_4)); + + // tmpV = ((ut32) mem_load_32(EA)); + RzILOpPure *ml_EA_10 = LOADW(32, VARL("EA")); + RzILOpEffect *op_ASSIGN_12 = SETL("tmpV", CAST(32, IL_FALSE, ml_EA_10)); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_14 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_17 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp179 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_19 = SETL("h_tmp179", VARL("i")); + + // seq(h_tmp179 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_20 = SEQN(2, op_ASSIGN_hybrid_tmp_19, op_INC_17); + + // Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * 0x10)))) | (((ut64) (((st32) ((st8) ((tmpV >> i * 0x8) & ((ut32) 0xff)))) & 0xffff)) << i * 0x10))); + RzILOpPure *op_MUL_24 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_25 = SHIFTL0(SN(64, 0xffff), op_MUL_24); + RzILOpPure *op_NOT_26 = LOGNOT(op_LSHIFT_25); + RzILOpPure *op_AND_27 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_26); + RzILOpPure *op_MUL_29 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure 
*op_RSHIFT_30 = SHIFTR0(VARL("tmpV"), op_MUL_29); + RzILOpPure *op_AND_33 = LOGAND(op_RSHIFT_30, CAST(32, IL_FALSE, SN(32, 0xff))); + RzILOpPure *op_AND_37 = LOGAND(CAST(32, MSB(CAST(8, IL_FALSE, op_AND_33)), CAST(8, IL_FALSE, DUP(op_AND_33))), SN(32, 0xffff)); + RzILOpPure *op_MUL_40 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_41 = SHIFTL0(CAST(64, IL_FALSE, op_AND_37), op_MUL_40); + RzILOpPure *op_OR_43 = LOGOR(CAST(64, IL_FALSE, op_AND_27), op_LSHIFT_41); + RzILOpEffect *op_ASSIGN_45 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_43)); + + // seq(h_tmp179; Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * 0x1 ...; + RzILOpEffect *seq_47 = op_ASSIGN_45; + + // seq(seq(h_tmp179; Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * ...; + RzILOpEffect *seq_48 = SEQN(2, seq_47, seq_20); + + // while ((i < 0x4)) { seq(seq(h_tmp179; Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * ... }; + RzILOpPure *op_LT_16 = SLT(VARL("i"), SN(32, 4)); + RzILOpEffect *for_49 = REPEAT(op_LT_16, seq_48); + + // seq(i = 0x0; while ((i < 0x4)) { seq(seq(h_tmp179; Rdd = ((st64) ...; + RzILOpEffect *seq_50 = SEQN(2, op_ASSIGN_14, for_49); + + RzILOpEffect *instruction_sequence = SEQN(4, imm_assign_0, op_ASSIGN_6, op_ASSIGN_12, seq_50); + return instruction_sequence; +} + +// Rdd = membh(Rx++Mu:brev) +RzILOpEffect *hex_il_op_l2_loadbsw4_pbr(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Mu_op = ISA2REG(hi, 'u', false); + RzILOpPure *Mu = READ_REG(pkt, Mu_op, false); + // Declare: ut32 tmpV; + // Declare: st32 i; + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + + // fbrev(((ut32) Rx)); + RzILOpEffect *fbrev_call_3 = hex_fbrev(CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // h_tmp180 = fbrev(((ut32) Rx)); + RzILOpEffect *op_ASSIGN_hybrid_tmp_5 = SETL("h_tmp180", UNSIGNED(32, VARL("ret_val"))); + + // seq(fbrev(((ut32) Rx)); h_tmp180 = 
fbrev(((ut32) Rx))); + RzILOpEffect *seq_6 = SEQN(2, fbrev_call_3, op_ASSIGN_hybrid_tmp_5); + + // EA = h_tmp180; + RzILOpEffect *op_ASSIGN_7 = SETL("EA", VARL("h_tmp180")); + + // seq(seq(fbrev(((ut32) Rx)); h_tmp180 = fbrev(((ut32) Rx))); EA = ...; + RzILOpEffect *seq_8 = SEQN(2, seq_6, op_ASSIGN_7); + + // Rx = Rx + Mu; + RzILOpPure *op_ADD_10 = ADD(READ_REG(pkt, Rx_op, false), Mu); + RzILOpEffect *op_ASSIGN_11 = WRITE_REG(bundle, Rx_op, op_ADD_10); + + // tmpV = ((ut32) mem_load_32(EA)); + RzILOpPure *ml_EA_15 = LOADW(32, VARL("EA")); + RzILOpEffect *op_ASSIGN_17 = SETL("tmpV", CAST(32, IL_FALSE, ml_EA_15)); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_19 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_22 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp181 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_24 = SETL("h_tmp181", VARL("i")); + + // seq(h_tmp181 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_25 = SEQN(2, op_ASSIGN_hybrid_tmp_24, op_INC_22); + + // Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * 0x10)))) | (((ut64) (((st32) ((st8) ((tmpV >> i * 0x8) & ((ut32) 0xff)))) & 0xffff)) << i * 0x10))); + RzILOpPure *op_MUL_29 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_30 = SHIFTL0(SN(64, 0xffff), op_MUL_29); + RzILOpPure *op_NOT_31 = LOGNOT(op_LSHIFT_30); + RzILOpPure *op_AND_32 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_31); + RzILOpPure *op_MUL_34 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_RSHIFT_35 = SHIFTR0(VARL("tmpV"), op_MUL_34); + RzILOpPure *op_AND_38 = LOGAND(op_RSHIFT_35, CAST(32, IL_FALSE, SN(32, 0xff))); + RzILOpPure *op_AND_42 = LOGAND(CAST(32, MSB(CAST(8, IL_FALSE, op_AND_38)), CAST(8, IL_FALSE, DUP(op_AND_38))), SN(32, 0xffff)); + RzILOpPure *op_MUL_45 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_46 = SHIFTL0(CAST(64, IL_FALSE, op_AND_42), op_MUL_45); + RzILOpPure *op_OR_48 = LOGOR(CAST(64, IL_FALSE, op_AND_32), op_LSHIFT_46); + RzILOpEffect *op_ASSIGN_50 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_48)); 
+ + // seq(h_tmp181; Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * 0x1 ...; + RzILOpEffect *seq_52 = op_ASSIGN_50; + + // seq(seq(h_tmp181; Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * ...; + RzILOpEffect *seq_53 = SEQN(2, seq_52, seq_25); + + // while ((i < 0x4)) { seq(seq(h_tmp181; Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * ... }; + RzILOpPure *op_LT_21 = SLT(VARL("i"), SN(32, 4)); + RzILOpEffect *for_54 = REPEAT(op_LT_21, seq_53); + + // seq(i = 0x0; while ((i < 0x4)) { seq(seq(h_tmp181; Rdd = ((st64) ...; + RzILOpEffect *seq_55 = SEQN(2, op_ASSIGN_19, for_54); + + RzILOpEffect *instruction_sequence = SEQN(4, seq_8, op_ASSIGN_11, op_ASSIGN_17, seq_55); + return instruction_sequence; +} + +// Rdd = membh(Rx++Ii:circ(Mu)) +RzILOpEffect *hex_il_op_l2_loadbsw4_pci(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + const HexOp *Mu_op = ISA2REG(hi, 'u', false); + RzILOpPure *Mu = READ_REG(pkt, Mu_op, false); + // Declare: ut32 tmpV; + // Declare: st32 i; + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + + // EA = ((ut32) Rx); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // s = s; + RzILOpEffect *imm_assign_5 = SETL("s", s); + + // fcirc_add(bundle, Rx, s, Mu, get_corresponding_CS(pkt, Mu)); + RzILOpEffect *fcirc_add_call_9 = hex_fcirc_add(bundle, Rx_op, VARL("s"), Mu, HEX_GET_CORRESPONDING_CS(pkt, Mu_op)); + + // h_tmp182 = fcirc_add(bundle, Rx, s, Mu, get_corresponding_CS(pkt, Mu)); + RzILOpEffect *op_ASSIGN_hybrid_tmp_11 = SETL("h_tmp182", SIGNED(32, VARL("ret_val"))); + + // seq(fcirc_add(bundle, Rx, s, Mu, get_corresponding_CS(pkt, Mu)); ...; + RzILOpEffect *seq_12 = SEQN(2, fcirc_add_call_9, op_ASSIGN_hybrid_tmp_11); + + // tmpV = ((ut32) mem_load_32(EA)); + RzILOpPure *ml_EA_16 = LOADW(32, VARL("EA")); + RzILOpEffect *op_ASSIGN_18 
= SETL("tmpV", CAST(32, IL_FALSE, ml_EA_16)); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_20 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_23 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp183 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_25 = SETL("h_tmp183", VARL("i")); + + // seq(h_tmp183 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_26 = SEQN(2, op_ASSIGN_hybrid_tmp_25, op_INC_23); + + // Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * 0x10)))) | (((ut64) (((st32) ((st8) ((tmpV >> i * 0x8) & ((ut32) 0xff)))) & 0xffff)) << i * 0x10))); + RzILOpPure *op_MUL_30 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_31 = SHIFTL0(SN(64, 0xffff), op_MUL_30); + RzILOpPure *op_NOT_32 = LOGNOT(op_LSHIFT_31); + RzILOpPure *op_AND_33 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_32); + RzILOpPure *op_MUL_35 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_RSHIFT_36 = SHIFTR0(VARL("tmpV"), op_MUL_35); + RzILOpPure *op_AND_39 = LOGAND(op_RSHIFT_36, CAST(32, IL_FALSE, SN(32, 0xff))); + RzILOpPure *op_AND_43 = LOGAND(CAST(32, MSB(CAST(8, IL_FALSE, op_AND_39)), CAST(8, IL_FALSE, DUP(op_AND_39))), SN(32, 0xffff)); + RzILOpPure *op_MUL_46 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_47 = SHIFTL0(CAST(64, IL_FALSE, op_AND_43), op_MUL_46); + RzILOpPure *op_OR_49 = LOGOR(CAST(64, IL_FALSE, op_AND_33), op_LSHIFT_47); + RzILOpEffect *op_ASSIGN_51 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_49)); + + // seq(h_tmp183; Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * 0x1 ...; + RzILOpEffect *seq_53 = op_ASSIGN_51; + + // seq(seq(h_tmp183; Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * ...; + RzILOpEffect *seq_54 = SEQN(2, seq_53, seq_26); + + // while ((i < 0x4)) { seq(seq(h_tmp183; Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * ... 
}; + RzILOpPure *op_LT_22 = SLT(VARL("i"), SN(32, 4)); + RzILOpEffect *for_55 = REPEAT(op_LT_22, seq_54); + + // seq(i = 0x0; while ((i < 0x4)) { seq(seq(h_tmp183; Rdd = ((st64) ...; + RzILOpEffect *seq_56 = SEQN(2, op_ASSIGN_20, for_55); + + RzILOpEffect *instruction_sequence = SEQN(5, imm_assign_5, seq_12, op_ASSIGN_3, op_ASSIGN_18, seq_56); + return instruction_sequence; +} + +// Rdd = membh(Rx++I:circ(Mu)) +RzILOpEffect *hex_il_op_l2_loadbsw4_pcr(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Mu_op = ISA2REG(hi, 'u', false); + RzILOpPure *Mu = READ_REG(pkt, Mu_op, false); + // Declare: ut32 tmpV; + // Declare: st32 i; + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + + // EA = ((ut32) Rx); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // fcirc_add(bundle, Rx, ((st32) (sextract64(((ut64) (((Mu & 0xf0000000) >> 0x15) | ((Mu >> 0x11) & 0x7f))), 0x0, 0xb) << 0x2)), Mu, get_corresponding_CS(pkt, Mu)); + RzILOpPure *op_AND_10 = LOGAND(DUP(Mu), SN(32, 0xf0000000)); + RzILOpPure *op_RSHIFT_12 = SHIFTRA(op_AND_10, SN(32, 21)); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(DUP(Mu), SN(32, 17)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0x7f)); + RzILOpPure *op_OR_17 = LOGOR(op_RSHIFT_12, op_AND_16); + RzILOpPure *op_LSHIFT_24 = SHIFTL0(SEXTRACT64(CAST(64, IL_FALSE, op_OR_17), SN(32, 0), SN(32, 11)), SN(32, 2)); + RzILOpEffect *fcirc_add_call_27 = hex_fcirc_add(bundle, Rx_op, CAST(32, MSB(op_LSHIFT_24), DUP(op_LSHIFT_24)), Mu, HEX_GET_CORRESPONDING_CS(pkt, Mu_op)); + + // h_tmp184 = fcirc_add(bundle, Rx, ((st32) (sextract64(((ut64) (((Mu & 0xf0000000) >> 0x15) | ((Mu >> 0x11) & 0x7f))), 0x0, 0xb) << 0x2)), Mu, get_corresponding_CS(pkt, Mu)); + RzILOpEffect *op_ASSIGN_hybrid_tmp_29 = SETL("h_tmp184", SIGNED(32, VARL("ret_val"))); + + // seq(fcirc_add(bundle, Rx, ((st32) 
(sextract64(((ut64) (((Mu & 0x ...; + RzILOpEffect *seq_30 = SEQN(2, fcirc_add_call_27, op_ASSIGN_hybrid_tmp_29); + + // tmpV = ((ut32) mem_load_32(EA)); + RzILOpPure *ml_EA_34 = LOADW(32, VARL("EA")); + RzILOpEffect *op_ASSIGN_36 = SETL("tmpV", CAST(32, IL_FALSE, ml_EA_34)); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_38 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_41 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp185 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_43 = SETL("h_tmp185", VARL("i")); + + // seq(h_tmp185 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_44 = SEQN(2, op_ASSIGN_hybrid_tmp_43, op_INC_41); + + // Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * 0x10)))) | (((ut64) (((st32) ((st8) ((tmpV >> i * 0x8) & ((ut32) 0xff)))) & 0xffff)) << i * 0x10))); + RzILOpPure *op_MUL_48 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_49 = SHIFTL0(SN(64, 0xffff), op_MUL_48); + RzILOpPure *op_NOT_50 = LOGNOT(op_LSHIFT_49); + RzILOpPure *op_AND_51 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_50); + RzILOpPure *op_MUL_53 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_RSHIFT_54 = SHIFTR0(VARL("tmpV"), op_MUL_53); + RzILOpPure *op_AND_57 = LOGAND(op_RSHIFT_54, CAST(32, IL_FALSE, SN(32, 0xff))); + RzILOpPure *op_AND_61 = LOGAND(CAST(32, MSB(CAST(8, IL_FALSE, op_AND_57)), CAST(8, IL_FALSE, DUP(op_AND_57))), SN(32, 0xffff)); + RzILOpPure *op_MUL_64 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_65 = SHIFTL0(CAST(64, IL_FALSE, op_AND_61), op_MUL_64); + RzILOpPure *op_OR_67 = LOGOR(CAST(64, IL_FALSE, op_AND_51), op_LSHIFT_65); + RzILOpEffect *op_ASSIGN_69 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_67)); + + // seq(h_tmp185; Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * 0x1 ...; + RzILOpEffect *seq_71 = op_ASSIGN_69; + + // seq(seq(h_tmp185; Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * ...; + RzILOpEffect *seq_72 = SEQN(2, seq_71, seq_44); + + // while ((i < 0x4)) { seq(seq(h_tmp185; Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * ... 
}; + RzILOpPure *op_LT_40 = SLT(VARL("i"), SN(32, 4)); + RzILOpEffect *for_73 = REPEAT(op_LT_40, seq_72); + + // seq(i = 0x0; while ((i < 0x4)) { seq(seq(h_tmp185; Rdd = ((st64) ...; + RzILOpEffect *seq_74 = SEQN(2, op_ASSIGN_38, for_73); + + RzILOpEffect *instruction_sequence = SEQN(4, seq_30, op_ASSIGN_3, op_ASSIGN_36, seq_74); + return instruction_sequence; +} + +// Rdd = membh(Rx++Ii) +RzILOpEffect *hex_il_op_l2_loadbsw4_pi(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + // Declare: ut32 tmpV; + // Declare: st32 i; + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + + // EA = ((ut32) Rx); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // s = s; + RzILOpEffect *imm_assign_5 = SETL("s", s); + + // Rx = Rx + s; + RzILOpPure *op_ADD_7 = ADD(READ_REG(pkt, Rx_op, false), VARL("s")); + RzILOpEffect *op_ASSIGN_8 = WRITE_REG(bundle, Rx_op, op_ADD_7); + + // tmpV = ((ut32) mem_load_32(EA)); + RzILOpPure *ml_EA_12 = LOADW(32, VARL("EA")); + RzILOpEffect *op_ASSIGN_14 = SETL("tmpV", CAST(32, IL_FALSE, ml_EA_12)); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_16 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_19 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp186 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_21 = SETL("h_tmp186", VARL("i")); + + // seq(h_tmp186 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_22 = SEQN(2, op_ASSIGN_hybrid_tmp_21, op_INC_19); + + // Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * 0x10)))) | (((ut64) (((st32) ((st8) ((tmpV >> i * 0x8) & ((ut32) 0xff)))) & 0xffff)) << i * 0x10))); + RzILOpPure *op_MUL_26 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_27 = SHIFTL0(SN(64, 0xffff), op_MUL_26); + RzILOpPure *op_NOT_28 = LOGNOT(op_LSHIFT_27); + RzILOpPure *op_AND_29 = LOGAND(READ_REG(pkt, Rdd_op, true), 
op_NOT_28); + RzILOpPure *op_MUL_31 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_RSHIFT_32 = SHIFTR0(VARL("tmpV"), op_MUL_31); + RzILOpPure *op_AND_35 = LOGAND(op_RSHIFT_32, CAST(32, IL_FALSE, SN(32, 0xff))); + RzILOpPure *op_AND_39 = LOGAND(CAST(32, MSB(CAST(8, IL_FALSE, op_AND_35)), CAST(8, IL_FALSE, DUP(op_AND_35))), SN(32, 0xffff)); + RzILOpPure *op_MUL_42 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_43 = SHIFTL0(CAST(64, IL_FALSE, op_AND_39), op_MUL_42); + RzILOpPure *op_OR_45 = LOGOR(CAST(64, IL_FALSE, op_AND_29), op_LSHIFT_43); + RzILOpEffect *op_ASSIGN_47 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_45)); + + // seq(h_tmp186; Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * 0x1 ...; + RzILOpEffect *seq_49 = op_ASSIGN_47; + + // seq(seq(h_tmp186; Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * ...; + RzILOpEffect *seq_50 = SEQN(2, seq_49, seq_22); + + // while ((i < 0x4)) { seq(seq(h_tmp186; Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * ... }; + RzILOpPure *op_LT_18 = SLT(VARL("i"), SN(32, 4)); + RzILOpEffect *for_51 = REPEAT(op_LT_18, seq_50); + + // seq(i = 0x0; while ((i < 0x4)) { seq(seq(h_tmp186; Rdd = ((st64) ...; + RzILOpEffect *seq_52 = SEQN(2, op_ASSIGN_16, for_51); + + RzILOpEffect *instruction_sequence = SEQN(5, imm_assign_5, op_ASSIGN_3, op_ASSIGN_8, op_ASSIGN_14, seq_52); + return instruction_sequence; +} + +// Rdd = membh(Rx++Mu) +RzILOpEffect *hex_il_op_l2_loadbsw4_pr(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Mu_op = ISA2REG(hi, 'u', false); + RzILOpPure *Mu = READ_REG(pkt, Mu_op, false); + // Declare: ut32 tmpV; + // Declare: st32 i; + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + + // EA = ((ut32) Rx); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // Rx = Rx + Mu; + RzILOpPure *op_ADD_6 = ADD(READ_REG(pkt, Rx_op, false), Mu); + 
RzILOpEffect *op_ASSIGN_7 = WRITE_REG(bundle, Rx_op, op_ADD_6); + + // tmpV = ((ut32) mem_load_32(EA)); + RzILOpPure *ml_EA_11 = LOADW(32, VARL("EA")); + RzILOpEffect *op_ASSIGN_13 = SETL("tmpV", CAST(32, IL_FALSE, ml_EA_11)); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_15 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_18 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp187 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_20 = SETL("h_tmp187", VARL("i")); + + // seq(h_tmp187 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_21 = SEQN(2, op_ASSIGN_hybrid_tmp_20, op_INC_18); + + // Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * 0x10)))) | (((ut64) (((st32) ((st8) ((tmpV >> i * 0x8) & ((ut32) 0xff)))) & 0xffff)) << i * 0x10))); + RzILOpPure *op_MUL_25 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_26 = SHIFTL0(SN(64, 0xffff), op_MUL_25); + RzILOpPure *op_NOT_27 = LOGNOT(op_LSHIFT_26); + RzILOpPure *op_AND_28 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_27); + RzILOpPure *op_MUL_30 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_RSHIFT_31 = SHIFTR0(VARL("tmpV"), op_MUL_30); + RzILOpPure *op_AND_34 = LOGAND(op_RSHIFT_31, CAST(32, IL_FALSE, SN(32, 0xff))); + RzILOpPure *op_AND_38 = LOGAND(CAST(32, MSB(CAST(8, IL_FALSE, op_AND_34)), CAST(8, IL_FALSE, DUP(op_AND_34))), SN(32, 0xffff)); + RzILOpPure *op_MUL_41 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_42 = SHIFTL0(CAST(64, IL_FALSE, op_AND_38), op_MUL_41); + RzILOpPure *op_OR_44 = LOGOR(CAST(64, IL_FALSE, op_AND_28), op_LSHIFT_42); + RzILOpEffect *op_ASSIGN_46 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_44)); + + // seq(h_tmp187; Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * 0x1 ...; + RzILOpEffect *seq_48 = op_ASSIGN_46; + + // seq(seq(h_tmp187; Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * ...; + RzILOpEffect *seq_49 = SEQN(2, seq_48, seq_21); + + // while ((i < 0x4)) { seq(seq(h_tmp187; Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * ... 
}; + RzILOpPure *op_LT_17 = SLT(VARL("i"), SN(32, 4)); + RzILOpEffect *for_50 = REPEAT(op_LT_17, seq_49); + + // seq(i = 0x0; while ((i < 0x4)) { seq(seq(h_tmp187; Rdd = ((st64) ...; + RzILOpEffect *seq_51 = SEQN(2, op_ASSIGN_15, for_50); + + RzILOpEffect *instruction_sequence = SEQN(4, op_ASSIGN_3, op_ASSIGN_7, op_ASSIGN_13, seq_51); + return instruction_sequence; +} + +// Rd = memubh(Rs+Ii) +RzILOpEffect *hex_il_op_l2_loadbzw2_io(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + // Declare: ut16 tmpV; + // Declare: st32 i; + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // s = s; + RzILOpEffect *imm_assign_0 = SETL("s", s); + + // EA = ((ut32) Rs + s); + RzILOpPure *op_ADD_4 = ADD(Rs, VARL("s")); + RzILOpEffect *op_ASSIGN_6 = SETL("EA", CAST(32, IL_FALSE, op_ADD_4)); + + // tmpV = ((ut16) mem_load_16(EA)); + RzILOpPure *ml_EA_10 = LOADW(16, VARL("EA")); + RzILOpEffect *op_ASSIGN_12 = SETL("tmpV", CAST(16, IL_FALSE, ml_EA_10)); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_14 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_17 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp188 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_19 = SETL("h_tmp188", VARL("i")); + + // seq(h_tmp188 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_20 = SEQN(2, op_ASSIGN_hybrid_tmp_19, op_INC_17); + + // Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xffff << i * 0x10)))) | (((ut64) (((st32) ((ut8) (((st32) (tmpV >> i * 0x8)) & 0xff))) & 0xffff)) << i * 0x10))); + RzILOpPure *op_MUL_24 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_25 = SHIFTL0(SN(64, 0xffff), op_MUL_24); + RzILOpPure *op_NOT_26 = LOGNOT(op_LSHIFT_25); + RzILOpPure *op_AND_28 = LOGAND(CAST(64, MSB(READ_REG(pkt, Rd_op, true)), READ_REG(pkt, Rd_op, true)), op_NOT_26); + RzILOpPure *op_MUL_30 = 
MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_RSHIFT_31 = SHIFTR0(VARL("tmpV"), op_MUL_30); + RzILOpPure *op_AND_34 = LOGAND(CAST(32, IL_FALSE, op_RSHIFT_31), SN(32, 0xff)); + RzILOpPure *op_AND_38 = LOGAND(CAST(32, IL_FALSE, CAST(8, IL_FALSE, op_AND_34)), SN(32, 0xffff)); + RzILOpPure *op_MUL_41 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_42 = SHIFTL0(CAST(64, IL_FALSE, op_AND_38), op_MUL_41); + RzILOpPure *op_OR_44 = LOGOR(CAST(64, IL_FALSE, op_AND_28), op_LSHIFT_42); + RzILOpEffect *op_ASSIGN_46 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, op_OR_44)); + + // seq(h_tmp188; Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xffff << ...; + RzILOpEffect *seq_48 = op_ASSIGN_46; + + // seq(seq(h_tmp188; Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xffff ...; + RzILOpEffect *seq_49 = SEQN(2, seq_48, seq_20); + + // while ((i < 0x2)) { seq(seq(h_tmp188; Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xffff ... }; + RzILOpPure *op_LT_16 = SLT(VARL("i"), SN(32, 2)); + RzILOpEffect *for_50 = REPEAT(op_LT_16, seq_49); + + // seq(i = 0x0; while ((i < 0x2)) { seq(seq(h_tmp188; Rd = ((st32) ...; + RzILOpEffect *seq_51 = SEQN(2, op_ASSIGN_14, for_50); + + RzILOpEffect *instruction_sequence = SEQN(4, imm_assign_0, op_ASSIGN_6, op_ASSIGN_12, seq_51); + return instruction_sequence; +} + +// Rd = memubh(Rx++Mu:brev) +RzILOpEffect *hex_il_op_l2_loadbzw2_pbr(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Mu_op = ISA2REG(hi, 'u', false); + RzILOpPure *Mu = READ_REG(pkt, Mu_op, false); + // Declare: ut16 tmpV; + // Declare: st32 i; + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // fbrev(((ut32) Rx)); + RzILOpEffect *fbrev_call_3 = hex_fbrev(CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // h_tmp189 = fbrev(((ut32) Rx)); + RzILOpEffect *op_ASSIGN_hybrid_tmp_5 = SETL("h_tmp189", UNSIGNED(32, VARL("ret_val"))); + + // seq(fbrev(((ut32) Rx)); 
h_tmp189 = fbrev(((ut32) Rx))); + RzILOpEffect *seq_6 = SEQN(2, fbrev_call_3, op_ASSIGN_hybrid_tmp_5); + + // EA = h_tmp189; + RzILOpEffect *op_ASSIGN_7 = SETL("EA", VARL("h_tmp189")); + + // seq(seq(fbrev(((ut32) Rx)); h_tmp189 = fbrev(((ut32) Rx))); EA = ...; + RzILOpEffect *seq_8 = SEQN(2, seq_6, op_ASSIGN_7); + + // Rx = Rx + Mu; + RzILOpPure *op_ADD_10 = ADD(READ_REG(pkt, Rx_op, false), Mu); + RzILOpEffect *op_ASSIGN_11 = WRITE_REG(bundle, Rx_op, op_ADD_10); + + // tmpV = ((ut16) mem_load_16(EA)); + RzILOpPure *ml_EA_15 = LOADW(16, VARL("EA")); + RzILOpEffect *op_ASSIGN_17 = SETL("tmpV", CAST(16, IL_FALSE, ml_EA_15)); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_19 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_22 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp190 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_24 = SETL("h_tmp190", VARL("i")); + + // seq(h_tmp190 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_25 = SEQN(2, op_ASSIGN_hybrid_tmp_24, op_INC_22); + + // Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xffff << i * 0x10)))) | (((ut64) (((st32) ((ut8) (((st32) (tmpV >> i * 0x8)) & 0xff))) & 0xffff)) << i * 0x10))); + RzILOpPure *op_MUL_29 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_30 = SHIFTL0(SN(64, 0xffff), op_MUL_29); + RzILOpPure *op_NOT_31 = LOGNOT(op_LSHIFT_30); + RzILOpPure *op_AND_33 = LOGAND(CAST(64, MSB(READ_REG(pkt, Rd_op, true)), READ_REG(pkt, Rd_op, true)), op_NOT_31); + RzILOpPure *op_MUL_35 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_RSHIFT_36 = SHIFTR0(VARL("tmpV"), op_MUL_35); + RzILOpPure *op_AND_39 = LOGAND(CAST(32, IL_FALSE, op_RSHIFT_36), SN(32, 0xff)); + RzILOpPure *op_AND_43 = LOGAND(CAST(32, IL_FALSE, CAST(8, IL_FALSE, op_AND_39)), SN(32, 0xffff)); + RzILOpPure *op_MUL_46 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_47 = SHIFTL0(CAST(64, IL_FALSE, op_AND_43), op_MUL_46); + RzILOpPure *op_OR_49 = LOGOR(CAST(64, IL_FALSE, op_AND_33), op_LSHIFT_47); + RzILOpEffect *op_ASSIGN_51 = WRITE_REG(bundle, Rd_op, 
CAST(32, IL_FALSE, op_OR_49)); + + // seq(h_tmp190; Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xffff << ...; + RzILOpEffect *seq_53 = op_ASSIGN_51; + + // seq(seq(h_tmp190; Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xffff ...; + RzILOpEffect *seq_54 = SEQN(2, seq_53, seq_25); + + // while ((i < 0x2)) { seq(seq(h_tmp190; Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xffff ... }; + RzILOpPure *op_LT_21 = SLT(VARL("i"), SN(32, 2)); + RzILOpEffect *for_55 = REPEAT(op_LT_21, seq_54); + + // seq(i = 0x0; while ((i < 0x2)) { seq(seq(h_tmp190; Rd = ((st32) ...; + RzILOpEffect *seq_56 = SEQN(2, op_ASSIGN_19, for_55); + + RzILOpEffect *instruction_sequence = SEQN(4, seq_8, op_ASSIGN_11, op_ASSIGN_17, seq_56); + return instruction_sequence; +} + +// Rd = memubh(Rx++Ii:circ(Mu)) +RzILOpEffect *hex_il_op_l2_loadbzw2_pci(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + const HexOp *Mu_op = ISA2REG(hi, 'u', false); + RzILOpPure *Mu = READ_REG(pkt, Mu_op, false); + // Declare: ut16 tmpV; + // Declare: st32 i; + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // EA = ((ut32) Rx); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // s = s; + RzILOpEffect *imm_assign_5 = SETL("s", s); + + // fcirc_add(bundle, Rx, s, Mu, get_corresponding_CS(pkt, Mu)); + RzILOpEffect *fcirc_add_call_9 = hex_fcirc_add(bundle, Rx_op, VARL("s"), Mu, HEX_GET_CORRESPONDING_CS(pkt, Mu_op)); + + // h_tmp191 = fcirc_add(bundle, Rx, s, Mu, get_corresponding_CS(pkt, Mu)); + RzILOpEffect *op_ASSIGN_hybrid_tmp_11 = SETL("h_tmp191", SIGNED(32, VARL("ret_val"))); + + // seq(fcirc_add(bundle, Rx, s, Mu, get_corresponding_CS(pkt, Mu)); ...; + RzILOpEffect *seq_12 = SEQN(2, fcirc_add_call_9, op_ASSIGN_hybrid_tmp_11); + + // tmpV = ((ut16) mem_load_16(EA)); + RzILOpPure *ml_EA_16 = LOADW(16, VARL("EA")); + 
RzILOpEffect *op_ASSIGN_18 = SETL("tmpV", CAST(16, IL_FALSE, ml_EA_16)); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_20 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_23 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp192 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_25 = SETL("h_tmp192", VARL("i")); + + // seq(h_tmp192 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_26 = SEQN(2, op_ASSIGN_hybrid_tmp_25, op_INC_23); + + // Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xffff << i * 0x10)))) | (((ut64) (((st32) ((ut8) (((st32) (tmpV >> i * 0x8)) & 0xff))) & 0xffff)) << i * 0x10))); + RzILOpPure *op_MUL_30 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_31 = SHIFTL0(SN(64, 0xffff), op_MUL_30); + RzILOpPure *op_NOT_32 = LOGNOT(op_LSHIFT_31); + RzILOpPure *op_AND_34 = LOGAND(CAST(64, MSB(READ_REG(pkt, Rd_op, true)), READ_REG(pkt, Rd_op, true)), op_NOT_32); + RzILOpPure *op_MUL_36 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_RSHIFT_37 = SHIFTR0(VARL("tmpV"), op_MUL_36); + RzILOpPure *op_AND_40 = LOGAND(CAST(32, IL_FALSE, op_RSHIFT_37), SN(32, 0xff)); + RzILOpPure *op_AND_44 = LOGAND(CAST(32, IL_FALSE, CAST(8, IL_FALSE, op_AND_40)), SN(32, 0xffff)); + RzILOpPure *op_MUL_47 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_48 = SHIFTL0(CAST(64, IL_FALSE, op_AND_44), op_MUL_47); + RzILOpPure *op_OR_50 = LOGOR(CAST(64, IL_FALSE, op_AND_34), op_LSHIFT_48); + RzILOpEffect *op_ASSIGN_52 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, op_OR_50)); + + // seq(h_tmp192; Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xffff << ...; + RzILOpEffect *seq_54 = op_ASSIGN_52; + + // seq(seq(h_tmp192; Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xffff ...; + RzILOpEffect *seq_55 = SEQN(2, seq_54, seq_26); + + // while ((i < 0x2)) { seq(seq(h_tmp192; Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xffff ... 
}; + RzILOpPure *op_LT_22 = SLT(VARL("i"), SN(32, 2)); + RzILOpEffect *for_56 = REPEAT(op_LT_22, seq_55); + + // seq(i = 0x0; while ((i < 0x2)) { seq(seq(h_tmp192; Rd = ((st32) ...; + RzILOpEffect *seq_57 = SEQN(2, op_ASSIGN_20, for_56); + + RzILOpEffect *instruction_sequence = SEQN(5, imm_assign_5, seq_12, op_ASSIGN_3, op_ASSIGN_18, seq_57); + return instruction_sequence; +} + +// Rd = memubh(Rx++I:circ(Mu)) +RzILOpEffect *hex_il_op_l2_loadbzw2_pcr(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Mu_op = ISA2REG(hi, 'u', false); + RzILOpPure *Mu = READ_REG(pkt, Mu_op, false); + // Declare: ut16 tmpV; + // Declare: st32 i; + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // EA = ((ut32) Rx); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // fcirc_add(bundle, Rx, ((st32) (sextract64(((ut64) (((Mu & 0xf0000000) >> 0x15) | ((Mu >> 0x11) & 0x7f))), 0x0, 0xb) << 0x1)), Mu, get_corresponding_CS(pkt, Mu)); + RzILOpPure *op_AND_10 = LOGAND(DUP(Mu), SN(32, 0xf0000000)); + RzILOpPure *op_RSHIFT_12 = SHIFTRA(op_AND_10, SN(32, 21)); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(DUP(Mu), SN(32, 17)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0x7f)); + RzILOpPure *op_OR_17 = LOGOR(op_RSHIFT_12, op_AND_16); + RzILOpPure *op_LSHIFT_24 = SHIFTL0(SEXTRACT64(CAST(64, IL_FALSE, op_OR_17), SN(32, 0), SN(32, 11)), SN(32, 1)); + RzILOpEffect *fcirc_add_call_27 = hex_fcirc_add(bundle, Rx_op, CAST(32, MSB(op_LSHIFT_24), DUP(op_LSHIFT_24)), Mu, HEX_GET_CORRESPONDING_CS(pkt, Mu_op)); + + // h_tmp193 = fcirc_add(bundle, Rx, ((st32) (sextract64(((ut64) (((Mu & 0xf0000000) >> 0x15) | ((Mu >> 0x11) & 0x7f))), 0x0, 0xb) << 0x1)), Mu, get_corresponding_CS(pkt, Mu)); + RzILOpEffect *op_ASSIGN_hybrid_tmp_29 = SETL("h_tmp193", SIGNED(32, VARL("ret_val"))); + + // seq(fcirc_add(bundle, Rx, ((st32) 
(sextract64(((ut64) (((Mu & 0x ...; + RzILOpEffect *seq_30 = SEQN(2, fcirc_add_call_27, op_ASSIGN_hybrid_tmp_29); + + // tmpV = ((ut16) mem_load_16(EA)); + RzILOpPure *ml_EA_34 = LOADW(16, VARL("EA")); + RzILOpEffect *op_ASSIGN_36 = SETL("tmpV", CAST(16, IL_FALSE, ml_EA_34)); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_38 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_41 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp194 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_43 = SETL("h_tmp194", VARL("i")); + + // seq(h_tmp194 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_44 = SEQN(2, op_ASSIGN_hybrid_tmp_43, op_INC_41); + + // Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xffff << i * 0x10)))) | (((ut64) (((st32) ((ut8) (((st32) (tmpV >> i * 0x8)) & 0xff))) & 0xffff)) << i * 0x10))); + RzILOpPure *op_MUL_48 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_49 = SHIFTL0(SN(64, 0xffff), op_MUL_48); + RzILOpPure *op_NOT_50 = LOGNOT(op_LSHIFT_49); + RzILOpPure *op_AND_52 = LOGAND(CAST(64, MSB(READ_REG(pkt, Rd_op, true)), READ_REG(pkt, Rd_op, true)), op_NOT_50); + RzILOpPure *op_MUL_54 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_RSHIFT_55 = SHIFTR0(VARL("tmpV"), op_MUL_54); + RzILOpPure *op_AND_58 = LOGAND(CAST(32, IL_FALSE, op_RSHIFT_55), SN(32, 0xff)); + RzILOpPure *op_AND_62 = LOGAND(CAST(32, IL_FALSE, CAST(8, IL_FALSE, op_AND_58)), SN(32, 0xffff)); + RzILOpPure *op_MUL_65 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_66 = SHIFTL0(CAST(64, IL_FALSE, op_AND_62), op_MUL_65); + RzILOpPure *op_OR_68 = LOGOR(CAST(64, IL_FALSE, op_AND_52), op_LSHIFT_66); + RzILOpEffect *op_ASSIGN_70 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, op_OR_68)); + + // seq(h_tmp194; Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xffff << ...; + RzILOpEffect *seq_72 = op_ASSIGN_70; + + // seq(seq(h_tmp194; Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xffff ...; + RzILOpEffect *seq_73 = SEQN(2, seq_72, seq_44); + + // while ((i < 0x2)) { seq(seq(h_tmp194; Rd = ((st32) (((ut64) (((st64) Rd) & 
(~(0xffff ... }; + RzILOpPure *op_LT_40 = SLT(VARL("i"), SN(32, 2)); + RzILOpEffect *for_74 = REPEAT(op_LT_40, seq_73); + + // seq(i = 0x0; while ((i < 0x2)) { seq(seq(h_tmp194; Rd = ((st32) ...; + RzILOpEffect *seq_75 = SEQN(2, op_ASSIGN_38, for_74); + + RzILOpEffect *instruction_sequence = SEQN(4, seq_30, op_ASSIGN_3, op_ASSIGN_36, seq_75); + return instruction_sequence; +} + +// Rd = memubh(Rx++Ii) +RzILOpEffect *hex_il_op_l2_loadbzw2_pi(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + // Declare: ut16 tmpV; + // Declare: st32 i; + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // EA = ((ut32) Rx); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // s = s; + RzILOpEffect *imm_assign_5 = SETL("s", s); + + // Rx = Rx + s; + RzILOpPure *op_ADD_7 = ADD(READ_REG(pkt, Rx_op, false), VARL("s")); + RzILOpEffect *op_ASSIGN_8 = WRITE_REG(bundle, Rx_op, op_ADD_7); + + // tmpV = ((ut16) mem_load_16(EA)); + RzILOpPure *ml_EA_12 = LOADW(16, VARL("EA")); + RzILOpEffect *op_ASSIGN_14 = SETL("tmpV", CAST(16, IL_FALSE, ml_EA_12)); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_16 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_19 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp195 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_21 = SETL("h_tmp195", VARL("i")); + + // seq(h_tmp195 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_22 = SEQN(2, op_ASSIGN_hybrid_tmp_21, op_INC_19); + + // Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xffff << i * 0x10)))) | (((ut64) (((st32) ((ut8) (((st32) (tmpV >> i * 0x8)) & 0xff))) & 0xffff)) << i * 0x10))); + RzILOpPure *op_MUL_26 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_27 = SHIFTL0(SN(64, 0xffff), op_MUL_26); + RzILOpPure *op_NOT_28 = LOGNOT(op_LSHIFT_27); + RzILOpPure *op_AND_30 = LOGAND(CAST(64, 
MSB(READ_REG(pkt, Rd_op, true)), READ_REG(pkt, Rd_op, true)), op_NOT_28); + RzILOpPure *op_MUL_32 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_RSHIFT_33 = SHIFTR0(VARL("tmpV"), op_MUL_32); + RzILOpPure *op_AND_36 = LOGAND(CAST(32, IL_FALSE, op_RSHIFT_33), SN(32, 0xff)); + RzILOpPure *op_AND_40 = LOGAND(CAST(32, IL_FALSE, CAST(8, IL_FALSE, op_AND_36)), SN(32, 0xffff)); + RzILOpPure *op_MUL_43 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_44 = SHIFTL0(CAST(64, IL_FALSE, op_AND_40), op_MUL_43); + RzILOpPure *op_OR_46 = LOGOR(CAST(64, IL_FALSE, op_AND_30), op_LSHIFT_44); + RzILOpEffect *op_ASSIGN_48 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, op_OR_46)); + + // seq(h_tmp195; Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xffff << ...; + RzILOpEffect *seq_50 = op_ASSIGN_48; + + // seq(seq(h_tmp195; Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xffff ...; + RzILOpEffect *seq_51 = SEQN(2, seq_50, seq_22); + + // while ((i < 0x2)) { seq(seq(h_tmp195; Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xffff ... 
}; + RzILOpPure *op_LT_18 = SLT(VARL("i"), SN(32, 2)); + RzILOpEffect *for_52 = REPEAT(op_LT_18, seq_51); + + // seq(i = 0x0; while ((i < 0x2)) { seq(seq(h_tmp195; Rd = ((st32) ...; + RzILOpEffect *seq_53 = SEQN(2, op_ASSIGN_16, for_52); + + RzILOpEffect *instruction_sequence = SEQN(5, imm_assign_5, op_ASSIGN_3, op_ASSIGN_8, op_ASSIGN_14, seq_53); + return instruction_sequence; +} + +// Rd = memubh(Rx++Mu) +RzILOpEffect *hex_il_op_l2_loadbzw2_pr(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Mu_op = ISA2REG(hi, 'u', false); + RzILOpPure *Mu = READ_REG(pkt, Mu_op, false); + // Declare: ut16 tmpV; + // Declare: st32 i; + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // EA = ((ut32) Rx); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // Rx = Rx + Mu; + RzILOpPure *op_ADD_6 = ADD(READ_REG(pkt, Rx_op, false), Mu); + RzILOpEffect *op_ASSIGN_7 = WRITE_REG(bundle, Rx_op, op_ADD_6); + + // tmpV = ((ut16) mem_load_16(EA)); + RzILOpPure *ml_EA_11 = LOADW(16, VARL("EA")); + RzILOpEffect *op_ASSIGN_13 = SETL("tmpV", CAST(16, IL_FALSE, ml_EA_11)); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_15 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_18 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp196 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_20 = SETL("h_tmp196", VARL("i")); + + // seq(h_tmp196 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_21 = SEQN(2, op_ASSIGN_hybrid_tmp_20, op_INC_18); + + // Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xffff << i * 0x10)))) | (((ut64) (((st32) ((ut8) (((st32) (tmpV >> i * 0x8)) & 0xff))) & 0xffff)) << i * 0x10))); + RzILOpPure *op_MUL_25 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_26 = SHIFTL0(SN(64, 0xffff), op_MUL_25); + RzILOpPure *op_NOT_27 = LOGNOT(op_LSHIFT_26); + RzILOpPure *op_AND_29 = LOGAND(CAST(64, MSB(READ_REG(pkt, Rd_op, 
true)), READ_REG(pkt, Rd_op, true)), op_NOT_27); + RzILOpPure *op_MUL_31 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_RSHIFT_32 = SHIFTR0(VARL("tmpV"), op_MUL_31); + RzILOpPure *op_AND_35 = LOGAND(CAST(32, IL_FALSE, op_RSHIFT_32), SN(32, 0xff)); + RzILOpPure *op_AND_39 = LOGAND(CAST(32, IL_FALSE, CAST(8, IL_FALSE, op_AND_35)), SN(32, 0xffff)); + RzILOpPure *op_MUL_42 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_43 = SHIFTL0(CAST(64, IL_FALSE, op_AND_39), op_MUL_42); + RzILOpPure *op_OR_45 = LOGOR(CAST(64, IL_FALSE, op_AND_29), op_LSHIFT_43); + RzILOpEffect *op_ASSIGN_47 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, op_OR_45)); + + // seq(h_tmp196; Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xffff << ...; + RzILOpEffect *seq_49 = op_ASSIGN_47; + + // seq(seq(h_tmp196; Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xffff ...; + RzILOpEffect *seq_50 = SEQN(2, seq_49, seq_21); + + // while ((i < 0x2)) { seq(seq(h_tmp196; Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xffff ... }; + RzILOpPure *op_LT_17 = SLT(VARL("i"), SN(32, 2)); + RzILOpEffect *for_51 = REPEAT(op_LT_17, seq_50); + + // seq(i = 0x0; while ((i < 0x2)) { seq(seq(h_tmp196; Rd = ((st32) ...; + RzILOpEffect *seq_52 = SEQN(2, op_ASSIGN_15, for_51); + + RzILOpEffect *instruction_sequence = SEQN(4, op_ASSIGN_3, op_ASSIGN_7, op_ASSIGN_13, seq_52); + return instruction_sequence; +} + +// Rdd = memubh(Rs+Ii) +RzILOpEffect *hex_il_op_l2_loadbzw4_io(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + // Declare: ut32 tmpV; + // Declare: st32 i; + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + + // s = s; + RzILOpEffect *imm_assign_0 = SETL("s", s); + + // EA = ((ut32) Rs + s); + RzILOpPure *op_ADD_4 = ADD(Rs, VARL("s")); + RzILOpEffect *op_ASSIGN_6 = SETL("EA", CAST(32, IL_FALSE, op_ADD_4)); + + // 
tmpV = ((ut32) mem_load_32(EA)); + RzILOpPure *ml_EA_10 = LOADW(32, VARL("EA")); + RzILOpEffect *op_ASSIGN_12 = SETL("tmpV", CAST(32, IL_FALSE, ml_EA_10)); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_14 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_17 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp197 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_19 = SETL("h_tmp197", VARL("i")); + + // seq(h_tmp197 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_20 = SEQN(2, op_ASSIGN_hybrid_tmp_19, op_INC_17); + + // Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * 0x10)))) | (((ut64) (((st32) ((ut8) ((tmpV >> i * 0x8) & ((ut32) 0xff)))) & 0xffff)) << i * 0x10))); + RzILOpPure *op_MUL_24 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_25 = SHIFTL0(SN(64, 0xffff), op_MUL_24); + RzILOpPure *op_NOT_26 = LOGNOT(op_LSHIFT_25); + RzILOpPure *op_AND_27 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_26); + RzILOpPure *op_MUL_29 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_RSHIFT_30 = SHIFTR0(VARL("tmpV"), op_MUL_29); + RzILOpPure *op_AND_33 = LOGAND(op_RSHIFT_30, CAST(32, IL_FALSE, SN(32, 0xff))); + RzILOpPure *op_AND_37 = LOGAND(CAST(32, IL_FALSE, CAST(8, IL_FALSE, op_AND_33)), SN(32, 0xffff)); + RzILOpPure *op_MUL_40 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_41 = SHIFTL0(CAST(64, IL_FALSE, op_AND_37), op_MUL_40); + RzILOpPure *op_OR_43 = LOGOR(CAST(64, IL_FALSE, op_AND_27), op_LSHIFT_41); + RzILOpEffect *op_ASSIGN_45 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_43)); + + // seq(h_tmp197; Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * 0x1 ...; + RzILOpEffect *seq_47 = op_ASSIGN_45; + + // seq(seq(h_tmp197; Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * ...; + RzILOpEffect *seq_48 = SEQN(2, seq_47, seq_20); + + // while ((i < 0x4)) { seq(seq(h_tmp197; Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * ... 
}; + RzILOpPure *op_LT_16 = SLT(VARL("i"), SN(32, 4)); + RzILOpEffect *for_49 = REPEAT(op_LT_16, seq_48); + + // seq(i = 0x0; while ((i < 0x4)) { seq(seq(h_tmp197; Rdd = ((st64) ...; + RzILOpEffect *seq_50 = SEQN(2, op_ASSIGN_14, for_49); + + RzILOpEffect *instruction_sequence = SEQN(4, imm_assign_0, op_ASSIGN_6, op_ASSIGN_12, seq_50); + return instruction_sequence; +} + +// Rdd = memubh(Rx++Mu:brev) +RzILOpEffect *hex_il_op_l2_loadbzw4_pbr(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Mu_op = ISA2REG(hi, 'u', false); + RzILOpPure *Mu = READ_REG(pkt, Mu_op, false); + // Declare: ut32 tmpV; + // Declare: st32 i; + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + + // fbrev(((ut32) Rx)); + RzILOpEffect *fbrev_call_3 = hex_fbrev(CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // h_tmp198 = fbrev(((ut32) Rx)); + RzILOpEffect *op_ASSIGN_hybrid_tmp_5 = SETL("h_tmp198", UNSIGNED(32, VARL("ret_val"))); + + // seq(fbrev(((ut32) Rx)); h_tmp198 = fbrev(((ut32) Rx))); + RzILOpEffect *seq_6 = SEQN(2, fbrev_call_3, op_ASSIGN_hybrid_tmp_5); + + // EA = h_tmp198; + RzILOpEffect *op_ASSIGN_7 = SETL("EA", VARL("h_tmp198")); + + // seq(seq(fbrev(((ut32) Rx)); h_tmp198 = fbrev(((ut32) Rx))); EA = ...; + RzILOpEffect *seq_8 = SEQN(2, seq_6, op_ASSIGN_7); + + // Rx = Rx + Mu; + RzILOpPure *op_ADD_10 = ADD(READ_REG(pkt, Rx_op, false), Mu); + RzILOpEffect *op_ASSIGN_11 = WRITE_REG(bundle, Rx_op, op_ADD_10); + + // tmpV = ((ut32) mem_load_32(EA)); + RzILOpPure *ml_EA_15 = LOADW(32, VARL("EA")); + RzILOpEffect *op_ASSIGN_17 = SETL("tmpV", CAST(32, IL_FALSE, ml_EA_15)); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_19 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_22 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp199 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_24 = SETL("h_tmp199", VARL("i")); + + // seq(h_tmp199 = HYB(++i); 
HYB(++i)); + RzILOpEffect *seq_25 = SEQN(2, op_ASSIGN_hybrid_tmp_24, op_INC_22); + + // Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * 0x10)))) | (((ut64) (((st32) ((ut8) ((tmpV >> i * 0x8) & ((ut32) 0xff)))) & 0xffff)) << i * 0x10))); + RzILOpPure *op_MUL_29 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_30 = SHIFTL0(SN(64, 0xffff), op_MUL_29); + RzILOpPure *op_NOT_31 = LOGNOT(op_LSHIFT_30); + RzILOpPure *op_AND_32 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_31); + RzILOpPure *op_MUL_34 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_RSHIFT_35 = SHIFTR0(VARL("tmpV"), op_MUL_34); + RzILOpPure *op_AND_38 = LOGAND(op_RSHIFT_35, CAST(32, IL_FALSE, SN(32, 0xff))); + RzILOpPure *op_AND_42 = LOGAND(CAST(32, IL_FALSE, CAST(8, IL_FALSE, op_AND_38)), SN(32, 0xffff)); + RzILOpPure *op_MUL_45 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_46 = SHIFTL0(CAST(64, IL_FALSE, op_AND_42), op_MUL_45); + RzILOpPure *op_OR_48 = LOGOR(CAST(64, IL_FALSE, op_AND_32), op_LSHIFT_46); + RzILOpEffect *op_ASSIGN_50 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_48)); + + // seq(h_tmp199; Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * 0x1 ...; + RzILOpEffect *seq_52 = op_ASSIGN_50; + + // seq(seq(h_tmp199; Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * ...; + RzILOpEffect *seq_53 = SEQN(2, seq_52, seq_25); + + // while ((i < 0x4)) { seq(seq(h_tmp199; Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * ... 
}; + RzILOpPure *op_LT_21 = SLT(VARL("i"), SN(32, 4)); + RzILOpEffect *for_54 = REPEAT(op_LT_21, seq_53); + + // seq(i = 0x0; while ((i < 0x4)) { seq(seq(h_tmp199; Rdd = ((st64) ...; + RzILOpEffect *seq_55 = SEQN(2, op_ASSIGN_19, for_54); + + RzILOpEffect *instruction_sequence = SEQN(4, seq_8, op_ASSIGN_11, op_ASSIGN_17, seq_55); + return instruction_sequence; +} + +// Rdd = memubh(Rx++Ii:circ(Mu)) +RzILOpEffect *hex_il_op_l2_loadbzw4_pci(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + const HexOp *Mu_op = ISA2REG(hi, 'u', false); + RzILOpPure *Mu = READ_REG(pkt, Mu_op, false); + // Declare: ut32 tmpV; + // Declare: st32 i; + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + + // EA = ((ut32) Rx); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // s = s; + RzILOpEffect *imm_assign_5 = SETL("s", s); + + // fcirc_add(bundle, Rx, s, Mu, get_corresponding_CS(pkt, Mu)); + RzILOpEffect *fcirc_add_call_9 = hex_fcirc_add(bundle, Rx_op, VARL("s"), Mu, HEX_GET_CORRESPONDING_CS(pkt, Mu_op)); + + // h_tmp200 = fcirc_add(bundle, Rx, s, Mu, get_corresponding_CS(pkt, Mu)); + RzILOpEffect *op_ASSIGN_hybrid_tmp_11 = SETL("h_tmp200", SIGNED(32, VARL("ret_val"))); + + // seq(fcirc_add(bundle, Rx, s, Mu, get_corresponding_CS(pkt, Mu)); ...; + RzILOpEffect *seq_12 = SEQN(2, fcirc_add_call_9, op_ASSIGN_hybrid_tmp_11); + + // tmpV = ((ut32) mem_load_32(EA)); + RzILOpPure *ml_EA_16 = LOADW(32, VARL("EA")); + RzILOpEffect *op_ASSIGN_18 = SETL("tmpV", CAST(32, IL_FALSE, ml_EA_16)); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_20 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_23 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp201 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_25 = SETL("h_tmp201", VARL("i")); + + // seq(h_tmp201 = HYB(++i); HYB(++i)); + 
RzILOpEffect *seq_26 = SEQN(2, op_ASSIGN_hybrid_tmp_25, op_INC_23); + + // Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * 0x10)))) | (((ut64) (((st32) ((ut8) ((tmpV >> i * 0x8) & ((ut32) 0xff)))) & 0xffff)) << i * 0x10))); + RzILOpPure *op_MUL_30 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_31 = SHIFTL0(SN(64, 0xffff), op_MUL_30); + RzILOpPure *op_NOT_32 = LOGNOT(op_LSHIFT_31); + RzILOpPure *op_AND_33 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_32); + RzILOpPure *op_MUL_35 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_RSHIFT_36 = SHIFTR0(VARL("tmpV"), op_MUL_35); + RzILOpPure *op_AND_39 = LOGAND(op_RSHIFT_36, CAST(32, IL_FALSE, SN(32, 0xff))); + RzILOpPure *op_AND_43 = LOGAND(CAST(32, IL_FALSE, CAST(8, IL_FALSE, op_AND_39)), SN(32, 0xffff)); + RzILOpPure *op_MUL_46 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_47 = SHIFTL0(CAST(64, IL_FALSE, op_AND_43), op_MUL_46); + RzILOpPure *op_OR_49 = LOGOR(CAST(64, IL_FALSE, op_AND_33), op_LSHIFT_47); + RzILOpEffect *op_ASSIGN_51 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_49)); + + // seq(h_tmp201; Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * 0x1 ...; + RzILOpEffect *seq_53 = op_ASSIGN_51; + + // seq(seq(h_tmp201; Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * ...; + RzILOpEffect *seq_54 = SEQN(2, seq_53, seq_26); + + // while ((i < 0x4)) { seq(seq(h_tmp201; Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * ... 
}; + RzILOpPure *op_LT_22 = SLT(VARL("i"), SN(32, 4)); + RzILOpEffect *for_55 = REPEAT(op_LT_22, seq_54); + + // seq(i = 0x0; while ((i < 0x4)) { seq(seq(h_tmp201; Rdd = ((st64) ...; + RzILOpEffect *seq_56 = SEQN(2, op_ASSIGN_20, for_55); + + RzILOpEffect *instruction_sequence = SEQN(5, imm_assign_5, seq_12, op_ASSIGN_3, op_ASSIGN_18, seq_56); + return instruction_sequence; +} + +// Rdd = memubh(Rx++I:circ(Mu)) +RzILOpEffect *hex_il_op_l2_loadbzw4_pcr(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Mu_op = ISA2REG(hi, 'u', false); + RzILOpPure *Mu = READ_REG(pkt, Mu_op, false); + // Declare: ut32 tmpV; + // Declare: st32 i; + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + + // EA = ((ut32) Rx); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // fcirc_add(bundle, Rx, ((st32) (sextract64(((ut64) (((Mu & 0xf0000000) >> 0x15) | ((Mu >> 0x11) & 0x7f))), 0x0, 0xb) << 0x2)), Mu, get_corresponding_CS(pkt, Mu)); + RzILOpPure *op_AND_10 = LOGAND(DUP(Mu), SN(32, 0xf0000000)); + RzILOpPure *op_RSHIFT_12 = SHIFTRA(op_AND_10, SN(32, 21)); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(DUP(Mu), SN(32, 17)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0x7f)); + RzILOpPure *op_OR_17 = LOGOR(op_RSHIFT_12, op_AND_16); + RzILOpPure *op_LSHIFT_24 = SHIFTL0(SEXTRACT64(CAST(64, IL_FALSE, op_OR_17), SN(32, 0), SN(32, 11)), SN(32, 2)); + RzILOpEffect *fcirc_add_call_27 = hex_fcirc_add(bundle, Rx_op, CAST(32, MSB(op_LSHIFT_24), DUP(op_LSHIFT_24)), Mu, HEX_GET_CORRESPONDING_CS(pkt, Mu_op)); + + // h_tmp202 = fcirc_add(bundle, Rx, ((st32) (sextract64(((ut64) (((Mu & 0xf0000000) >> 0x15) | ((Mu >> 0x11) & 0x7f))), 0x0, 0xb) << 0x2)), Mu, get_corresponding_CS(pkt, Mu)); + RzILOpEffect *op_ASSIGN_hybrid_tmp_29 = SETL("h_tmp202", SIGNED(32, VARL("ret_val"))); + + // seq(fcirc_add(bundle, Rx, ((st32) 
(sextract64(((ut64) (((Mu & 0x ...; + RzILOpEffect *seq_30 = SEQN(2, fcirc_add_call_27, op_ASSIGN_hybrid_tmp_29); + + // tmpV = ((ut32) mem_load_32(EA)); + RzILOpPure *ml_EA_34 = LOADW(32, VARL("EA")); + RzILOpEffect *op_ASSIGN_36 = SETL("tmpV", CAST(32, IL_FALSE, ml_EA_34)); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_38 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_41 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp203 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_43 = SETL("h_tmp203", VARL("i")); + + // seq(h_tmp203 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_44 = SEQN(2, op_ASSIGN_hybrid_tmp_43, op_INC_41); + + // Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * 0x10)))) | (((ut64) (((st32) ((ut8) ((tmpV >> i * 0x8) & ((ut32) 0xff)))) & 0xffff)) << i * 0x10))); + RzILOpPure *op_MUL_48 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_49 = SHIFTL0(SN(64, 0xffff), op_MUL_48); + RzILOpPure *op_NOT_50 = LOGNOT(op_LSHIFT_49); + RzILOpPure *op_AND_51 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_50); + RzILOpPure *op_MUL_53 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_RSHIFT_54 = SHIFTR0(VARL("tmpV"), op_MUL_53); + RzILOpPure *op_AND_57 = LOGAND(op_RSHIFT_54, CAST(32, IL_FALSE, SN(32, 0xff))); + RzILOpPure *op_AND_61 = LOGAND(CAST(32, IL_FALSE, CAST(8, IL_FALSE, op_AND_57)), SN(32, 0xffff)); + RzILOpPure *op_MUL_64 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_65 = SHIFTL0(CAST(64, IL_FALSE, op_AND_61), op_MUL_64); + RzILOpPure *op_OR_67 = LOGOR(CAST(64, IL_FALSE, op_AND_51), op_LSHIFT_65); + RzILOpEffect *op_ASSIGN_69 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_67)); + + // seq(h_tmp203; Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * 0x1 ...; + RzILOpEffect *seq_71 = op_ASSIGN_69; + + // seq(seq(h_tmp203; Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * ...; + RzILOpEffect *seq_72 = SEQN(2, seq_71, seq_44); + + // while ((i < 0x4)) { seq(seq(h_tmp203; Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * ... 
}; + RzILOpPure *op_LT_40 = SLT(VARL("i"), SN(32, 4)); + RzILOpEffect *for_73 = REPEAT(op_LT_40, seq_72); + + // seq(i = 0x0; while ((i < 0x4)) { seq(seq(h_tmp203; Rdd = ((st64) ...; + RzILOpEffect *seq_74 = SEQN(2, op_ASSIGN_38, for_73); + + RzILOpEffect *instruction_sequence = SEQN(4, seq_30, op_ASSIGN_3, op_ASSIGN_36, seq_74); + return instruction_sequence; +} + +// Rdd = memubh(Rx++Ii) +RzILOpEffect *hex_il_op_l2_loadbzw4_pi(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + // Declare: ut32 tmpV; + // Declare: st32 i; + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + + // EA = ((ut32) Rx); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // s = s; + RzILOpEffect *imm_assign_5 = SETL("s", s); + + // Rx = Rx + s; + RzILOpPure *op_ADD_7 = ADD(READ_REG(pkt, Rx_op, false), VARL("s")); + RzILOpEffect *op_ASSIGN_8 = WRITE_REG(bundle, Rx_op, op_ADD_7); + + // tmpV = ((ut32) mem_load_32(EA)); + RzILOpPure *ml_EA_12 = LOADW(32, VARL("EA")); + RzILOpEffect *op_ASSIGN_14 = SETL("tmpV", CAST(32, IL_FALSE, ml_EA_12)); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_16 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_19 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp204 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_21 = SETL("h_tmp204", VARL("i")); + + // seq(h_tmp204 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_22 = SEQN(2, op_ASSIGN_hybrid_tmp_21, op_INC_19); + + // Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * 0x10)))) | (((ut64) (((st32) ((ut8) ((tmpV >> i * 0x8) & ((ut32) 0xff)))) & 0xffff)) << i * 0x10))); + RzILOpPure *op_MUL_26 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_27 = SHIFTL0(SN(64, 0xffff), op_MUL_26); + RzILOpPure *op_NOT_28 = LOGNOT(op_LSHIFT_27); + RzILOpPure *op_AND_29 = LOGAND(READ_REG(pkt, Rdd_op, true), 
op_NOT_28); + RzILOpPure *op_MUL_31 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_RSHIFT_32 = SHIFTR0(VARL("tmpV"), op_MUL_31); + RzILOpPure *op_AND_35 = LOGAND(op_RSHIFT_32, CAST(32, IL_FALSE, SN(32, 0xff))); + RzILOpPure *op_AND_39 = LOGAND(CAST(32, IL_FALSE, CAST(8, IL_FALSE, op_AND_35)), SN(32, 0xffff)); + RzILOpPure *op_MUL_42 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_43 = SHIFTL0(CAST(64, IL_FALSE, op_AND_39), op_MUL_42); + RzILOpPure *op_OR_45 = LOGOR(CAST(64, IL_FALSE, op_AND_29), op_LSHIFT_43); + RzILOpEffect *op_ASSIGN_47 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_45)); + + // seq(h_tmp204; Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * 0x1 ...; + RzILOpEffect *seq_49 = op_ASSIGN_47; + + // seq(seq(h_tmp204; Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * ...; + RzILOpEffect *seq_50 = SEQN(2, seq_49, seq_22); + + // while ((i < 0x4)) { seq(seq(h_tmp204; Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * ... }; + RzILOpPure *op_LT_18 = SLT(VARL("i"), SN(32, 4)); + RzILOpEffect *for_51 = REPEAT(op_LT_18, seq_50); + + // seq(i = 0x0; while ((i < 0x4)) { seq(seq(h_tmp204; Rdd = ((st64) ...; + RzILOpEffect *seq_52 = SEQN(2, op_ASSIGN_16, for_51); + + RzILOpEffect *instruction_sequence = SEQN(5, imm_assign_5, op_ASSIGN_3, op_ASSIGN_8, op_ASSIGN_14, seq_52); + return instruction_sequence; +} + +// Rdd = memubh(Rx++Mu) +RzILOpEffect *hex_il_op_l2_loadbzw4_pr(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Mu_op = ISA2REG(hi, 'u', false); + RzILOpPure *Mu = READ_REG(pkt, Mu_op, false); + // Declare: ut32 tmpV; + // Declare: st32 i; + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + + // EA = ((ut32) Rx); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // Rx = Rx + Mu; + RzILOpPure *op_ADD_6 = ADD(READ_REG(pkt, Rx_op, false), Mu); + RzILOpEffect *op_ASSIGN_7 = 
WRITE_REG(bundle, Rx_op, op_ADD_6); + + // tmpV = ((ut32) mem_load_32(EA)); + RzILOpPure *ml_EA_11 = LOADW(32, VARL("EA")); + RzILOpEffect *op_ASSIGN_13 = SETL("tmpV", CAST(32, IL_FALSE, ml_EA_11)); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_15 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_18 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp205 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_20 = SETL("h_tmp205", VARL("i")); + + // seq(h_tmp205 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_21 = SEQN(2, op_ASSIGN_hybrid_tmp_20, op_INC_18); + + // Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * 0x10)))) | (((ut64) (((st32) ((ut8) ((tmpV >> i * 0x8) & ((ut32) 0xff)))) & 0xffff)) << i * 0x10))); + RzILOpPure *op_MUL_25 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_26 = SHIFTL0(SN(64, 0xffff), op_MUL_25); + RzILOpPure *op_NOT_27 = LOGNOT(op_LSHIFT_26); + RzILOpPure *op_AND_28 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_27); + RzILOpPure *op_MUL_30 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_RSHIFT_31 = SHIFTR0(VARL("tmpV"), op_MUL_30); + RzILOpPure *op_AND_34 = LOGAND(op_RSHIFT_31, CAST(32, IL_FALSE, SN(32, 0xff))); + RzILOpPure *op_AND_38 = LOGAND(CAST(32, IL_FALSE, CAST(8, IL_FALSE, op_AND_34)), SN(32, 0xffff)); + RzILOpPure *op_MUL_41 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_42 = SHIFTL0(CAST(64, IL_FALSE, op_AND_38), op_MUL_41); + RzILOpPure *op_OR_44 = LOGOR(CAST(64, IL_FALSE, op_AND_28), op_LSHIFT_42); + RzILOpEffect *op_ASSIGN_46 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_44)); + + // seq(h_tmp205; Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * 0x1 ...; + RzILOpEffect *seq_48 = op_ASSIGN_46; + + // seq(seq(h_tmp205; Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * ...; + RzILOpEffect *seq_49 = SEQN(2, seq_48, seq_21); + + // while ((i < 0x4)) { seq(seq(h_tmp205; Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * ... 
}; + RzILOpPure *op_LT_17 = SLT(VARL("i"), SN(32, 4)); + RzILOpEffect *for_50 = REPEAT(op_LT_17, seq_49); + + // seq(i = 0x0; while ((i < 0x4)) { seq(seq(h_tmp205; Rdd = ((st64) ...; + RzILOpEffect *seq_51 = SEQN(2, op_ASSIGN_15, for_50); + + RzILOpEffect *instruction_sequence = SEQN(4, op_ASSIGN_3, op_ASSIGN_7, op_ASSIGN_13, seq_51); + return instruction_sequence; +} + +// Rd = memb(Rs+Ii) +RzILOpEffect *hex_il_op_l2_loadrb_io(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // s = s; + RzILOpEffect *imm_assign_0 = SETL("s", s); + + // EA = ((ut32) Rs + s); + RzILOpPure *op_ADD_4 = ADD(Rs, VARL("s")); + RzILOpEffect *op_ASSIGN_6 = SETL("EA", CAST(32, IL_FALSE, op_ADD_4)); + + // Rd = ((st32) ((st8) mem_load_8(EA))); + RzILOpPure *ml_EA_9 = LOADW(8, VARL("EA")); + RzILOpEffect *op_ASSIGN_12 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(CAST(8, MSB(ml_EA_9), DUP(ml_EA_9))), CAST(8, MSB(DUP(ml_EA_9)), DUP(ml_EA_9)))); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_6, op_ASSIGN_12); + return instruction_sequence; +} + +// Rd = memb(Rx++Mu:brev) +RzILOpEffect *hex_il_op_l2_loadrb_pbr(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Mu_op = ISA2REG(hi, 'u', false); + RzILOpPure *Mu = READ_REG(pkt, Mu_op, false); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // fbrev(((ut32) Rx)); + RzILOpEffect *fbrev_call_3 = hex_fbrev(CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // h_tmp206 = fbrev(((ut32) Rx)); + RzILOpEffect *op_ASSIGN_hybrid_tmp_5 = SETL("h_tmp206", UNSIGNED(32, VARL("ret_val"))); + + // seq(fbrev(((ut32) Rx)); h_tmp206 = 
fbrev(((ut32) Rx))); + RzILOpEffect *seq_6 = SEQN(2, fbrev_call_3, op_ASSIGN_hybrid_tmp_5); + + // EA = h_tmp206; + RzILOpEffect *op_ASSIGN_7 = SETL("EA", VARL("h_tmp206")); + + // seq(seq(fbrev(((ut32) Rx)); h_tmp206 = fbrev(((ut32) Rx))); EA = ...; + RzILOpEffect *seq_8 = SEQN(2, seq_6, op_ASSIGN_7); + + // Rx = Rx + Mu; + RzILOpPure *op_ADD_10 = ADD(READ_REG(pkt, Rx_op, false), Mu); + RzILOpEffect *op_ASSIGN_11 = WRITE_REG(bundle, Rx_op, op_ADD_10); + + // Rd = ((st32) ((st8) mem_load_8(EA))); + RzILOpPure *ml_EA_14 = LOADW(8, VARL("EA")); + RzILOpEffect *op_ASSIGN_17 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(CAST(8, MSB(ml_EA_14), DUP(ml_EA_14))), CAST(8, MSB(DUP(ml_EA_14)), DUP(ml_EA_14)))); + + RzILOpEffect *instruction_sequence = SEQN(3, seq_8, op_ASSIGN_11, op_ASSIGN_17); + return instruction_sequence; +} + +// Rd = memb(Rx++Ii:circ(Mu)) +RzILOpEffect *hex_il_op_l2_loadrb_pci(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + const HexOp *Mu_op = ISA2REG(hi, 'u', false); + RzILOpPure *Mu = READ_REG(pkt, Mu_op, false); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // EA = ((ut32) Rx); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // s = s; + RzILOpEffect *imm_assign_5 = SETL("s", s); + + // fcirc_add(bundle, Rx, s, Mu, get_corresponding_CS(pkt, Mu)); + RzILOpEffect *fcirc_add_call_9 = hex_fcirc_add(bundle, Rx_op, VARL("s"), Mu, HEX_GET_CORRESPONDING_CS(pkt, Mu_op)); + + // h_tmp207 = fcirc_add(bundle, Rx, s, Mu, get_corresponding_CS(pkt, Mu)); + RzILOpEffect *op_ASSIGN_hybrid_tmp_11 = SETL("h_tmp207", SIGNED(32, VARL("ret_val"))); + + // seq(fcirc_add(bundle, Rx, s, Mu, get_corresponding_CS(pkt, Mu)); ...; + RzILOpEffect *seq_12 = SEQN(2, fcirc_add_call_9, op_ASSIGN_hybrid_tmp_11); + + // Rd = ((st32) ((st8) mem_load_8(EA))); + 
RzILOpPure *ml_EA_15 = LOADW(8, VARL("EA")); + RzILOpEffect *op_ASSIGN_18 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(CAST(8, MSB(ml_EA_15), DUP(ml_EA_15))), CAST(8, MSB(DUP(ml_EA_15)), DUP(ml_EA_15)))); + + RzILOpEffect *instruction_sequence = SEQN(4, imm_assign_5, seq_12, op_ASSIGN_3, op_ASSIGN_18); + return instruction_sequence; +} + +// Rd = memb(Rx++I:circ(Mu)) +RzILOpEffect *hex_il_op_l2_loadrb_pcr(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Mu_op = ISA2REG(hi, 'u', false); + RzILOpPure *Mu = READ_REG(pkt, Mu_op, false); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // EA = ((ut32) Rx); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // fcirc_add(bundle, Rx, ((st32) (sextract64(((ut64) (((Mu & 0xf0000000) >> 0x15) | ((Mu >> 0x11) & 0x7f))), 0x0, 0xb) << 0x0)), Mu, get_corresponding_CS(pkt, Mu)); + RzILOpPure *op_AND_10 = LOGAND(DUP(Mu), SN(32, 0xf0000000)); + RzILOpPure *op_RSHIFT_12 = SHIFTRA(op_AND_10, SN(32, 21)); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(DUP(Mu), SN(32, 17)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0x7f)); + RzILOpPure *op_OR_17 = LOGOR(op_RSHIFT_12, op_AND_16); + RzILOpPure *op_LSHIFT_24 = SHIFTL0(SEXTRACT64(CAST(64, IL_FALSE, op_OR_17), SN(32, 0), SN(32, 11)), SN(32, 0)); + RzILOpEffect *fcirc_add_call_27 = hex_fcirc_add(bundle, Rx_op, CAST(32, MSB(op_LSHIFT_24), DUP(op_LSHIFT_24)), Mu, HEX_GET_CORRESPONDING_CS(pkt, Mu_op)); + + // h_tmp208 = fcirc_add(bundle, Rx, ((st32) (sextract64(((ut64) (((Mu & 0xf0000000) >> 0x15) | ((Mu >> 0x11) & 0x7f))), 0x0, 0xb) << 0x0)), Mu, get_corresponding_CS(pkt, Mu)); + RzILOpEffect *op_ASSIGN_hybrid_tmp_29 = SETL("h_tmp208", SIGNED(32, VARL("ret_val"))); + + // seq(fcirc_add(bundle, Rx, ((st32) (sextract64(((ut64) (((Mu & 0x ...; + RzILOpEffect *seq_30 = SEQN(2, fcirc_add_call_27, 
op_ASSIGN_hybrid_tmp_29); + + // Rd = ((st32) ((st8) mem_load_8(EA))); + RzILOpPure *ml_EA_33 = LOADW(8, VARL("EA")); + RzILOpEffect *op_ASSIGN_36 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(CAST(8, MSB(ml_EA_33), DUP(ml_EA_33))), CAST(8, MSB(DUP(ml_EA_33)), DUP(ml_EA_33)))); + + RzILOpEffect *instruction_sequence = SEQN(3, seq_30, op_ASSIGN_3, op_ASSIGN_36); + return instruction_sequence; +} + +// Rd = memb(Rx++Ii) +RzILOpEffect *hex_il_op_l2_loadrb_pi(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // EA = ((ut32) Rx); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // s = s; + RzILOpEffect *imm_assign_5 = SETL("s", s); + + // Rx = Rx + s; + RzILOpPure *op_ADD_7 = ADD(READ_REG(pkt, Rx_op, false), VARL("s")); + RzILOpEffect *op_ASSIGN_8 = WRITE_REG(bundle, Rx_op, op_ADD_7); + + // Rd = ((st32) ((st8) mem_load_8(EA))); + RzILOpPure *ml_EA_11 = LOADW(8, VARL("EA")); + RzILOpEffect *op_ASSIGN_14 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(CAST(8, MSB(ml_EA_11), DUP(ml_EA_11))), CAST(8, MSB(DUP(ml_EA_11)), DUP(ml_EA_11)))); + + RzILOpEffect *instruction_sequence = SEQN(4, imm_assign_5, op_ASSIGN_3, op_ASSIGN_8, op_ASSIGN_14); + return instruction_sequence; +} + +// Rd = memb(Rx++Mu) +RzILOpEffect *hex_il_op_l2_loadrb_pr(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Mu_op = ISA2REG(hi, 'u', false); + RzILOpPure *Mu = READ_REG(pkt, Mu_op, false); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // EA = ((ut32) Rx); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // Rx = Rx + Mu; + RzILOpPure *op_ADD_6 = 
ADD(READ_REG(pkt, Rx_op, false), Mu); + RzILOpEffect *op_ASSIGN_7 = WRITE_REG(bundle, Rx_op, op_ADD_6); + + // Rd = ((st32) ((st8) mem_load_8(EA))); + RzILOpPure *ml_EA_10 = LOADW(8, VARL("EA")); + RzILOpEffect *op_ASSIGN_13 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(CAST(8, MSB(ml_EA_10), DUP(ml_EA_10))), CAST(8, MSB(DUP(ml_EA_10)), DUP(ml_EA_10)))); + + RzILOpEffect *instruction_sequence = SEQN(3, op_ASSIGN_3, op_ASSIGN_7, op_ASSIGN_13); + return instruction_sequence; +} + +// Rd = memb(gp+Ii) +RzILOpEffect *hex_il_op_l2_loadrbgp(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp gp_op = ALIAS2OP(HEX_REG_ALIAS_GP, false); + RzILOpPure *gp = READ_REG(pkt, &gp_op, false); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = gp + u; + RzILOpPure *op_ADD_4 = ADD(gp, VARL("u")); + RzILOpEffect *op_ASSIGN_5 = SETL("EA", op_ADD_4); + + // Rd = ((st32) ((st8) mem_load_8(EA))); + RzILOpPure *ml_EA_8 = LOADW(8, VARL("EA")); + RzILOpEffect *op_ASSIGN_11 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(CAST(8, MSB(ml_EA_8), DUP(ml_EA_8))), CAST(8, MSB(DUP(ml_EA_8)), DUP(ml_EA_8)))); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_5, op_ASSIGN_11); + return instruction_sequence; +} + +// Rdd = memd(Rs+Ii) +RzILOpEffect *hex_il_op_l2_loadrd_io(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + + // s = s; + RzILOpEffect *imm_assign_0 = SETL("s", s); + + // EA = ((ut32) Rs + s); + RzILOpPure *op_ADD_4 = ADD(Rs, VARL("s")); + RzILOpEffect *op_ASSIGN_6 = SETL("EA", CAST(32, IL_FALSE, op_ADD_4)); + + 
// Rdd = ((st64) ((ut64) mem_load_64(EA))); + RzILOpPure *ml_EA_9 = LOADW(64, VARL("EA")); + RzILOpEffect *op_ASSIGN_12 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, CAST(64, IL_FALSE, ml_EA_9))); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_6, op_ASSIGN_12); + return instruction_sequence; +} + +// Rdd = memd(Rx++Mu:brev) +RzILOpEffect *hex_il_op_l2_loadrd_pbr(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Mu_op = ISA2REG(hi, 'u', false); + RzILOpPure *Mu = READ_REG(pkt, Mu_op, false); + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + + // fbrev(((ut32) Rx)); + RzILOpEffect *fbrev_call_3 = hex_fbrev(CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // h_tmp209 = fbrev(((ut32) Rx)); + RzILOpEffect *op_ASSIGN_hybrid_tmp_5 = SETL("h_tmp209", UNSIGNED(32, VARL("ret_val"))); + + // seq(fbrev(((ut32) Rx)); h_tmp209 = fbrev(((ut32) Rx))); + RzILOpEffect *seq_6 = SEQN(2, fbrev_call_3, op_ASSIGN_hybrid_tmp_5); + + // EA = h_tmp209; + RzILOpEffect *op_ASSIGN_7 = SETL("EA", VARL("h_tmp209")); + + // seq(seq(fbrev(((ut32) Rx)); h_tmp209 = fbrev(((ut32) Rx))); EA = ...; + RzILOpEffect *seq_8 = SEQN(2, seq_6, op_ASSIGN_7); + + // Rx = Rx + Mu; + RzILOpPure *op_ADD_10 = ADD(READ_REG(pkt, Rx_op, false), Mu); + RzILOpEffect *op_ASSIGN_11 = WRITE_REG(bundle, Rx_op, op_ADD_10); + + // Rdd = ((st64) ((ut64) mem_load_64(EA))); + RzILOpPure *ml_EA_14 = LOADW(64, VARL("EA")); + RzILOpEffect *op_ASSIGN_17 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, CAST(64, IL_FALSE, ml_EA_14))); + + RzILOpEffect *instruction_sequence = SEQN(3, seq_8, op_ASSIGN_11, op_ASSIGN_17); + return instruction_sequence; +} + +// Rdd = memd(Rx++Ii:circ(Mu)) +RzILOpEffect *hex_il_op_l2_loadrd_pci(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp 
*Rx_op = ISA2REG(hi, 'x', false); + + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + const HexOp *Mu_op = ISA2REG(hi, 'u', false); + RzILOpPure *Mu = READ_REG(pkt, Mu_op, false); + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + + // EA = ((ut32) Rx); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // s = s; + RzILOpEffect *imm_assign_5 = SETL("s", s); + + // fcirc_add(bundle, Rx, s, Mu, get_corresponding_CS(pkt, Mu)); + RzILOpEffect *fcirc_add_call_9 = hex_fcirc_add(bundle, Rx_op, VARL("s"), Mu, HEX_GET_CORRESPONDING_CS(pkt, Mu_op)); + + // h_tmp210 = fcirc_add(bundle, Rx, s, Mu, get_corresponding_CS(pkt, Mu)); + RzILOpEffect *op_ASSIGN_hybrid_tmp_11 = SETL("h_tmp210", SIGNED(32, VARL("ret_val"))); + + // seq(fcirc_add(bundle, Rx, s, Mu, get_corresponding_CS(pkt, Mu)); ...; + RzILOpEffect *seq_12 = SEQN(2, fcirc_add_call_9, op_ASSIGN_hybrid_tmp_11); + + // Rdd = ((st64) ((ut64) mem_load_64(EA))); + RzILOpPure *ml_EA_15 = LOADW(64, VARL("EA")); + RzILOpEffect *op_ASSIGN_18 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, CAST(64, IL_FALSE, ml_EA_15))); + + RzILOpEffect *instruction_sequence = SEQN(4, imm_assign_5, seq_12, op_ASSIGN_3, op_ASSIGN_18); + return instruction_sequence; +} + +// Rdd = memd(Rx++I:circ(Mu)) +RzILOpEffect *hex_il_op_l2_loadrd_pcr(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Mu_op = ISA2REG(hi, 'u', false); + RzILOpPure *Mu = READ_REG(pkt, Mu_op, false); + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + + // EA = ((ut32) Rx); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // fcirc_add(bundle, Rx, ((st32) (sextract64(((ut64) (((Mu & 0xf0000000) >> 0x15) | ((Mu >> 0x11) & 0x7f))), 0x0, 0xb) << 0x3)), Mu, get_corresponding_CS(pkt, Mu)); + RzILOpPure *op_AND_10 = LOGAND(DUP(Mu), SN(32, 0xf0000000)); + 
RzILOpPure *op_RSHIFT_12 = SHIFTRA(op_AND_10, SN(32, 21)); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(DUP(Mu), SN(32, 17)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0x7f)); + RzILOpPure *op_OR_17 = LOGOR(op_RSHIFT_12, op_AND_16); + RzILOpPure *op_LSHIFT_24 = SHIFTL0(SEXTRACT64(CAST(64, IL_FALSE, op_OR_17), SN(32, 0), SN(32, 11)), SN(32, 3)); + RzILOpEffect *fcirc_add_call_27 = hex_fcirc_add(bundle, Rx_op, CAST(32, MSB(op_LSHIFT_24), DUP(op_LSHIFT_24)), Mu, HEX_GET_CORRESPONDING_CS(pkt, Mu_op)); + + // h_tmp211 = fcirc_add(bundle, Rx, ((st32) (sextract64(((ut64) (((Mu & 0xf0000000) >> 0x15) | ((Mu >> 0x11) & 0x7f))), 0x0, 0xb) << 0x3)), Mu, get_corresponding_CS(pkt, Mu)); + RzILOpEffect *op_ASSIGN_hybrid_tmp_29 = SETL("h_tmp211", SIGNED(32, VARL("ret_val"))); + + // seq(fcirc_add(bundle, Rx, ((st32) (sextract64(((ut64) (((Mu & 0x ...; + RzILOpEffect *seq_30 = SEQN(2, fcirc_add_call_27, op_ASSIGN_hybrid_tmp_29); + + // Rdd = ((st64) ((ut64) mem_load_64(EA))); + RzILOpPure *ml_EA_33 = LOADW(64, VARL("EA")); + RzILOpEffect *op_ASSIGN_36 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, CAST(64, IL_FALSE, ml_EA_33))); + + RzILOpEffect *instruction_sequence = SEQN(3, seq_30, op_ASSIGN_3, op_ASSIGN_36); + return instruction_sequence; +} + +// Rdd = memd(Rx++Ii) +RzILOpEffect *hex_il_op_l2_loadrd_pi(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + + // EA = ((ut32) Rx); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // s = s; + RzILOpEffect *imm_assign_5 = SETL("s", s); + + // Rx = Rx + s; + RzILOpPure *op_ADD_7 = ADD(READ_REG(pkt, Rx_op, false), VARL("s")); + RzILOpEffect *op_ASSIGN_8 = WRITE_REG(bundle, Rx_op, op_ADD_7); + + // Rdd = ((st64) ((ut64) mem_load_64(EA))); + RzILOpPure *ml_EA_11 = 
LOADW(64, VARL("EA")); + RzILOpEffect *op_ASSIGN_14 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, CAST(64, IL_FALSE, ml_EA_11))); + + RzILOpEffect *instruction_sequence = SEQN(4, imm_assign_5, op_ASSIGN_3, op_ASSIGN_8, op_ASSIGN_14); + return instruction_sequence; +} + +// Rdd = memd(Rx++Mu) +RzILOpEffect *hex_il_op_l2_loadrd_pr(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Mu_op = ISA2REG(hi, 'u', false); + RzILOpPure *Mu = READ_REG(pkt, Mu_op, false); + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + + // EA = ((ut32) Rx); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // Rx = Rx + Mu; + RzILOpPure *op_ADD_6 = ADD(READ_REG(pkt, Rx_op, false), Mu); + RzILOpEffect *op_ASSIGN_7 = WRITE_REG(bundle, Rx_op, op_ADD_6); + + // Rdd = ((st64) ((ut64) mem_load_64(EA))); + RzILOpPure *ml_EA_10 = LOADW(64, VARL("EA")); + RzILOpEffect *op_ASSIGN_13 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, CAST(64, IL_FALSE, ml_EA_10))); + + RzILOpEffect *instruction_sequence = SEQN(3, op_ASSIGN_3, op_ASSIGN_7, op_ASSIGN_13); + return instruction_sequence; +} + +// Rdd = memd(gp+Ii) +RzILOpEffect *hex_il_op_l2_loadrdgp(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp gp_op = ALIAS2OP(HEX_REG_ALIAS_GP, false); + RzILOpPure *gp = READ_REG(pkt, &gp_op, false); + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = gp + u; + RzILOpPure *op_ADD_4 = ADD(gp, VARL("u")); + RzILOpEffect *op_ASSIGN_5 = SETL("EA", op_ADD_4); + + // Rdd = ((st64) ((ut64) mem_load_64(EA))); + RzILOpPure *ml_EA_8 = LOADW(64, VARL("EA")); + RzILOpEffect *op_ASSIGN_11 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, CAST(64, 
IL_FALSE, ml_EA_8))); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_5, op_ASSIGN_11); + return instruction_sequence; +} + +// Rd = memh(Rs+Ii) +RzILOpEffect *hex_il_op_l2_loadrh_io(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // s = s; + RzILOpEffect *imm_assign_0 = SETL("s", s); + + // EA = ((ut32) Rs + s); + RzILOpPure *op_ADD_4 = ADD(Rs, VARL("s")); + RzILOpEffect *op_ASSIGN_6 = SETL("EA", CAST(32, IL_FALSE, op_ADD_4)); + + // Rd = ((st32) ((st16) mem_load_16(EA))); + RzILOpPure *ml_EA_9 = LOADW(16, VARL("EA")); + RzILOpEffect *op_ASSIGN_12 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(CAST(16, MSB(ml_EA_9), DUP(ml_EA_9))), CAST(16, MSB(DUP(ml_EA_9)), DUP(ml_EA_9)))); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_6, op_ASSIGN_12); + return instruction_sequence; +} + +// Rd = memh(Rx++Mu:brev) +RzILOpEffect *hex_il_op_l2_loadrh_pbr(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Mu_op = ISA2REG(hi, 'u', false); + RzILOpPure *Mu = READ_REG(pkt, Mu_op, false); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // fbrev(((ut32) Rx)); + RzILOpEffect *fbrev_call_3 = hex_fbrev(CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // h_tmp212 = fbrev(((ut32) Rx)); + RzILOpEffect *op_ASSIGN_hybrid_tmp_5 = SETL("h_tmp212", UNSIGNED(32, VARL("ret_val"))); + + // seq(fbrev(((ut32) Rx)); h_tmp212 = fbrev(((ut32) Rx))); + RzILOpEffect *seq_6 = SEQN(2, fbrev_call_3, op_ASSIGN_hybrid_tmp_5); + + // EA = h_tmp212; + RzILOpEffect *op_ASSIGN_7 = SETL("EA", VARL("h_tmp212")); + + // seq(seq(fbrev(((ut32) Rx)); h_tmp212 
= fbrev(((ut32) Rx))); EA = ...; + RzILOpEffect *seq_8 = SEQN(2, seq_6, op_ASSIGN_7); + + // Rx = Rx + Mu; + RzILOpPure *op_ADD_10 = ADD(READ_REG(pkt, Rx_op, false), Mu); + RzILOpEffect *op_ASSIGN_11 = WRITE_REG(bundle, Rx_op, op_ADD_10); + + // Rd = ((st32) ((st16) mem_load_16(EA))); + RzILOpPure *ml_EA_14 = LOADW(16, VARL("EA")); + RzILOpEffect *op_ASSIGN_17 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(CAST(16, MSB(ml_EA_14), DUP(ml_EA_14))), CAST(16, MSB(DUP(ml_EA_14)), DUP(ml_EA_14)))); + + RzILOpEffect *instruction_sequence = SEQN(3, seq_8, op_ASSIGN_11, op_ASSIGN_17); + return instruction_sequence; +} + +// Rd = memh(Rx++Ii:circ(Mu)) +RzILOpEffect *hex_il_op_l2_loadrh_pci(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + const HexOp *Mu_op = ISA2REG(hi, 'u', false); + RzILOpPure *Mu = READ_REG(pkt, Mu_op, false); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // EA = ((ut32) Rx); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // s = s; + RzILOpEffect *imm_assign_5 = SETL("s", s); + + // fcirc_add(bundle, Rx, s, Mu, get_corresponding_CS(pkt, Mu)); + RzILOpEffect *fcirc_add_call_9 = hex_fcirc_add(bundle, Rx_op, VARL("s"), Mu, HEX_GET_CORRESPONDING_CS(pkt, Mu_op)); + + // h_tmp213 = fcirc_add(bundle, Rx, s, Mu, get_corresponding_CS(pkt, Mu)); + RzILOpEffect *op_ASSIGN_hybrid_tmp_11 = SETL("h_tmp213", SIGNED(32, VARL("ret_val"))); + + // seq(fcirc_add(bundle, Rx, s, Mu, get_corresponding_CS(pkt, Mu)); ...; + RzILOpEffect *seq_12 = SEQN(2, fcirc_add_call_9, op_ASSIGN_hybrid_tmp_11); + + // Rd = ((st32) ((st16) mem_load_16(EA))); + RzILOpPure *ml_EA_15 = LOADW(16, VARL("EA")); + RzILOpEffect *op_ASSIGN_18 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(CAST(16, MSB(ml_EA_15), DUP(ml_EA_15))), CAST(16, MSB(DUP(ml_EA_15)), DUP(ml_EA_15)))); + + 
RzILOpEffect *instruction_sequence = SEQN(4, imm_assign_5, seq_12, op_ASSIGN_3, op_ASSIGN_18); + return instruction_sequence; +} + +// Rd = memh(Rx++I:circ(Mu)) +RzILOpEffect *hex_il_op_l2_loadrh_pcr(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Mu_op = ISA2REG(hi, 'u', false); + RzILOpPure *Mu = READ_REG(pkt, Mu_op, false); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // EA = ((ut32) Rx); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // fcirc_add(bundle, Rx, ((st32) (sextract64(((ut64) (((Mu & 0xf0000000) >> 0x15) | ((Mu >> 0x11) & 0x7f))), 0x0, 0xb) << 0x1)), Mu, get_corresponding_CS(pkt, Mu)); + RzILOpPure *op_AND_10 = LOGAND(DUP(Mu), SN(32, 0xf0000000)); + RzILOpPure *op_RSHIFT_12 = SHIFTRA(op_AND_10, SN(32, 21)); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(DUP(Mu), SN(32, 17)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0x7f)); + RzILOpPure *op_OR_17 = LOGOR(op_RSHIFT_12, op_AND_16); + RzILOpPure *op_LSHIFT_24 = SHIFTL0(SEXTRACT64(CAST(64, IL_FALSE, op_OR_17), SN(32, 0), SN(32, 11)), SN(32, 1)); + RzILOpEffect *fcirc_add_call_27 = hex_fcirc_add(bundle, Rx_op, CAST(32, MSB(op_LSHIFT_24), DUP(op_LSHIFT_24)), Mu, HEX_GET_CORRESPONDING_CS(pkt, Mu_op)); + + // h_tmp214 = fcirc_add(bundle, Rx, ((st32) (sextract64(((ut64) (((Mu & 0xf0000000) >> 0x15) | ((Mu >> 0x11) & 0x7f))), 0x0, 0xb) << 0x1)), Mu, get_corresponding_CS(pkt, Mu)); + RzILOpEffect *op_ASSIGN_hybrid_tmp_29 = SETL("h_tmp214", SIGNED(32, VARL("ret_val"))); + + // seq(fcirc_add(bundle, Rx, ((st32) (sextract64(((ut64) (((Mu & 0x ...; + RzILOpEffect *seq_30 = SEQN(2, fcirc_add_call_27, op_ASSIGN_hybrid_tmp_29); + + // Rd = ((st32) ((st16) mem_load_16(EA))); + RzILOpPure *ml_EA_33 = LOADW(16, VARL("EA")); + RzILOpEffect *op_ASSIGN_36 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(CAST(16, MSB(ml_EA_33), 
DUP(ml_EA_33))), CAST(16, MSB(DUP(ml_EA_33)), DUP(ml_EA_33)))); + + RzILOpEffect *instruction_sequence = SEQN(3, seq_30, op_ASSIGN_3, op_ASSIGN_36); + return instruction_sequence; +} + +// Rd = memh(Rx++Ii) +RzILOpEffect *hex_il_op_l2_loadrh_pi(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // EA = ((ut32) Rx); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // s = s; + RzILOpEffect *imm_assign_5 = SETL("s", s); + + // Rx = Rx + s; + RzILOpPure *op_ADD_7 = ADD(READ_REG(pkt, Rx_op, false), VARL("s")); + RzILOpEffect *op_ASSIGN_8 = WRITE_REG(bundle, Rx_op, op_ADD_7); + + // Rd = ((st32) ((st16) mem_load_16(EA))); + RzILOpPure *ml_EA_11 = LOADW(16, VARL("EA")); + RzILOpEffect *op_ASSIGN_14 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(CAST(16, MSB(ml_EA_11), DUP(ml_EA_11))), CAST(16, MSB(DUP(ml_EA_11)), DUP(ml_EA_11)))); + + RzILOpEffect *instruction_sequence = SEQN(4, imm_assign_5, op_ASSIGN_3, op_ASSIGN_8, op_ASSIGN_14); + return instruction_sequence; +} + +// Rd = memh(Rx++Mu) +RzILOpEffect *hex_il_op_l2_loadrh_pr(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Mu_op = ISA2REG(hi, 'u', false); + RzILOpPure *Mu = READ_REG(pkt, Mu_op, false); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // EA = ((ut32) Rx); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // Rx = Rx + Mu; + RzILOpPure *op_ADD_6 = ADD(READ_REG(pkt, Rx_op, false), Mu); + RzILOpEffect *op_ASSIGN_7 = WRITE_REG(bundle, Rx_op, op_ADD_6); + + // Rd = ((st32) ((st16) mem_load_16(EA))); + RzILOpPure *ml_EA_10 = LOADW(16, VARL("EA")); + RzILOpEffect 
*op_ASSIGN_13 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(CAST(16, MSB(ml_EA_10), DUP(ml_EA_10))), CAST(16, MSB(DUP(ml_EA_10)), DUP(ml_EA_10)))); + + RzILOpEffect *instruction_sequence = SEQN(3, op_ASSIGN_3, op_ASSIGN_7, op_ASSIGN_13); + return instruction_sequence; +} + +// Rd = memh(gp+Ii) +RzILOpEffect *hex_il_op_l2_loadrhgp(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp gp_op = ALIAS2OP(HEX_REG_ALIAS_GP, false); + RzILOpPure *gp = READ_REG(pkt, &gp_op, false); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = gp + u; + RzILOpPure *op_ADD_4 = ADD(gp, VARL("u")); + RzILOpEffect *op_ASSIGN_5 = SETL("EA", op_ADD_4); + + // Rd = ((st32) ((st16) mem_load_16(EA))); + RzILOpPure *ml_EA_8 = LOADW(16, VARL("EA")); + RzILOpEffect *op_ASSIGN_11 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(CAST(16, MSB(ml_EA_8), DUP(ml_EA_8))), CAST(16, MSB(DUP(ml_EA_8)), DUP(ml_EA_8)))); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_5, op_ASSIGN_11); + return instruction_sequence; +} + +// Rd = memw(Rs+Ii) +RzILOpEffect *hex_il_op_l2_loadri_io(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // s = s; + RzILOpEffect *imm_assign_0 = SETL("s", s); + + // EA = ((ut32) Rs + s); + RzILOpPure *op_ADD_4 = ADD(Rs, VARL("s")); + RzILOpEffect *op_ASSIGN_6 = SETL("EA", CAST(32, IL_FALSE, op_ADD_4)); + + // Rd = ((st32) ((ut32) mem_load_32(EA))); + RzILOpPure *ml_EA_9 = LOADW(32, VARL("EA")); + RzILOpEffect *op_ASSIGN_12 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, CAST(32, IL_FALSE, ml_EA_9))); + + 
RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_6, op_ASSIGN_12); + return instruction_sequence; +} + +// Rd = memw(Rx++Mu:brev) +RzILOpEffect *hex_il_op_l2_loadri_pbr(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Mu_op = ISA2REG(hi, 'u', false); + RzILOpPure *Mu = READ_REG(pkt, Mu_op, false); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // fbrev(((ut32) Rx)); + RzILOpEffect *fbrev_call_3 = hex_fbrev(CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // h_tmp215 = fbrev(((ut32) Rx)); + RzILOpEffect *op_ASSIGN_hybrid_tmp_5 = SETL("h_tmp215", UNSIGNED(32, VARL("ret_val"))); + + // seq(fbrev(((ut32) Rx)); h_tmp215 = fbrev(((ut32) Rx))); + RzILOpEffect *seq_6 = SEQN(2, fbrev_call_3, op_ASSIGN_hybrid_tmp_5); + + // EA = h_tmp215; + RzILOpEffect *op_ASSIGN_7 = SETL("EA", VARL("h_tmp215")); + + // seq(seq(fbrev(((ut32) Rx)); h_tmp215 = fbrev(((ut32) Rx))); EA = ...; + RzILOpEffect *seq_8 = SEQN(2, seq_6, op_ASSIGN_7); + + // Rx = Rx + Mu; + RzILOpPure *op_ADD_10 = ADD(READ_REG(pkt, Rx_op, false), Mu); + RzILOpEffect *op_ASSIGN_11 = WRITE_REG(bundle, Rx_op, op_ADD_10); + + // Rd = ((st32) ((ut32) mem_load_32(EA))); + RzILOpPure *ml_EA_14 = LOADW(32, VARL("EA")); + RzILOpEffect *op_ASSIGN_17 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, CAST(32, IL_FALSE, ml_EA_14))); + + RzILOpEffect *instruction_sequence = SEQN(3, seq_8, op_ASSIGN_11, op_ASSIGN_17); + return instruction_sequence; +} + +// Rd = memw(Rx++Ii:circ(Mu)) +RzILOpEffect *hex_il_op_l2_loadri_pci(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + const HexOp *Mu_op = ISA2REG(hi, 'u', false); + RzILOpPure *Mu = READ_REG(pkt, Mu_op, false); + const HexOp *Rd_op = 
ISA2REG(hi, 'd', false); + + // EA = ((ut32) Rx); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // s = s; + RzILOpEffect *imm_assign_5 = SETL("s", s); + + // fcirc_add(bundle, Rx, s, Mu, get_corresponding_CS(pkt, Mu)); + RzILOpEffect *fcirc_add_call_9 = hex_fcirc_add(bundle, Rx_op, VARL("s"), Mu, HEX_GET_CORRESPONDING_CS(pkt, Mu_op)); + + // h_tmp216 = fcirc_add(bundle, Rx, s, Mu, get_corresponding_CS(pkt, Mu)); + RzILOpEffect *op_ASSIGN_hybrid_tmp_11 = SETL("h_tmp216", SIGNED(32, VARL("ret_val"))); + + // seq(fcirc_add(bundle, Rx, s, Mu, get_corresponding_CS(pkt, Mu)); ...; + RzILOpEffect *seq_12 = SEQN(2, fcirc_add_call_9, op_ASSIGN_hybrid_tmp_11); + + // Rd = ((st32) ((ut32) mem_load_32(EA))); + RzILOpPure *ml_EA_15 = LOADW(32, VARL("EA")); + RzILOpEffect *op_ASSIGN_18 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, CAST(32, IL_FALSE, ml_EA_15))); + + RzILOpEffect *instruction_sequence = SEQN(4, imm_assign_5, seq_12, op_ASSIGN_3, op_ASSIGN_18); + return instruction_sequence; +} + +// Rd = memw(Rx++I:circ(Mu)) +RzILOpEffect *hex_il_op_l2_loadri_pcr(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Mu_op = ISA2REG(hi, 'u', false); + RzILOpPure *Mu = READ_REG(pkt, Mu_op, false); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // EA = ((ut32) Rx); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // fcirc_add(bundle, Rx, ((st32) (sextract64(((ut64) (((Mu & 0xf0000000) >> 0x15) | ((Mu >> 0x11) & 0x7f))), 0x0, 0xb) << 0x2)), Mu, get_corresponding_CS(pkt, Mu)); + RzILOpPure *op_AND_10 = LOGAND(DUP(Mu), SN(32, 0xf0000000)); + RzILOpPure *op_RSHIFT_12 = SHIFTRA(op_AND_10, SN(32, 21)); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(DUP(Mu), SN(32, 17)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0x7f)); + RzILOpPure *op_OR_17 = 
LOGOR(op_RSHIFT_12, op_AND_16); + RzILOpPure *op_LSHIFT_24 = SHIFTL0(SEXTRACT64(CAST(64, IL_FALSE, op_OR_17), SN(32, 0), SN(32, 11)), SN(32, 2)); + RzILOpEffect *fcirc_add_call_27 = hex_fcirc_add(bundle, Rx_op, CAST(32, MSB(op_LSHIFT_24), DUP(op_LSHIFT_24)), Mu, HEX_GET_CORRESPONDING_CS(pkt, Mu_op)); + + // h_tmp217 = fcirc_add(bundle, Rx, ((st32) (sextract64(((ut64) (((Mu & 0xf0000000) >> 0x15) | ((Mu >> 0x11) & 0x7f))), 0x0, 0xb) << 0x2)), Mu, get_corresponding_CS(pkt, Mu)); + RzILOpEffect *op_ASSIGN_hybrid_tmp_29 = SETL("h_tmp217", SIGNED(32, VARL("ret_val"))); + + // seq(fcirc_add(bundle, Rx, ((st32) (sextract64(((ut64) (((Mu & 0x ...; + RzILOpEffect *seq_30 = SEQN(2, fcirc_add_call_27, op_ASSIGN_hybrid_tmp_29); + + // Rd = ((st32) ((ut32) mem_load_32(EA))); + RzILOpPure *ml_EA_33 = LOADW(32, VARL("EA")); + RzILOpEffect *op_ASSIGN_36 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, CAST(32, IL_FALSE, ml_EA_33))); + + RzILOpEffect *instruction_sequence = SEQN(3, seq_30, op_ASSIGN_3, op_ASSIGN_36); + return instruction_sequence; +} + +// Rd = memw(Rx++Ii) +RzILOpEffect *hex_il_op_l2_loadri_pi(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // EA = ((ut32) Rx); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // s = s; + RzILOpEffect *imm_assign_5 = SETL("s", s); + + // Rx = Rx + s; + RzILOpPure *op_ADD_7 = ADD(READ_REG(pkt, Rx_op, false), VARL("s")); + RzILOpEffect *op_ASSIGN_8 = WRITE_REG(bundle, Rx_op, op_ADD_7); + + // Rd = ((st32) ((ut32) mem_load_32(EA))); + RzILOpPure *ml_EA_11 = LOADW(32, VARL("EA")); + RzILOpEffect *op_ASSIGN_14 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, CAST(32, IL_FALSE, ml_EA_11))); + + RzILOpEffect *instruction_sequence = SEQN(4, imm_assign_5, op_ASSIGN_3, 
op_ASSIGN_8, op_ASSIGN_14); + return instruction_sequence; +} + +// Rd = memw(Rx++Mu) +RzILOpEffect *hex_il_op_l2_loadri_pr(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Mu_op = ISA2REG(hi, 'u', false); + RzILOpPure *Mu = READ_REG(pkt, Mu_op, false); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // EA = ((ut32) Rx); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // Rx = Rx + Mu; + RzILOpPure *op_ADD_6 = ADD(READ_REG(pkt, Rx_op, false), Mu); + RzILOpEffect *op_ASSIGN_7 = WRITE_REG(bundle, Rx_op, op_ADD_6); + + // Rd = ((st32) ((ut32) mem_load_32(EA))); + RzILOpPure *ml_EA_10 = LOADW(32, VARL("EA")); + RzILOpEffect *op_ASSIGN_13 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, CAST(32, IL_FALSE, ml_EA_10))); + + RzILOpEffect *instruction_sequence = SEQN(3, op_ASSIGN_3, op_ASSIGN_7, op_ASSIGN_13); + return instruction_sequence; +} + +// Rd = memw(gp+Ii) +RzILOpEffect *hex_il_op_l2_loadrigp(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp gp_op = ALIAS2OP(HEX_REG_ALIAS_GP, false); + RzILOpPure *gp = READ_REG(pkt, &gp_op, false); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = gp + u; + RzILOpPure *op_ADD_4 = ADD(gp, VARL("u")); + RzILOpEffect *op_ASSIGN_5 = SETL("EA", op_ADD_4); + + // Rd = ((st32) ((ut32) mem_load_32(EA))); + RzILOpPure *ml_EA_8 = LOADW(32, VARL("EA")); + RzILOpEffect *op_ASSIGN_11 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, CAST(32, IL_FALSE, ml_EA_8))); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_5, op_ASSIGN_11); + return instruction_sequence; +} + +// Rd = memub(Rs+Ii) +RzILOpEffect 
*hex_il_op_l2_loadrub_io(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // s = s; + RzILOpEffect *imm_assign_0 = SETL("s", s); + + // EA = ((ut32) Rs + s); + RzILOpPure *op_ADD_4 = ADD(Rs, VARL("s")); + RzILOpEffect *op_ASSIGN_6 = SETL("EA", CAST(32, IL_FALSE, op_ADD_4)); + + // Rd = ((st32) ((ut8) mem_load_8(EA))); + RzILOpPure *ml_EA_9 = LOADW(8, VARL("EA")); + RzILOpEffect *op_ASSIGN_12 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, CAST(8, IL_FALSE, ml_EA_9))); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_6, op_ASSIGN_12); + return instruction_sequence; +} + +// Rd = memub(Rx++Mu:brev) +RzILOpEffect *hex_il_op_l2_loadrub_pbr(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Mu_op = ISA2REG(hi, 'u', false); + RzILOpPure *Mu = READ_REG(pkt, Mu_op, false); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // fbrev(((ut32) Rx)); + RzILOpEffect *fbrev_call_3 = hex_fbrev(CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // h_tmp218 = fbrev(((ut32) Rx)); + RzILOpEffect *op_ASSIGN_hybrid_tmp_5 = SETL("h_tmp218", UNSIGNED(32, VARL("ret_val"))); + + // seq(fbrev(((ut32) Rx)); h_tmp218 = fbrev(((ut32) Rx))); + RzILOpEffect *seq_6 = SEQN(2, fbrev_call_3, op_ASSIGN_hybrid_tmp_5); + + // EA = h_tmp218; + RzILOpEffect *op_ASSIGN_7 = SETL("EA", VARL("h_tmp218")); + + // seq(seq(fbrev(((ut32) Rx)); h_tmp218 = fbrev(((ut32) Rx))); EA = ...; + RzILOpEffect *seq_8 = SEQN(2, seq_6, op_ASSIGN_7); + + // Rx = Rx + Mu; + RzILOpPure *op_ADD_10 = ADD(READ_REG(pkt, Rx_op, false), Mu); + RzILOpEffect *op_ASSIGN_11 = WRITE_REG(bundle, Rx_op, 
op_ADD_10); + + // Rd = ((st32) ((ut8) mem_load_8(EA))); + RzILOpPure *ml_EA_14 = LOADW(8, VARL("EA")); + RzILOpEffect *op_ASSIGN_17 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, CAST(8, IL_FALSE, ml_EA_14))); + + RzILOpEffect *instruction_sequence = SEQN(3, seq_8, op_ASSIGN_11, op_ASSIGN_17); + return instruction_sequence; +} + +// Rd = memub(Rx++Ii:circ(Mu)) +RzILOpEffect *hex_il_op_l2_loadrub_pci(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + const HexOp *Mu_op = ISA2REG(hi, 'u', false); + RzILOpPure *Mu = READ_REG(pkt, Mu_op, false); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // EA = ((ut32) Rx); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // s = s; + RzILOpEffect *imm_assign_5 = SETL("s", s); + + // fcirc_add(bundle, Rx, s, Mu, get_corresponding_CS(pkt, Mu)); + RzILOpEffect *fcirc_add_call_9 = hex_fcirc_add(bundle, Rx_op, VARL("s"), Mu, HEX_GET_CORRESPONDING_CS(pkt, Mu_op)); + + // h_tmp219 = fcirc_add(bundle, Rx, s, Mu, get_corresponding_CS(pkt, Mu)); + RzILOpEffect *op_ASSIGN_hybrid_tmp_11 = SETL("h_tmp219", SIGNED(32, VARL("ret_val"))); + + // seq(fcirc_add(bundle, Rx, s, Mu, get_corresponding_CS(pkt, Mu)); ...; + RzILOpEffect *seq_12 = SEQN(2, fcirc_add_call_9, op_ASSIGN_hybrid_tmp_11); + + // Rd = ((st32) ((ut8) mem_load_8(EA))); + RzILOpPure *ml_EA_15 = LOADW(8, VARL("EA")); + RzILOpEffect *op_ASSIGN_18 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, CAST(8, IL_FALSE, ml_EA_15))); + + RzILOpEffect *instruction_sequence = SEQN(4, imm_assign_5, seq_12, op_ASSIGN_3, op_ASSIGN_18); + return instruction_sequence; +} + +// Rd = memub(Rx++I:circ(Mu)) +RzILOpEffect *hex_il_op_l2_loadrub_pcr(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp 
*Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Mu_op = ISA2REG(hi, 'u', false); + RzILOpPure *Mu = READ_REG(pkt, Mu_op, false); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // EA = ((ut32) Rx); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // fcirc_add(bundle, Rx, ((st32) (sextract64(((ut64) (((Mu & 0xf0000000) >> 0x15) | ((Mu >> 0x11) & 0x7f))), 0x0, 0xb) << 0x0)), Mu, get_corresponding_CS(pkt, Mu)); + RzILOpPure *op_AND_10 = LOGAND(DUP(Mu), SN(32, 0xf0000000)); + RzILOpPure *op_RSHIFT_12 = SHIFTRA(op_AND_10, SN(32, 21)); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(DUP(Mu), SN(32, 17)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0x7f)); + RzILOpPure *op_OR_17 = LOGOR(op_RSHIFT_12, op_AND_16); + RzILOpPure *op_LSHIFT_24 = SHIFTL0(SEXTRACT64(CAST(64, IL_FALSE, op_OR_17), SN(32, 0), SN(32, 11)), SN(32, 0)); + RzILOpEffect *fcirc_add_call_27 = hex_fcirc_add(bundle, Rx_op, CAST(32, MSB(op_LSHIFT_24), DUP(op_LSHIFT_24)), Mu, HEX_GET_CORRESPONDING_CS(pkt, Mu_op)); + + // h_tmp220 = fcirc_add(bundle, Rx, ((st32) (sextract64(((ut64) (((Mu & 0xf0000000) >> 0x15) | ((Mu >> 0x11) & 0x7f))), 0x0, 0xb) << 0x0)), Mu, get_corresponding_CS(pkt, Mu)); + RzILOpEffect *op_ASSIGN_hybrid_tmp_29 = SETL("h_tmp220", SIGNED(32, VARL("ret_val"))); + + // seq(fcirc_add(bundle, Rx, ((st32) (sextract64(((ut64) (((Mu & 0x ...; + RzILOpEffect *seq_30 = SEQN(2, fcirc_add_call_27, op_ASSIGN_hybrid_tmp_29); + + // Rd = ((st32) ((ut8) mem_load_8(EA))); + RzILOpPure *ml_EA_33 = LOADW(8, VARL("EA")); + RzILOpEffect *op_ASSIGN_36 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, CAST(8, IL_FALSE, ml_EA_33))); + + RzILOpEffect *instruction_sequence = SEQN(3, seq_30, op_ASSIGN_3, op_ASSIGN_36); + return instruction_sequence; +} + +// Rd = memub(Rx++Ii) +RzILOpEffect *hex_il_op_l2_loadrub_pi(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = 
ISA2REG(hi, 'x', false); + + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // EA = ((ut32) Rx); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // s = s; + RzILOpEffect *imm_assign_5 = SETL("s", s); + + // Rx = Rx + s; + RzILOpPure *op_ADD_7 = ADD(READ_REG(pkt, Rx_op, false), VARL("s")); + RzILOpEffect *op_ASSIGN_8 = WRITE_REG(bundle, Rx_op, op_ADD_7); + + // Rd = ((st32) ((ut8) mem_load_8(EA))); + RzILOpPure *ml_EA_11 = LOADW(8, VARL("EA")); + RzILOpEffect *op_ASSIGN_14 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, CAST(8, IL_FALSE, ml_EA_11))); + + RzILOpEffect *instruction_sequence = SEQN(4, imm_assign_5, op_ASSIGN_3, op_ASSIGN_8, op_ASSIGN_14); + return instruction_sequence; +} + +// Rd = memub(Rx++Mu) +RzILOpEffect *hex_il_op_l2_loadrub_pr(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Mu_op = ISA2REG(hi, 'u', false); + RzILOpPure *Mu = READ_REG(pkt, Mu_op, false); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // EA = ((ut32) Rx); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // Rx = Rx + Mu; + RzILOpPure *op_ADD_6 = ADD(READ_REG(pkt, Rx_op, false), Mu); + RzILOpEffect *op_ASSIGN_7 = WRITE_REG(bundle, Rx_op, op_ADD_6); + + // Rd = ((st32) ((ut8) mem_load_8(EA))); + RzILOpPure *ml_EA_10 = LOADW(8, VARL("EA")); + RzILOpEffect *op_ASSIGN_13 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, CAST(8, IL_FALSE, ml_EA_10))); + + RzILOpEffect *instruction_sequence = SEQN(3, op_ASSIGN_3, op_ASSIGN_7, op_ASSIGN_13); + return instruction_sequence; +} + +// Rd = memub(gp+Ii) +RzILOpEffect *hex_il_op_l2_loadrubgp(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 
EA; + const HexOp gp_op = ALIAS2OP(HEX_REG_ALIAS_GP, false); + RzILOpPure *gp = READ_REG(pkt, &gp_op, false); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = gp + u; + RzILOpPure *op_ADD_4 = ADD(gp, VARL("u")); + RzILOpEffect *op_ASSIGN_5 = SETL("EA", op_ADD_4); + + // Rd = ((st32) ((ut8) mem_load_8(EA))); + RzILOpPure *ml_EA_8 = LOADW(8, VARL("EA")); + RzILOpEffect *op_ASSIGN_11 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, CAST(8, IL_FALSE, ml_EA_8))); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_5, op_ASSIGN_11); + return instruction_sequence; +} + +// Rd = memuh(Rs+Ii) +RzILOpEffect *hex_il_op_l2_loadruh_io(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // s = s; + RzILOpEffect *imm_assign_0 = SETL("s", s); + + // EA = ((ut32) Rs + s); + RzILOpPure *op_ADD_4 = ADD(Rs, VARL("s")); + RzILOpEffect *op_ASSIGN_6 = SETL("EA", CAST(32, IL_FALSE, op_ADD_4)); + + // Rd = ((st32) ((ut16) mem_load_16(EA))); + RzILOpPure *ml_EA_9 = LOADW(16, VARL("EA")); + RzILOpEffect *op_ASSIGN_12 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, CAST(16, IL_FALSE, ml_EA_9))); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_6, op_ASSIGN_12); + return instruction_sequence; +} + +// Rd = memuh(Rx++Mu:brev) +RzILOpEffect *hex_il_op_l2_loadruh_pbr(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Mu_op = ISA2REG(hi, 'u', false); + RzILOpPure *Mu = READ_REG(pkt, Mu_op, false); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // fbrev(((ut32) Rx)); + RzILOpEffect 
*fbrev_call_3 = hex_fbrev(CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // h_tmp221 = fbrev(((ut32) Rx)); + RzILOpEffect *op_ASSIGN_hybrid_tmp_5 = SETL("h_tmp221", UNSIGNED(32, VARL("ret_val"))); + + // seq(fbrev(((ut32) Rx)); h_tmp221 = fbrev(((ut32) Rx))); + RzILOpEffect *seq_6 = SEQN(2, fbrev_call_3, op_ASSIGN_hybrid_tmp_5); + + // EA = h_tmp221; + RzILOpEffect *op_ASSIGN_7 = SETL("EA", VARL("h_tmp221")); + + // seq(seq(fbrev(((ut32) Rx)); h_tmp221 = fbrev(((ut32) Rx))); EA = ...; + RzILOpEffect *seq_8 = SEQN(2, seq_6, op_ASSIGN_7); + + // Rx = Rx + Mu; + RzILOpPure *op_ADD_10 = ADD(READ_REG(pkt, Rx_op, false), Mu); + RzILOpEffect *op_ASSIGN_11 = WRITE_REG(bundle, Rx_op, op_ADD_10); + + // Rd = ((st32) ((ut16) mem_load_16(EA))); + RzILOpPure *ml_EA_14 = LOADW(16, VARL("EA")); + RzILOpEffect *op_ASSIGN_17 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, CAST(16, IL_FALSE, ml_EA_14))); + + RzILOpEffect *instruction_sequence = SEQN(3, seq_8, op_ASSIGN_11, op_ASSIGN_17); + return instruction_sequence; +} + +// Rd = memuh(Rx++Ii:circ(Mu)) +RzILOpEffect *hex_il_op_l2_loadruh_pci(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + const HexOp *Mu_op = ISA2REG(hi, 'u', false); + RzILOpPure *Mu = READ_REG(pkt, Mu_op, false); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // EA = ((ut32) Rx); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // s = s; + RzILOpEffect *imm_assign_5 = SETL("s", s); + + // fcirc_add(bundle, Rx, s, Mu, get_corresponding_CS(pkt, Mu)); + RzILOpEffect *fcirc_add_call_9 = hex_fcirc_add(bundle, Rx_op, VARL("s"), Mu, HEX_GET_CORRESPONDING_CS(pkt, Mu_op)); + + // h_tmp222 = fcirc_add(bundle, Rx, s, Mu, get_corresponding_CS(pkt, Mu)); + RzILOpEffect *op_ASSIGN_hybrid_tmp_11 = SETL("h_tmp222", SIGNED(32, VARL("ret_val"))); + 
+ // seq(fcirc_add(bundle, Rx, s, Mu, get_corresponding_CS(pkt, Mu)); ...; + RzILOpEffect *seq_12 = SEQN(2, fcirc_add_call_9, op_ASSIGN_hybrid_tmp_11); + + // Rd = ((st32) ((ut16) mem_load_16(EA))); + RzILOpPure *ml_EA_15 = LOADW(16, VARL("EA")); + RzILOpEffect *op_ASSIGN_18 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, CAST(16, IL_FALSE, ml_EA_15))); + + RzILOpEffect *instruction_sequence = SEQN(4, imm_assign_5, seq_12, op_ASSIGN_3, op_ASSIGN_18); + return instruction_sequence; +} + +// Rd = memuh(Rx++I:circ(Mu)) +RzILOpEffect *hex_il_op_l2_loadruh_pcr(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Mu_op = ISA2REG(hi, 'u', false); + RzILOpPure *Mu = READ_REG(pkt, Mu_op, false); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // EA = ((ut32) Rx); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // fcirc_add(bundle, Rx, ((st32) (sextract64(((ut64) (((Mu & 0xf0000000) >> 0x15) | ((Mu >> 0x11) & 0x7f))), 0x0, 0xb) << 0x1)), Mu, get_corresponding_CS(pkt, Mu)); + RzILOpPure *op_AND_10 = LOGAND(DUP(Mu), SN(32, 0xf0000000)); + RzILOpPure *op_RSHIFT_12 = SHIFTRA(op_AND_10, SN(32, 21)); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(DUP(Mu), SN(32, 17)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0x7f)); + RzILOpPure *op_OR_17 = LOGOR(op_RSHIFT_12, op_AND_16); + RzILOpPure *op_LSHIFT_24 = SHIFTL0(SEXTRACT64(CAST(64, IL_FALSE, op_OR_17), SN(32, 0), SN(32, 11)), SN(32, 1)); + RzILOpEffect *fcirc_add_call_27 = hex_fcirc_add(bundle, Rx_op, CAST(32, MSB(op_LSHIFT_24), DUP(op_LSHIFT_24)), Mu, HEX_GET_CORRESPONDING_CS(pkt, Mu_op)); + + // h_tmp223 = fcirc_add(bundle, Rx, ((st32) (sextract64(((ut64) (((Mu & 0xf0000000) >> 0x15) | ((Mu >> 0x11) & 0x7f))), 0x0, 0xb) << 0x1)), Mu, get_corresponding_CS(pkt, Mu)); + RzILOpEffect *op_ASSIGN_hybrid_tmp_29 = SETL("h_tmp223", SIGNED(32, 
VARL("ret_val"))); + + // seq(fcirc_add(bundle, Rx, ((st32) (sextract64(((ut64) (((Mu & 0x ...; + RzILOpEffect *seq_30 = SEQN(2, fcirc_add_call_27, op_ASSIGN_hybrid_tmp_29); + + // Rd = ((st32) ((ut16) mem_load_16(EA))); + RzILOpPure *ml_EA_33 = LOADW(16, VARL("EA")); + RzILOpEffect *op_ASSIGN_36 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, CAST(16, IL_FALSE, ml_EA_33))); + + RzILOpEffect *instruction_sequence = SEQN(3, seq_30, op_ASSIGN_3, op_ASSIGN_36); + return instruction_sequence; +} + +// Rd = memuh(Rx++Ii) +RzILOpEffect *hex_il_op_l2_loadruh_pi(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // EA = ((ut32) Rx); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // s = s; + RzILOpEffect *imm_assign_5 = SETL("s", s); + + // Rx = Rx + s; + RzILOpPure *op_ADD_7 = ADD(READ_REG(pkt, Rx_op, false), VARL("s")); + RzILOpEffect *op_ASSIGN_8 = WRITE_REG(bundle, Rx_op, op_ADD_7); + + // Rd = ((st32) ((ut16) mem_load_16(EA))); + RzILOpPure *ml_EA_11 = LOADW(16, VARL("EA")); + RzILOpEffect *op_ASSIGN_14 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, CAST(16, IL_FALSE, ml_EA_11))); + + RzILOpEffect *instruction_sequence = SEQN(4, imm_assign_5, op_ASSIGN_3, op_ASSIGN_8, op_ASSIGN_14); + return instruction_sequence; +} + +// Rd = memuh(Rx++Mu) +RzILOpEffect *hex_il_op_l2_loadruh_pr(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Mu_op = ISA2REG(hi, 'u', false); + RzILOpPure *Mu = READ_REG(pkt, Mu_op, false); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // EA = ((ut32) Rx); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, 
false))); + + // Rx = Rx + Mu; + RzILOpPure *op_ADD_6 = ADD(READ_REG(pkt, Rx_op, false), Mu); + RzILOpEffect *op_ASSIGN_7 = WRITE_REG(bundle, Rx_op, op_ADD_6); + + // Rd = ((st32) ((ut16) mem_load_16(EA))); + RzILOpPure *ml_EA_10 = LOADW(16, VARL("EA")); + RzILOpEffect *op_ASSIGN_13 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, CAST(16, IL_FALSE, ml_EA_10))); + + RzILOpEffect *instruction_sequence = SEQN(3, op_ASSIGN_3, op_ASSIGN_7, op_ASSIGN_13); + return instruction_sequence; +} + +// Rd = memuh(gp+Ii) +RzILOpEffect *hex_il_op_l2_loadruhgp(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp gp_op = ALIAS2OP(HEX_REG_ALIAS_GP, false); + RzILOpPure *gp = READ_REG(pkt, &gp_op, false); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = gp + u; + RzILOpPure *op_ADD_4 = ADD(gp, VARL("u")); + RzILOpEffect *op_ASSIGN_5 = SETL("EA", op_ADD_4); + + // Rd = ((st32) ((ut16) mem_load_16(EA))); + RzILOpPure *ml_EA_8 = LOADW(16, VARL("EA")); + RzILOpEffect *op_ASSIGN_11 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, CAST(16, IL_FALSE, ml_EA_8))); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_5, op_ASSIGN_11); + return instruction_sequence; +} + +// Rd = memw_aq(Rs) +RzILOpEffect *hex_il_op_l2_loadw_aq(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // EA = ((ut32) Rs); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, Rs)); + + // Rd = ((st32) ((ut32) mem_load_32(EA))); + RzILOpPure *ml_EA_6 = LOADW(32, VARL("EA")); + RzILOpEffect *op_ASSIGN_9 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, CAST(32, IL_FALSE, ml_EA_6))); 
+ + RzILOpEffect *instruction_sequence = SEQN(2, op_ASSIGN_3, op_ASSIGN_9); + return instruction_sequence; +} + +// Rd = memw_locked(Rs) +RzILOpEffect *hex_il_op_l2_loadw_locked(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// if (!Pt) Rd = memb(Rs+Ii) +RzILOpEffect *hex_il_op_l2_ploadrbf_io(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Pt_op = ISA2REG(hi, 't', false); + RzILOpPure *Pt = READ_REG(pkt, Pt_op, false); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = ((ut32) Rs) + u; + RzILOpPure *op_ADD_5 = ADD(CAST(32, IL_FALSE, Rs), VARL("u")); + RzILOpEffect *op_ASSIGN_6 = SETL("EA", op_ADD_5); + + // Rd = ((st32) ((st8) mem_load_8(EA))); + RzILOpPure *ml_EA_14 = LOADW(8, VARL("EA")); + RzILOpEffect *op_ASSIGN_17 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(CAST(8, MSB(ml_EA_14), DUP(ml_EA_14))), CAST(8, MSB(DUP(ml_EA_14)), DUP(ml_EA_14)))); + + // nop; + RzILOpEffect *nop_18 = NOP(); + + // seq(Rd = ((st32) ((st8) mem_load_8(EA)))); + RzILOpEffect *seq_then_20 = op_ASSIGN_17; + + // seq(nop); + RzILOpEffect *seq_else_21 = nop_18; + + // if (! 
(((st32) Pt) & 0x1)) {seq(Rd = ((st32) ((st8) mem_load_8(EA))))} else {seq(nop)}; + RzILOpPure *op_AND_11 = LOGAND(CAST(32, MSB(Pt), DUP(Pt)), SN(32, 1)); + RzILOpPure *op_INV_12 = INV(NON_ZERO(op_AND_11)); + RzILOpEffect *branch_22 = BRANCH(op_INV_12, seq_then_20, seq_else_21); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_6, branch_22); + return instruction_sequence; +} + +// if (!Pt) Rd = memb(Rx++Ii) +RzILOpEffect *hex_il_op_l2_ploadrbf_pi(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Pt_op = ISA2REG(hi, 't', false); + RzILOpPure *Pt = READ_REG(pkt, Pt_op, false); + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // EA = ((ut32) Rx); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // s = s; + RzILOpEffect *imm_assign_10 = SETL("s", s); + + // Rx = Rx + s; + RzILOpPure *op_ADD_12 = ADD(READ_REG(pkt, Rx_op, false), VARL("s")); + RzILOpEffect *op_ASSIGN_13 = WRITE_REG(bundle, Rx_op, op_ADD_12); + + // Rd = ((st32) ((st8) mem_load_8(EA))); + RzILOpPure *ml_EA_16 = LOADW(8, VARL("EA")); + RzILOpEffect *op_ASSIGN_19 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(CAST(8, MSB(ml_EA_16), DUP(ml_EA_16))), CAST(8, MSB(DUP(ml_EA_16)), DUP(ml_EA_16)))); + + // nop; + RzILOpEffect *nop_20 = NOP(); + + // seq(Rx = Rx + s; Rd = ((st32) ((st8) mem_load_8(EA)))); + RzILOpEffect *seq_then_22 = SEQN(2, op_ASSIGN_13, op_ASSIGN_19); + + // seq(nop); + RzILOpEffect *seq_else_23 = nop_20; + + // if (! 
(((st32) Pt) & 0x1)) {seq(Rx = Rx + s; Rd = ((st32) ((st8) mem_load_8(EA))))} else {seq(nop)}; + RzILOpPure *op_AND_8 = LOGAND(CAST(32, MSB(Pt), DUP(Pt)), SN(32, 1)); + RzILOpPure *op_INV_9 = INV(NON_ZERO(op_AND_8)); + RzILOpEffect *branch_24 = BRANCH(op_INV_9, seq_then_22, seq_else_23); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_10, op_ASSIGN_3, branch_24); + return instruction_sequence; +} + +// if (!Pt.new) Rd = memb(Rs+Ii) +RzILOpEffect *hex_il_op_l2_ploadrbfnew_io(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Pt_new_op = ISA2REG(hi, 't', true); + RzILOpPure *Pt_new = READ_REG(pkt, Pt_new_op, true); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = ((ut32) Rs) + u; + RzILOpPure *op_ADD_5 = ADD(CAST(32, IL_FALSE, Rs), VARL("u")); + RzILOpEffect *op_ASSIGN_6 = SETL("EA", op_ADD_5); + + // Rd = ((st32) ((st8) mem_load_8(EA))); + RzILOpPure *ml_EA_14 = LOADW(8, VARL("EA")); + RzILOpEffect *op_ASSIGN_17 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(CAST(8, MSB(ml_EA_14), DUP(ml_EA_14))), CAST(8, MSB(DUP(ml_EA_14)), DUP(ml_EA_14)))); + + // nop; + RzILOpEffect *nop_18 = NOP(); + + // seq(Rd = ((st32) ((st8) mem_load_8(EA)))); + RzILOpEffect *seq_then_20 = op_ASSIGN_17; + + // seq(nop); + RzILOpEffect *seq_else_21 = nop_18; + + // if (! 
(((st32) Pt_new) & 0x1)) {seq(Rd = ((st32) ((st8) mem_load_8(EA))))} else {seq(nop)}; + RzILOpPure *op_AND_11 = LOGAND(CAST(32, MSB(Pt_new), DUP(Pt_new)), SN(32, 1)); + RzILOpPure *op_INV_12 = INV(NON_ZERO(op_AND_11)); + RzILOpEffect *branch_22 = BRANCH(op_INV_12, seq_then_20, seq_else_21); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_6, branch_22); + return instruction_sequence; +} + +// if (!Pt.new) Rd = memb(Rx++Ii) +RzILOpEffect *hex_il_op_l2_ploadrbfnew_pi(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Pt_new_op = ISA2REG(hi, 't', true); + RzILOpPure *Pt_new = READ_REG(pkt, Pt_new_op, true); + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // EA = ((ut32) Rx); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // s = s; + RzILOpEffect *imm_assign_10 = SETL("s", s); + + // Rx = Rx + s; + RzILOpPure *op_ADD_12 = ADD(READ_REG(pkt, Rx_op, false), VARL("s")); + RzILOpEffect *op_ASSIGN_13 = WRITE_REG(bundle, Rx_op, op_ADD_12); + + // Rd = ((st32) ((st8) mem_load_8(EA))); + RzILOpPure *ml_EA_16 = LOADW(8, VARL("EA")); + RzILOpEffect *op_ASSIGN_19 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(CAST(8, MSB(ml_EA_16), DUP(ml_EA_16))), CAST(8, MSB(DUP(ml_EA_16)), DUP(ml_EA_16)))); + + // nop; + RzILOpEffect *nop_20 = NOP(); + + // seq(Rx = Rx + s; Rd = ((st32) ((st8) mem_load_8(EA)))); + RzILOpEffect *seq_then_22 = SEQN(2, op_ASSIGN_13, op_ASSIGN_19); + + // seq(nop); + RzILOpEffect *seq_else_23 = nop_20; + + // if (! 
(((st32) Pt_new) & 0x1)) {seq(Rx = Rx + s; Rd = ((st32) ((st8) mem_load_8(EA))))} else {seq(nop)}; + RzILOpPure *op_AND_8 = LOGAND(CAST(32, MSB(Pt_new), DUP(Pt_new)), SN(32, 1)); + RzILOpPure *op_INV_9 = INV(NON_ZERO(op_AND_8)); + RzILOpEffect *branch_24 = BRANCH(op_INV_9, seq_then_22, seq_else_23); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_10, op_ASSIGN_3, branch_24); + return instruction_sequence; +} + +// if (Pt) Rd = memb(Rs+Ii) +RzILOpEffect *hex_il_op_l2_ploadrbt_io(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Pt_op = ISA2REG(hi, 't', false); + RzILOpPure *Pt = READ_REG(pkt, Pt_op, false); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = ((ut32) Rs) + u; + RzILOpPure *op_ADD_5 = ADD(CAST(32, IL_FALSE, Rs), VARL("u")); + RzILOpEffect *op_ASSIGN_6 = SETL("EA", op_ADD_5); + + // Rd = ((st32) ((st8) mem_load_8(EA))); + RzILOpPure *ml_EA_13 = LOADW(8, VARL("EA")); + RzILOpEffect *op_ASSIGN_16 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(CAST(8, MSB(ml_EA_13), DUP(ml_EA_13))), CAST(8, MSB(DUP(ml_EA_13)), DUP(ml_EA_13)))); + + // nop; + RzILOpEffect *nop_17 = NOP(); + + // seq(Rd = ((st32) ((st8) mem_load_8(EA)))); + RzILOpEffect *seq_then_19 = op_ASSIGN_16; + + // seq(nop); + RzILOpEffect *seq_else_20 = nop_17; + + // if ((((st32) Pt) & 0x1)) {seq(Rd = ((st32) ((st8) mem_load_8(EA))))} else {seq(nop)}; + RzILOpPure *op_AND_11 = LOGAND(CAST(32, MSB(Pt), DUP(Pt)), SN(32, 1)); + RzILOpEffect *branch_21 = BRANCH(NON_ZERO(op_AND_11), seq_then_19, seq_else_20); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_6, branch_21); + return instruction_sequence; +} + +// if (Pt) Rd = memb(Rx++Ii) +RzILOpEffect 
*hex_il_op_l2_ploadrbt_pi(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Pt_op = ISA2REG(hi, 't', false); + RzILOpPure *Pt = READ_REG(pkt, Pt_op, false); + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // EA = ((ut32) Rx); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // s = s; + RzILOpEffect *imm_assign_9 = SETL("s", s); + + // Rx = Rx + s; + RzILOpPure *op_ADD_11 = ADD(READ_REG(pkt, Rx_op, false), VARL("s")); + RzILOpEffect *op_ASSIGN_12 = WRITE_REG(bundle, Rx_op, op_ADD_11); + + // Rd = ((st32) ((st8) mem_load_8(EA))); + RzILOpPure *ml_EA_15 = LOADW(8, VARL("EA")); + RzILOpEffect *op_ASSIGN_18 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(CAST(8, MSB(ml_EA_15), DUP(ml_EA_15))), CAST(8, MSB(DUP(ml_EA_15)), DUP(ml_EA_15)))); + + // nop; + RzILOpEffect *nop_19 = NOP(); + + // seq(Rx = Rx + s; Rd = ((st32) ((st8) mem_load_8(EA)))); + RzILOpEffect *seq_then_21 = SEQN(2, op_ASSIGN_12, op_ASSIGN_18); + + // seq(nop); + RzILOpEffect *seq_else_22 = nop_19; + + // if ((((st32) Pt) & 0x1)) {seq(Rx = Rx + s; Rd = ((st32) ((st8) mem_load_8(EA))))} else {seq(nop)}; + RzILOpPure *op_AND_8 = LOGAND(CAST(32, MSB(Pt), DUP(Pt)), SN(32, 1)); + RzILOpEffect *branch_23 = BRANCH(NON_ZERO(op_AND_8), seq_then_21, seq_else_22); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_9, op_ASSIGN_3, branch_23); + return instruction_sequence; +} + +// if (Pt.new) Rd = memb(Rs+Ii) +RzILOpEffect *hex_il_op_l2_ploadrbtnew_io(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Pt_new_op = ISA2REG(hi, 't', true); + 
RzILOpPure *Pt_new = READ_REG(pkt, Pt_new_op, true); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = ((ut32) Rs) + u; + RzILOpPure *op_ADD_5 = ADD(CAST(32, IL_FALSE, Rs), VARL("u")); + RzILOpEffect *op_ASSIGN_6 = SETL("EA", op_ADD_5); + + // Rd = ((st32) ((st8) mem_load_8(EA))); + RzILOpPure *ml_EA_13 = LOADW(8, VARL("EA")); + RzILOpEffect *op_ASSIGN_16 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(CAST(8, MSB(ml_EA_13), DUP(ml_EA_13))), CAST(8, MSB(DUP(ml_EA_13)), DUP(ml_EA_13)))); + + // nop; + RzILOpEffect *nop_17 = NOP(); + + // seq(Rd = ((st32) ((st8) mem_load_8(EA)))); + RzILOpEffect *seq_then_19 = op_ASSIGN_16; + + // seq(nop); + RzILOpEffect *seq_else_20 = nop_17; + + // if ((((st32) Pt_new) & 0x1)) {seq(Rd = ((st32) ((st8) mem_load_8(EA))))} else {seq(nop)}; + RzILOpPure *op_AND_11 = LOGAND(CAST(32, MSB(Pt_new), DUP(Pt_new)), SN(32, 1)); + RzILOpEffect *branch_21 = BRANCH(NON_ZERO(op_AND_11), seq_then_19, seq_else_20); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_6, branch_21); + return instruction_sequence; +} + +// if (Pt.new) Rd = memb(Rx++Ii) +RzILOpEffect *hex_il_op_l2_ploadrbtnew_pi(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Pt_new_op = ISA2REG(hi, 't', true); + RzILOpPure *Pt_new = READ_REG(pkt, Pt_new_op, true); + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // EA = ((ut32) Rx); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // s = s; + RzILOpEffect *imm_assign_9 = SETL("s", s); + + // Rx = Rx + s; + RzILOpPure *op_ADD_11 = ADD(READ_REG(pkt, Rx_op, false), VARL("s")); + RzILOpEffect *op_ASSIGN_12 = WRITE_REG(bundle, Rx_op, op_ADD_11); + + // Rd = ((st32) ((st8) mem_load_8(EA))); + RzILOpPure *ml_EA_15 = 
LOADW(8, VARL("EA")); + RzILOpEffect *op_ASSIGN_18 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(CAST(8, MSB(ml_EA_15), DUP(ml_EA_15))), CAST(8, MSB(DUP(ml_EA_15)), DUP(ml_EA_15)))); + + // nop; + RzILOpEffect *nop_19 = NOP(); + + // seq(Rx = Rx + s; Rd = ((st32) ((st8) mem_load_8(EA)))); + RzILOpEffect *seq_then_21 = SEQN(2, op_ASSIGN_12, op_ASSIGN_18); + + // seq(nop); + RzILOpEffect *seq_else_22 = nop_19; + + // if ((((st32) Pt_new) & 0x1)) {seq(Rx = Rx + s; Rd = ((st32) ((st8) mem_load_8(EA))))} else {seq(nop)}; + RzILOpPure *op_AND_8 = LOGAND(CAST(32, MSB(Pt_new), DUP(Pt_new)), SN(32, 1)); + RzILOpEffect *branch_23 = BRANCH(NON_ZERO(op_AND_8), seq_then_21, seq_else_22); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_9, op_ASSIGN_3, branch_23); + return instruction_sequence; +} + +// if (!Pt) Rdd = memd(Rs+Ii) +RzILOpEffect *hex_il_op_l2_ploadrdf_io(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Pt_op = ISA2REG(hi, 't', false); + RzILOpPure *Pt = READ_REG(pkt, Pt_op, false); + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = ((ut32) Rs) + u; + RzILOpPure *op_ADD_5 = ADD(CAST(32, IL_FALSE, Rs), VARL("u")); + RzILOpEffect *op_ASSIGN_6 = SETL("EA", op_ADD_5); + + // Rdd = ((st64) ((ut64) mem_load_64(EA))); + RzILOpPure *ml_EA_14 = LOADW(64, VARL("EA")); + RzILOpEffect *op_ASSIGN_17 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, CAST(64, IL_FALSE, ml_EA_14))); + + // nop; + RzILOpEffect *nop_18 = NOP(); + + // seq(Rdd = ((st64) ((ut64) mem_load_64(EA)))); + RzILOpEffect *seq_then_20 = op_ASSIGN_17; + + // seq(nop); + RzILOpEffect *seq_else_21 = nop_18; + + // if (! 
(((st32) Pt) & 0x1)) {seq(Rdd = ((st64) ((ut64) mem_load_64(EA))))} else {seq(nop)}; + RzILOpPure *op_AND_11 = LOGAND(CAST(32, MSB(Pt), DUP(Pt)), SN(32, 1)); + RzILOpPure *op_INV_12 = INV(NON_ZERO(op_AND_11)); + RzILOpEffect *branch_22 = BRANCH(op_INV_12, seq_then_20, seq_else_21); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_6, branch_22); + return instruction_sequence; +} + +// if (!Pt) Rdd = memd(Rx++Ii) +RzILOpEffect *hex_il_op_l2_ploadrdf_pi(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Pt_op = ISA2REG(hi, 't', false); + RzILOpPure *Pt = READ_REG(pkt, Pt_op, false); + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + + // EA = ((ut32) Rx); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // s = s; + RzILOpEffect *imm_assign_10 = SETL("s", s); + + // Rx = Rx + s; + RzILOpPure *op_ADD_12 = ADD(READ_REG(pkt, Rx_op, false), VARL("s")); + RzILOpEffect *op_ASSIGN_13 = WRITE_REG(bundle, Rx_op, op_ADD_12); + + // Rdd = ((st64) ((ut64) mem_load_64(EA))); + RzILOpPure *ml_EA_16 = LOADW(64, VARL("EA")); + RzILOpEffect *op_ASSIGN_19 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, CAST(64, IL_FALSE, ml_EA_16))); + + // nop; + RzILOpEffect *nop_20 = NOP(); + + // seq(Rx = Rx + s; Rdd = ((st64) ((ut64) mem_load_64(EA)))); + RzILOpEffect *seq_then_22 = SEQN(2, op_ASSIGN_13, op_ASSIGN_19); + + // seq(nop); + RzILOpEffect *seq_else_23 = nop_20; + + // if (! 
(((st32) Pt) & 0x1)) {seq(Rx = Rx + s; Rdd = ((st64) ((ut64) mem_load_64(EA))))} else {seq(nop)}; + RzILOpPure *op_AND_8 = LOGAND(CAST(32, MSB(Pt), DUP(Pt)), SN(32, 1)); + RzILOpPure *op_INV_9 = INV(NON_ZERO(op_AND_8)); + RzILOpEffect *branch_24 = BRANCH(op_INV_9, seq_then_22, seq_else_23); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_10, op_ASSIGN_3, branch_24); + return instruction_sequence; +} + +// if (!Pt.new) Rdd = memd(Rs+Ii) +RzILOpEffect *hex_il_op_l2_ploadrdfnew_io(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Pt_new_op = ISA2REG(hi, 't', true); + RzILOpPure *Pt_new = READ_REG(pkt, Pt_new_op, true); + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = ((ut32) Rs) + u; + RzILOpPure *op_ADD_5 = ADD(CAST(32, IL_FALSE, Rs), VARL("u")); + RzILOpEffect *op_ASSIGN_6 = SETL("EA", op_ADD_5); + + // Rdd = ((st64) ((ut64) mem_load_64(EA))); + RzILOpPure *ml_EA_14 = LOADW(64, VARL("EA")); + RzILOpEffect *op_ASSIGN_17 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, CAST(64, IL_FALSE, ml_EA_14))); + + // nop; + RzILOpEffect *nop_18 = NOP(); + + // seq(Rdd = ((st64) ((ut64) mem_load_64(EA)))); + RzILOpEffect *seq_then_20 = op_ASSIGN_17; + + // seq(nop); + RzILOpEffect *seq_else_21 = nop_18; + + // if (! 
(((st32) Pt_new) & 0x1)) {seq(Rdd = ((st64) ((ut64) mem_load_64(EA))))} else {seq(nop)}; + RzILOpPure *op_AND_11 = LOGAND(CAST(32, MSB(Pt_new), DUP(Pt_new)), SN(32, 1)); + RzILOpPure *op_INV_12 = INV(NON_ZERO(op_AND_11)); + RzILOpEffect *branch_22 = BRANCH(op_INV_12, seq_then_20, seq_else_21); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_6, branch_22); + return instruction_sequence; +} + +// if (!Pt.new) Rdd = memd(Rx++Ii) +RzILOpEffect *hex_il_op_l2_ploadrdfnew_pi(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Pt_new_op = ISA2REG(hi, 't', true); + RzILOpPure *Pt_new = READ_REG(pkt, Pt_new_op, true); + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + + // EA = ((ut32) Rx); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // s = s; + RzILOpEffect *imm_assign_10 = SETL("s", s); + + // Rx = Rx + s; + RzILOpPure *op_ADD_12 = ADD(READ_REG(pkt, Rx_op, false), VARL("s")); + RzILOpEffect *op_ASSIGN_13 = WRITE_REG(bundle, Rx_op, op_ADD_12); + + // Rdd = ((st64) ((ut64) mem_load_64(EA))); + RzILOpPure *ml_EA_16 = LOADW(64, VARL("EA")); + RzILOpEffect *op_ASSIGN_19 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, CAST(64, IL_FALSE, ml_EA_16))); + + // nop; + RzILOpEffect *nop_20 = NOP(); + + // seq(Rx = Rx + s; Rdd = ((st64) ((ut64) mem_load_64(EA)))); + RzILOpEffect *seq_then_22 = SEQN(2, op_ASSIGN_13, op_ASSIGN_19); + + // seq(nop); + RzILOpEffect *seq_else_23 = nop_20; + + // if (! 
(((st32) Pt_new) & 0x1)) {seq(Rx = Rx + s; Rdd = ((st64) ((ut64) mem_load_64(EA))))} else {seq(nop)}; + RzILOpPure *op_AND_8 = LOGAND(CAST(32, MSB(Pt_new), DUP(Pt_new)), SN(32, 1)); + RzILOpPure *op_INV_9 = INV(NON_ZERO(op_AND_8)); + RzILOpEffect *branch_24 = BRANCH(op_INV_9, seq_then_22, seq_else_23); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_10, op_ASSIGN_3, branch_24); + return instruction_sequence; +} + +// if (Pt) Rdd = memd(Rs+Ii) +RzILOpEffect *hex_il_op_l2_ploadrdt_io(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Pt_op = ISA2REG(hi, 't', false); + RzILOpPure *Pt = READ_REG(pkt, Pt_op, false); + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = ((ut32) Rs) + u; + RzILOpPure *op_ADD_5 = ADD(CAST(32, IL_FALSE, Rs), VARL("u")); + RzILOpEffect *op_ASSIGN_6 = SETL("EA", op_ADD_5); + + // Rdd = ((st64) ((ut64) mem_load_64(EA))); + RzILOpPure *ml_EA_13 = LOADW(64, VARL("EA")); + RzILOpEffect *op_ASSIGN_16 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, CAST(64, IL_FALSE, ml_EA_13))); + + // nop; + RzILOpEffect *nop_17 = NOP(); + + // seq(Rdd = ((st64) ((ut64) mem_load_64(EA)))); + RzILOpEffect *seq_then_19 = op_ASSIGN_16; + + // seq(nop); + RzILOpEffect *seq_else_20 = nop_17; + + // if ((((st32) Pt) & 0x1)) {seq(Rdd = ((st64) ((ut64) mem_load_64(EA))))} else {seq(nop)}; + RzILOpPure *op_AND_11 = LOGAND(CAST(32, MSB(Pt), DUP(Pt)), SN(32, 1)); + RzILOpEffect *branch_21 = BRANCH(NON_ZERO(op_AND_11), seq_then_19, seq_else_20); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_6, branch_21); + return instruction_sequence; +} + +// if (Pt) Rdd = memd(Rx++Ii) +RzILOpEffect *hex_il_op_l2_ploadrdt_pi(HexInsnPktBundle *bundle) { + 
const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Pt_op = ISA2REG(hi, 't', false); + RzILOpPure *Pt = READ_REG(pkt, Pt_op, false); + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + + // EA = ((ut32) Rx); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // s = s; + RzILOpEffect *imm_assign_9 = SETL("s", s); + + // Rx = Rx + s; + RzILOpPure *op_ADD_11 = ADD(READ_REG(pkt, Rx_op, false), VARL("s")); + RzILOpEffect *op_ASSIGN_12 = WRITE_REG(bundle, Rx_op, op_ADD_11); + + // Rdd = ((st64) ((ut64) mem_load_64(EA))); + RzILOpPure *ml_EA_15 = LOADW(64, VARL("EA")); + RzILOpEffect *op_ASSIGN_18 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, CAST(64, IL_FALSE, ml_EA_15))); + + // nop; + RzILOpEffect *nop_19 = NOP(); + + // seq(Rx = Rx + s; Rdd = ((st64) ((ut64) mem_load_64(EA)))); + RzILOpEffect *seq_then_21 = SEQN(2, op_ASSIGN_12, op_ASSIGN_18); + + // seq(nop); + RzILOpEffect *seq_else_22 = nop_19; + + // if ((((st32) Pt) & 0x1)) {seq(Rx = Rx + s; Rdd = ((st64) ((ut64) mem_load_64(EA))))} else {seq(nop)}; + RzILOpPure *op_AND_8 = LOGAND(CAST(32, MSB(Pt), DUP(Pt)), SN(32, 1)); + RzILOpEffect *branch_23 = BRANCH(NON_ZERO(op_AND_8), seq_then_21, seq_else_22); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_9, op_ASSIGN_3, branch_23); + return instruction_sequence; +} + +// if (Pt.new) Rdd = memd(Rs+Ii) +RzILOpEffect *hex_il_op_l2_ploadrdtnew_io(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Pt_new_op = ISA2REG(hi, 't', true); + RzILOpPure *Pt_new = READ_REG(pkt, Pt_new_op, true); + const HexOp *Rdd_op = ISA2REG(hi, 'd', 
false); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = ((ut32) Rs) + u; + RzILOpPure *op_ADD_5 = ADD(CAST(32, IL_FALSE, Rs), VARL("u")); + RzILOpEffect *op_ASSIGN_6 = SETL("EA", op_ADD_5); + + // Rdd = ((st64) ((ut64) mem_load_64(EA))); + RzILOpPure *ml_EA_13 = LOADW(64, VARL("EA")); + RzILOpEffect *op_ASSIGN_16 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, CAST(64, IL_FALSE, ml_EA_13))); + + // nop; + RzILOpEffect *nop_17 = NOP(); + + // seq(Rdd = ((st64) ((ut64) mem_load_64(EA)))); + RzILOpEffect *seq_then_19 = op_ASSIGN_16; + + // seq(nop); + RzILOpEffect *seq_else_20 = nop_17; + + // if ((((st32) Pt_new) & 0x1)) {seq(Rdd = ((st64) ((ut64) mem_load_64(EA))))} else {seq(nop)}; + RzILOpPure *op_AND_11 = LOGAND(CAST(32, MSB(Pt_new), DUP(Pt_new)), SN(32, 1)); + RzILOpEffect *branch_21 = BRANCH(NON_ZERO(op_AND_11), seq_then_19, seq_else_20); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_6, branch_21); + return instruction_sequence; +} + +// if (Pt.new) Rdd = memd(Rx++Ii) +RzILOpEffect *hex_il_op_l2_ploadrdtnew_pi(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Pt_new_op = ISA2REG(hi, 't', true); + RzILOpPure *Pt_new = READ_REG(pkt, Pt_new_op, true); + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + + // EA = ((ut32) Rx); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // s = s; + RzILOpEffect *imm_assign_9 = SETL("s", s); + + // Rx = Rx + s; + RzILOpPure *op_ADD_11 = ADD(READ_REG(pkt, Rx_op, false), VARL("s")); + RzILOpEffect *op_ASSIGN_12 = WRITE_REG(bundle, Rx_op, op_ADD_11); + + // Rdd = ((st64) ((ut64) mem_load_64(EA))); + RzILOpPure *ml_EA_15 = LOADW(64, VARL("EA")); + RzILOpEffect *op_ASSIGN_18 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, CAST(64, IL_FALSE, ml_EA_15))); + 
+ // nop; + RzILOpEffect *nop_19 = NOP(); + + // seq(Rx = Rx + s; Rdd = ((st64) ((ut64) mem_load_64(EA)))); + RzILOpEffect *seq_then_21 = SEQN(2, op_ASSIGN_12, op_ASSIGN_18); + + // seq(nop); + RzILOpEffect *seq_else_22 = nop_19; + + // if ((((st32) Pt_new) & 0x1)) {seq(Rx = Rx + s; Rdd = ((st64) ((ut64) mem_load_64(EA))))} else {seq(nop)}; + RzILOpPure *op_AND_8 = LOGAND(CAST(32, MSB(Pt_new), DUP(Pt_new)), SN(32, 1)); + RzILOpEffect *branch_23 = BRANCH(NON_ZERO(op_AND_8), seq_then_21, seq_else_22); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_9, op_ASSIGN_3, branch_23); + return instruction_sequence; +} + +// if (!Pt) Rd = memh(Rs+Ii) +RzILOpEffect *hex_il_op_l2_ploadrhf_io(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Pt_op = ISA2REG(hi, 't', false); + RzILOpPure *Pt = READ_REG(pkt, Pt_op, false); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = ((ut32) Rs) + u; + RzILOpPure *op_ADD_5 = ADD(CAST(32, IL_FALSE, Rs), VARL("u")); + RzILOpEffect *op_ASSIGN_6 = SETL("EA", op_ADD_5); + + // Rd = ((st32) ((st16) mem_load_16(EA))); + RzILOpPure *ml_EA_14 = LOADW(16, VARL("EA")); + RzILOpEffect *op_ASSIGN_17 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(CAST(16, MSB(ml_EA_14), DUP(ml_EA_14))), CAST(16, MSB(DUP(ml_EA_14)), DUP(ml_EA_14)))); + + // nop; + RzILOpEffect *nop_18 = NOP(); + + // seq(Rd = ((st32) ((st16) mem_load_16(EA)))); + RzILOpEffect *seq_then_20 = op_ASSIGN_17; + + // seq(nop); + RzILOpEffect *seq_else_21 = nop_18; + + // if (! 
(((st32) Pt) & 0x1)) {seq(Rd = ((st32) ((st16) mem_load_16(EA))))} else {seq(nop)}; + RzILOpPure *op_AND_11 = LOGAND(CAST(32, MSB(Pt), DUP(Pt)), SN(32, 1)); + RzILOpPure *op_INV_12 = INV(NON_ZERO(op_AND_11)); + RzILOpEffect *branch_22 = BRANCH(op_INV_12, seq_then_20, seq_else_21); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_6, branch_22); + return instruction_sequence; +} + +// if (!Pt) Rd = memh(Rx++Ii) +RzILOpEffect *hex_il_op_l2_ploadrhf_pi(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Pt_op = ISA2REG(hi, 't', false); + RzILOpPure *Pt = READ_REG(pkt, Pt_op, false); + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // EA = ((ut32) Rx); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // s = s; + RzILOpEffect *imm_assign_10 = SETL("s", s); + + // Rx = Rx + s; + RzILOpPure *op_ADD_12 = ADD(READ_REG(pkt, Rx_op, false), VARL("s")); + RzILOpEffect *op_ASSIGN_13 = WRITE_REG(bundle, Rx_op, op_ADD_12); + + // Rd = ((st32) ((st16) mem_load_16(EA))); + RzILOpPure *ml_EA_16 = LOADW(16, VARL("EA")); + RzILOpEffect *op_ASSIGN_19 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(CAST(16, MSB(ml_EA_16), DUP(ml_EA_16))), CAST(16, MSB(DUP(ml_EA_16)), DUP(ml_EA_16)))); + + // nop; + RzILOpEffect *nop_20 = NOP(); + + // seq(Rx = Rx + s; Rd = ((st32) ((st16) mem_load_16(EA)))); + RzILOpEffect *seq_then_22 = SEQN(2, op_ASSIGN_13, op_ASSIGN_19); + + // seq(nop); + RzILOpEffect *seq_else_23 = nop_20; + + // if (! 
(((st32) Pt) & 0x1)) {seq(Rx = Rx + s; Rd = ((st32) ((st16) mem_load_16(EA))))} else {seq(nop)}; + RzILOpPure *op_AND_8 = LOGAND(CAST(32, MSB(Pt), DUP(Pt)), SN(32, 1)); + RzILOpPure *op_INV_9 = INV(NON_ZERO(op_AND_8)); + RzILOpEffect *branch_24 = BRANCH(op_INV_9, seq_then_22, seq_else_23); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_10, op_ASSIGN_3, branch_24); + return instruction_sequence; +} + +// if (!Pt.new) Rd = memh(Rs+Ii) +RzILOpEffect *hex_il_op_l2_ploadrhfnew_io(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Pt_new_op = ISA2REG(hi, 't', true); + RzILOpPure *Pt_new = READ_REG(pkt, Pt_new_op, true); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = ((ut32) Rs) + u; + RzILOpPure *op_ADD_5 = ADD(CAST(32, IL_FALSE, Rs), VARL("u")); + RzILOpEffect *op_ASSIGN_6 = SETL("EA", op_ADD_5); + + // Rd = ((st32) ((st16) mem_load_16(EA))); + RzILOpPure *ml_EA_14 = LOADW(16, VARL("EA")); + RzILOpEffect *op_ASSIGN_17 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(CAST(16, MSB(ml_EA_14), DUP(ml_EA_14))), CAST(16, MSB(DUP(ml_EA_14)), DUP(ml_EA_14)))); + + // nop; + RzILOpEffect *nop_18 = NOP(); + + // seq(Rd = ((st32) ((st16) mem_load_16(EA)))); + RzILOpEffect *seq_then_20 = op_ASSIGN_17; + + // seq(nop); + RzILOpEffect *seq_else_21 = nop_18; + + // if (! 
(((st32) Pt_new) & 0x1)) {seq(Rd = ((st32) ((st16) mem_load_16(EA))))} else {seq(nop)}; + RzILOpPure *op_AND_11 = LOGAND(CAST(32, MSB(Pt_new), DUP(Pt_new)), SN(32, 1)); + RzILOpPure *op_INV_12 = INV(NON_ZERO(op_AND_11)); + RzILOpEffect *branch_22 = BRANCH(op_INV_12, seq_then_20, seq_else_21); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_6, branch_22); + return instruction_sequence; +} + +// if (!Pt.new) Rd = memh(Rx++Ii) +RzILOpEffect *hex_il_op_l2_ploadrhfnew_pi(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Pt_new_op = ISA2REG(hi, 't', true); + RzILOpPure *Pt_new = READ_REG(pkt, Pt_new_op, true); + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // EA = ((ut32) Rx); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // s = s; + RzILOpEffect *imm_assign_10 = SETL("s", s); + + // Rx = Rx + s; + RzILOpPure *op_ADD_12 = ADD(READ_REG(pkt, Rx_op, false), VARL("s")); + RzILOpEffect *op_ASSIGN_13 = WRITE_REG(bundle, Rx_op, op_ADD_12); + + // Rd = ((st32) ((st16) mem_load_16(EA))); + RzILOpPure *ml_EA_16 = LOADW(16, VARL("EA")); + RzILOpEffect *op_ASSIGN_19 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(CAST(16, MSB(ml_EA_16), DUP(ml_EA_16))), CAST(16, MSB(DUP(ml_EA_16)), DUP(ml_EA_16)))); + + // nop; + RzILOpEffect *nop_20 = NOP(); + + // seq(Rx = Rx + s; Rd = ((st32) ((st16) mem_load_16(EA)))); + RzILOpEffect *seq_then_22 = SEQN(2, op_ASSIGN_13, op_ASSIGN_19); + + // seq(nop); + RzILOpEffect *seq_else_23 = nop_20; + + // if (! 
(((st32) Pt_new) & 0x1)) {seq(Rx = Rx + s; Rd = ((st32) ((st16) mem_load_16(EA))))} else {seq(nop)}; + RzILOpPure *op_AND_8 = LOGAND(CAST(32, MSB(Pt_new), DUP(Pt_new)), SN(32, 1)); + RzILOpPure *op_INV_9 = INV(NON_ZERO(op_AND_8)); + RzILOpEffect *branch_24 = BRANCH(op_INV_9, seq_then_22, seq_else_23); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_10, op_ASSIGN_3, branch_24); + return instruction_sequence; +} + +// if (Pt) Rd = memh(Rs+Ii) +RzILOpEffect *hex_il_op_l2_ploadrht_io(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Pt_op = ISA2REG(hi, 't', false); + RzILOpPure *Pt = READ_REG(pkt, Pt_op, false); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = ((ut32) Rs) + u; + RzILOpPure *op_ADD_5 = ADD(CAST(32, IL_FALSE, Rs), VARL("u")); + RzILOpEffect *op_ASSIGN_6 = SETL("EA", op_ADD_5); + + // Rd = ((st32) ((st16) mem_load_16(EA))); + RzILOpPure *ml_EA_13 = LOADW(16, VARL("EA")); + RzILOpEffect *op_ASSIGN_16 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(CAST(16, MSB(ml_EA_13), DUP(ml_EA_13))), CAST(16, MSB(DUP(ml_EA_13)), DUP(ml_EA_13)))); + + // nop; + RzILOpEffect *nop_17 = NOP(); + + // seq(Rd = ((st32) ((st16) mem_load_16(EA)))); + RzILOpEffect *seq_then_19 = op_ASSIGN_16; + + // seq(nop); + RzILOpEffect *seq_else_20 = nop_17; + + // if ((((st32) Pt) & 0x1)) {seq(Rd = ((st32) ((st16) mem_load_16(EA))))} else {seq(nop)}; + RzILOpPure *op_AND_11 = LOGAND(CAST(32, MSB(Pt), DUP(Pt)), SN(32, 1)); + RzILOpEffect *branch_21 = BRANCH(NON_ZERO(op_AND_11), seq_then_19, seq_else_20); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_6, branch_21); + return instruction_sequence; +} + +// if (Pt) Rd = memh(Rx++Ii) +RzILOpEffect 
*hex_il_op_l2_ploadrht_pi(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Pt_op = ISA2REG(hi, 't', false); + RzILOpPure *Pt = READ_REG(pkt, Pt_op, false); + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // EA = ((ut32) Rx); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // s = s; + RzILOpEffect *imm_assign_9 = SETL("s", s); + + // Rx = Rx + s; + RzILOpPure *op_ADD_11 = ADD(READ_REG(pkt, Rx_op, false), VARL("s")); + RzILOpEffect *op_ASSIGN_12 = WRITE_REG(bundle, Rx_op, op_ADD_11); + + // Rd = ((st32) ((st16) mem_load_16(EA))); + RzILOpPure *ml_EA_15 = LOADW(16, VARL("EA")); + RzILOpEffect *op_ASSIGN_18 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(CAST(16, MSB(ml_EA_15), DUP(ml_EA_15))), CAST(16, MSB(DUP(ml_EA_15)), DUP(ml_EA_15)))); + + // nop; + RzILOpEffect *nop_19 = NOP(); + + // seq(Rx = Rx + s; Rd = ((st32) ((st16) mem_load_16(EA)))); + RzILOpEffect *seq_then_21 = SEQN(2, op_ASSIGN_12, op_ASSIGN_18); + + // seq(nop); + RzILOpEffect *seq_else_22 = nop_19; + + // if ((((st32) Pt) & 0x1)) {seq(Rx = Rx + s; Rd = ((st32) ((st16) mem_load_16(EA))))} else {seq(nop)}; + RzILOpPure *op_AND_8 = LOGAND(CAST(32, MSB(Pt), DUP(Pt)), SN(32, 1)); + RzILOpEffect *branch_23 = BRANCH(NON_ZERO(op_AND_8), seq_then_21, seq_else_22); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_9, op_ASSIGN_3, branch_23); + return instruction_sequence; +} + +// if (Pt.new) Rd = memh(Rs+Ii) +RzILOpEffect *hex_il_op_l2_ploadrhtnew_io(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Pt_new_op = ISA2REG(hi, 't', true); + 
RzILOpPure *Pt_new = READ_REG(pkt, Pt_new_op, true); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = ((ut32) Rs) + u; + RzILOpPure *op_ADD_5 = ADD(CAST(32, IL_FALSE, Rs), VARL("u")); + RzILOpEffect *op_ASSIGN_6 = SETL("EA", op_ADD_5); + + // Rd = ((st32) ((st16) mem_load_16(EA))); + RzILOpPure *ml_EA_13 = LOADW(16, VARL("EA")); + RzILOpEffect *op_ASSIGN_16 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(CAST(16, MSB(ml_EA_13), DUP(ml_EA_13))), CAST(16, MSB(DUP(ml_EA_13)), DUP(ml_EA_13)))); + + // nop; + RzILOpEffect *nop_17 = NOP(); + + // seq(Rd = ((st32) ((st16) mem_load_16(EA)))); + RzILOpEffect *seq_then_19 = op_ASSIGN_16; + + // seq(nop); + RzILOpEffect *seq_else_20 = nop_17; + + // if ((((st32) Pt_new) & 0x1)) {seq(Rd = ((st32) ((st16) mem_load_16(EA))))} else {seq(nop)}; + RzILOpPure *op_AND_11 = LOGAND(CAST(32, MSB(Pt_new), DUP(Pt_new)), SN(32, 1)); + RzILOpEffect *branch_21 = BRANCH(NON_ZERO(op_AND_11), seq_then_19, seq_else_20); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_6, branch_21); + return instruction_sequence; +} + +// if (Pt.new) Rd = memh(Rx++Ii) +RzILOpEffect *hex_il_op_l2_ploadrhtnew_pi(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Pt_new_op = ISA2REG(hi, 't', true); + RzILOpPure *Pt_new = READ_REG(pkt, Pt_new_op, true); + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // EA = ((ut32) Rx); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // s = s; + RzILOpEffect *imm_assign_9 = SETL("s", s); + + // Rx = Rx + s; + RzILOpPure *op_ADD_11 = ADD(READ_REG(pkt, Rx_op, false), VARL("s")); + RzILOpEffect *op_ASSIGN_12 = WRITE_REG(bundle, Rx_op, op_ADD_11); + + // Rd = ((st32) ((st16) mem_load_16(EA))); + RzILOpPure 
*ml_EA_15 = LOADW(16, VARL("EA")); + RzILOpEffect *op_ASSIGN_18 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(CAST(16, MSB(ml_EA_15), DUP(ml_EA_15))), CAST(16, MSB(DUP(ml_EA_15)), DUP(ml_EA_15)))); + + // nop; + RzILOpEffect *nop_19 = NOP(); + + // seq(Rx = Rx + s; Rd = ((st32) ((st16) mem_load_16(EA)))); + RzILOpEffect *seq_then_21 = SEQN(2, op_ASSIGN_12, op_ASSIGN_18); + + // seq(nop); + RzILOpEffect *seq_else_22 = nop_19; + + // if ((((st32) Pt_new) & 0x1)) {seq(Rx = Rx + s; Rd = ((st32) ((st16) mem_load_16(EA))))} else {seq(nop)}; + RzILOpPure *op_AND_8 = LOGAND(CAST(32, MSB(Pt_new), DUP(Pt_new)), SN(32, 1)); + RzILOpEffect *branch_23 = BRANCH(NON_ZERO(op_AND_8), seq_then_21, seq_else_22); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_9, op_ASSIGN_3, branch_23); + return instruction_sequence; +} + +// if (!Pt) Rd = memw(Rs+Ii) +RzILOpEffect *hex_il_op_l2_ploadrif_io(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Pt_op = ISA2REG(hi, 't', false); + RzILOpPure *Pt = READ_REG(pkt, Pt_op, false); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = ((ut32) Rs) + u; + RzILOpPure *op_ADD_5 = ADD(CAST(32, IL_FALSE, Rs), VARL("u")); + RzILOpEffect *op_ASSIGN_6 = SETL("EA", op_ADD_5); + + // Rd = ((st32) ((ut32) mem_load_32(EA))); + RzILOpPure *ml_EA_14 = LOADW(32, VARL("EA")); + RzILOpEffect *op_ASSIGN_17 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, CAST(32, IL_FALSE, ml_EA_14))); + + // nop; + RzILOpEffect *nop_18 = NOP(); + + // seq(Rd = ((st32) ((ut32) mem_load_32(EA)))); + RzILOpEffect *seq_then_20 = op_ASSIGN_17; + + // seq(nop); + RzILOpEffect *seq_else_21 = nop_18; + + // if (! 
(((st32) Pt) & 0x1)) {seq(Rd = ((st32) ((ut32) mem_load_32(EA))))} else {seq(nop)}; + RzILOpPure *op_AND_11 = LOGAND(CAST(32, MSB(Pt), DUP(Pt)), SN(32, 1)); + RzILOpPure *op_INV_12 = INV(NON_ZERO(op_AND_11)); + RzILOpEffect *branch_22 = BRANCH(op_INV_12, seq_then_20, seq_else_21); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_6, branch_22); + return instruction_sequence; +} + +// if (!Pt) Rd = memw(Rx++Ii) +RzILOpEffect *hex_il_op_l2_ploadrif_pi(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Pt_op = ISA2REG(hi, 't', false); + RzILOpPure *Pt = READ_REG(pkt, Pt_op, false); + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // EA = ((ut32) Rx); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // s = s; + RzILOpEffect *imm_assign_10 = SETL("s", s); + + // Rx = Rx + s; + RzILOpPure *op_ADD_12 = ADD(READ_REG(pkt, Rx_op, false), VARL("s")); + RzILOpEffect *op_ASSIGN_13 = WRITE_REG(bundle, Rx_op, op_ADD_12); + + // Rd = ((st32) ((ut32) mem_load_32(EA))); + RzILOpPure *ml_EA_16 = LOADW(32, VARL("EA")); + RzILOpEffect *op_ASSIGN_19 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, CAST(32, IL_FALSE, ml_EA_16))); + + // nop; + RzILOpEffect *nop_20 = NOP(); + + // seq(Rx = Rx + s; Rd = ((st32) ((ut32) mem_load_32(EA)))); + RzILOpEffect *seq_then_22 = SEQN(2, op_ASSIGN_13, op_ASSIGN_19); + + // seq(nop); + RzILOpEffect *seq_else_23 = nop_20; + + // if (! 
(((st32) Pt) & 0x1)) {seq(Rx = Rx + s; Rd = ((st32) ((ut32) mem_load_32(EA))))} else {seq(nop)}; + RzILOpPure *op_AND_8 = LOGAND(CAST(32, MSB(Pt), DUP(Pt)), SN(32, 1)); + RzILOpPure *op_INV_9 = INV(NON_ZERO(op_AND_8)); + RzILOpEffect *branch_24 = BRANCH(op_INV_9, seq_then_22, seq_else_23); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_10, op_ASSIGN_3, branch_24); + return instruction_sequence; +} + +// if (!Pt.new) Rd = memw(Rs+Ii) +RzILOpEffect *hex_il_op_l2_ploadrifnew_io(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Pt_new_op = ISA2REG(hi, 't', true); + RzILOpPure *Pt_new = READ_REG(pkt, Pt_new_op, true); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = ((ut32) Rs) + u; + RzILOpPure *op_ADD_5 = ADD(CAST(32, IL_FALSE, Rs), VARL("u")); + RzILOpEffect *op_ASSIGN_6 = SETL("EA", op_ADD_5); + + // Rd = ((st32) ((ut32) mem_load_32(EA))); + RzILOpPure *ml_EA_14 = LOADW(32, VARL("EA")); + RzILOpEffect *op_ASSIGN_17 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, CAST(32, IL_FALSE, ml_EA_14))); + + // nop; + RzILOpEffect *nop_18 = NOP(); + + // seq(Rd = ((st32) ((ut32) mem_load_32(EA)))); + RzILOpEffect *seq_then_20 = op_ASSIGN_17; + + // seq(nop); + RzILOpEffect *seq_else_21 = nop_18; + + // if (! 
(((st32) Pt_new) & 0x1)) {seq(Rd = ((st32) ((ut32) mem_load_32(EA))))} else {seq(nop)}; + RzILOpPure *op_AND_11 = LOGAND(CAST(32, MSB(Pt_new), DUP(Pt_new)), SN(32, 1)); + RzILOpPure *op_INV_12 = INV(NON_ZERO(op_AND_11)); + RzILOpEffect *branch_22 = BRANCH(op_INV_12, seq_then_20, seq_else_21); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_6, branch_22); + return instruction_sequence; +} + +// if (!Pt.new) Rd = memw(Rx++Ii) +RzILOpEffect *hex_il_op_l2_ploadrifnew_pi(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Pt_new_op = ISA2REG(hi, 't', true); + RzILOpPure *Pt_new = READ_REG(pkt, Pt_new_op, true); + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // EA = ((ut32) Rx); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // s = s; + RzILOpEffect *imm_assign_10 = SETL("s", s); + + // Rx = Rx + s; + RzILOpPure *op_ADD_12 = ADD(READ_REG(pkt, Rx_op, false), VARL("s")); + RzILOpEffect *op_ASSIGN_13 = WRITE_REG(bundle, Rx_op, op_ADD_12); + + // Rd = ((st32) ((ut32) mem_load_32(EA))); + RzILOpPure *ml_EA_16 = LOADW(32, VARL("EA")); + RzILOpEffect *op_ASSIGN_19 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, CAST(32, IL_FALSE, ml_EA_16))); + + // nop; + RzILOpEffect *nop_20 = NOP(); + + // seq(Rx = Rx + s; Rd = ((st32) ((ut32) mem_load_32(EA)))); + RzILOpEffect *seq_then_22 = SEQN(2, op_ASSIGN_13, op_ASSIGN_19); + + // seq(nop); + RzILOpEffect *seq_else_23 = nop_20; + + // if (! 
(((st32) Pt_new) & 0x1)) {seq(Rx = Rx + s; Rd = ((st32) ((ut32) mem_load_32(EA))))} else {seq(nop)}; + RzILOpPure *op_AND_8 = LOGAND(CAST(32, MSB(Pt_new), DUP(Pt_new)), SN(32, 1)); + RzILOpPure *op_INV_9 = INV(NON_ZERO(op_AND_8)); + RzILOpEffect *branch_24 = BRANCH(op_INV_9, seq_then_22, seq_else_23); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_10, op_ASSIGN_3, branch_24); + return instruction_sequence; +} + +// if (Pt) Rd = memw(Rs+Ii) +RzILOpEffect *hex_il_op_l2_ploadrit_io(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Pt_op = ISA2REG(hi, 't', false); + RzILOpPure *Pt = READ_REG(pkt, Pt_op, false); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = ((ut32) Rs) + u; + RzILOpPure *op_ADD_5 = ADD(CAST(32, IL_FALSE, Rs), VARL("u")); + RzILOpEffect *op_ASSIGN_6 = SETL("EA", op_ADD_5); + + // Rd = ((st32) ((ut32) mem_load_32(EA))); + RzILOpPure *ml_EA_13 = LOADW(32, VARL("EA")); + RzILOpEffect *op_ASSIGN_16 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, CAST(32, IL_FALSE, ml_EA_13))); + + // nop; + RzILOpEffect *nop_17 = NOP(); + + // seq(Rd = ((st32) ((ut32) mem_load_32(EA)))); + RzILOpEffect *seq_then_19 = op_ASSIGN_16; + + // seq(nop); + RzILOpEffect *seq_else_20 = nop_17; + + // if ((((st32) Pt) & 0x1)) {seq(Rd = ((st32) ((ut32) mem_load_32(EA))))} else {seq(nop)}; + RzILOpPure *op_AND_11 = LOGAND(CAST(32, MSB(Pt), DUP(Pt)), SN(32, 1)); + RzILOpEffect *branch_21 = BRANCH(NON_ZERO(op_AND_11), seq_then_19, seq_else_20); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_6, branch_21); + return instruction_sequence; +} + +// if (Pt) Rd = memw(Rx++Ii) +RzILOpEffect *hex_il_op_l2_ploadrit_pi(HexInsnPktBundle *bundle) { + const 
HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Pt_op = ISA2REG(hi, 't', false); + RzILOpPure *Pt = READ_REG(pkt, Pt_op, false); + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // EA = ((ut32) Rx); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // s = s; + RzILOpEffect *imm_assign_9 = SETL("s", s); + + // Rx = Rx + s; + RzILOpPure *op_ADD_11 = ADD(READ_REG(pkt, Rx_op, false), VARL("s")); + RzILOpEffect *op_ASSIGN_12 = WRITE_REG(bundle, Rx_op, op_ADD_11); + + // Rd = ((st32) ((ut32) mem_load_32(EA))); + RzILOpPure *ml_EA_15 = LOADW(32, VARL("EA")); + RzILOpEffect *op_ASSIGN_18 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, CAST(32, IL_FALSE, ml_EA_15))); + + // nop; + RzILOpEffect *nop_19 = NOP(); + + // seq(Rx = Rx + s; Rd = ((st32) ((ut32) mem_load_32(EA)))); + RzILOpEffect *seq_then_21 = SEQN(2, op_ASSIGN_12, op_ASSIGN_18); + + // seq(nop); + RzILOpEffect *seq_else_22 = nop_19; + + // if ((((st32) Pt) & 0x1)) {seq(Rx = Rx + s; Rd = ((st32) ((ut32) mem_load_32(EA))))} else {seq(nop)}; + RzILOpPure *op_AND_8 = LOGAND(CAST(32, MSB(Pt), DUP(Pt)), SN(32, 1)); + RzILOpEffect *branch_23 = BRANCH(NON_ZERO(op_AND_8), seq_then_21, seq_else_22); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_9, op_ASSIGN_3, branch_23); + return instruction_sequence; +} + +// if (Pt.new) Rd = memw(Rs+Ii) +RzILOpEffect *hex_il_op_l2_ploadritnew_io(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Pt_new_op = ISA2REG(hi, 't', true); + RzILOpPure *Pt_new = READ_REG(pkt, Pt_new_op, true); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // u = 
u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = ((ut32) Rs) + u; + RzILOpPure *op_ADD_5 = ADD(CAST(32, IL_FALSE, Rs), VARL("u")); + RzILOpEffect *op_ASSIGN_6 = SETL("EA", op_ADD_5); + + // Rd = ((st32) ((ut32) mem_load_32(EA))); + RzILOpPure *ml_EA_13 = LOADW(32, VARL("EA")); + RzILOpEffect *op_ASSIGN_16 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, CAST(32, IL_FALSE, ml_EA_13))); + + // nop; + RzILOpEffect *nop_17 = NOP(); + + // seq(Rd = ((st32) ((ut32) mem_load_32(EA)))); + RzILOpEffect *seq_then_19 = op_ASSIGN_16; + + // seq(nop); + RzILOpEffect *seq_else_20 = nop_17; + + // if ((((st32) Pt_new) & 0x1)) {seq(Rd = ((st32) ((ut32) mem_load_32(EA))))} else {seq(nop)}; + RzILOpPure *op_AND_11 = LOGAND(CAST(32, MSB(Pt_new), DUP(Pt_new)), SN(32, 1)); + RzILOpEffect *branch_21 = BRANCH(NON_ZERO(op_AND_11), seq_then_19, seq_else_20); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_6, branch_21); + return instruction_sequence; +} + +// if (Pt.new) Rd = memw(Rx++Ii) +RzILOpEffect *hex_il_op_l2_ploadritnew_pi(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Pt_new_op = ISA2REG(hi, 't', true); + RzILOpPure *Pt_new = READ_REG(pkt, Pt_new_op, true); + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // EA = ((ut32) Rx); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // s = s; + RzILOpEffect *imm_assign_9 = SETL("s", s); + + // Rx = Rx + s; + RzILOpPure *op_ADD_11 = ADD(READ_REG(pkt, Rx_op, false), VARL("s")); + RzILOpEffect *op_ASSIGN_12 = WRITE_REG(bundle, Rx_op, op_ADD_11); + + // Rd = ((st32) ((ut32) mem_load_32(EA))); + RzILOpPure *ml_EA_15 = LOADW(32, VARL("EA")); + RzILOpEffect *op_ASSIGN_18 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, CAST(32, IL_FALSE, ml_EA_15))); + + // nop; + RzILOpEffect 
*nop_19 = NOP(); + + // seq(Rx = Rx + s; Rd = ((st32) ((ut32) mem_load_32(EA)))); + RzILOpEffect *seq_then_21 = SEQN(2, op_ASSIGN_12, op_ASSIGN_18); + + // seq(nop); + RzILOpEffect *seq_else_22 = nop_19; + + // if ((((st32) Pt_new) & 0x1)) {seq(Rx = Rx + s; Rd = ((st32) ((ut32) mem_load_32(EA))))} else {seq(nop)}; + RzILOpPure *op_AND_8 = LOGAND(CAST(32, MSB(Pt_new), DUP(Pt_new)), SN(32, 1)); + RzILOpEffect *branch_23 = BRANCH(NON_ZERO(op_AND_8), seq_then_21, seq_else_22); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_9, op_ASSIGN_3, branch_23); + return instruction_sequence; +} + +// if (!Pt) Rd = memub(Rs+Ii) +RzILOpEffect *hex_il_op_l2_ploadrubf_io(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Pt_op = ISA2REG(hi, 't', false); + RzILOpPure *Pt = READ_REG(pkt, Pt_op, false); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = ((ut32) Rs) + u; + RzILOpPure *op_ADD_5 = ADD(CAST(32, IL_FALSE, Rs), VARL("u")); + RzILOpEffect *op_ASSIGN_6 = SETL("EA", op_ADD_5); + + // Rd = ((st32) ((ut8) mem_load_8(EA))); + RzILOpPure *ml_EA_14 = LOADW(8, VARL("EA")); + RzILOpEffect *op_ASSIGN_17 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, CAST(8, IL_FALSE, ml_EA_14))); + + // nop; + RzILOpEffect *nop_18 = NOP(); + + // seq(Rd = ((st32) ((ut8) mem_load_8(EA)))); + RzILOpEffect *seq_then_20 = op_ASSIGN_17; + + // seq(nop); + RzILOpEffect *seq_else_21 = nop_18; + + // if (! 
(((st32) Pt) & 0x1)) {seq(Rd = ((st32) ((ut8) mem_load_8(EA))))} else {seq(nop)}; + RzILOpPure *op_AND_11 = LOGAND(CAST(32, MSB(Pt), DUP(Pt)), SN(32, 1)); + RzILOpPure *op_INV_12 = INV(NON_ZERO(op_AND_11)); + RzILOpEffect *branch_22 = BRANCH(op_INV_12, seq_then_20, seq_else_21); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_6, branch_22); + return instruction_sequence; +} + +// if (!Pt) Rd = memub(Rx++Ii) +RzILOpEffect *hex_il_op_l2_ploadrubf_pi(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Pt_op = ISA2REG(hi, 't', false); + RzILOpPure *Pt = READ_REG(pkt, Pt_op, false); + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // EA = ((ut32) Rx); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // s = s; + RzILOpEffect *imm_assign_10 = SETL("s", s); + + // Rx = Rx + s; + RzILOpPure *op_ADD_12 = ADD(READ_REG(pkt, Rx_op, false), VARL("s")); + RzILOpEffect *op_ASSIGN_13 = WRITE_REG(bundle, Rx_op, op_ADD_12); + + // Rd = ((st32) ((ut8) mem_load_8(EA))); + RzILOpPure *ml_EA_16 = LOADW(8, VARL("EA")); + RzILOpEffect *op_ASSIGN_19 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, CAST(8, IL_FALSE, ml_EA_16))); + + // nop; + RzILOpEffect *nop_20 = NOP(); + + // seq(Rx = Rx + s; Rd = ((st32) ((ut8) mem_load_8(EA)))); + RzILOpEffect *seq_then_22 = SEQN(2, op_ASSIGN_13, op_ASSIGN_19); + + // seq(nop); + RzILOpEffect *seq_else_23 = nop_20; + + // if (! 
(((st32) Pt) & 0x1)) {seq(Rx = Rx + s; Rd = ((st32) ((ut8) mem_load_8(EA))))} else {seq(nop)}; + RzILOpPure *op_AND_8 = LOGAND(CAST(32, MSB(Pt), DUP(Pt)), SN(32, 1)); + RzILOpPure *op_INV_9 = INV(NON_ZERO(op_AND_8)); + RzILOpEffect *branch_24 = BRANCH(op_INV_9, seq_then_22, seq_else_23); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_10, op_ASSIGN_3, branch_24); + return instruction_sequence; +} + +// if (!Pt.new) Rd = memub(Rs+Ii) +RzILOpEffect *hex_il_op_l2_ploadrubfnew_io(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Pt_new_op = ISA2REG(hi, 't', true); + RzILOpPure *Pt_new = READ_REG(pkt, Pt_new_op, true); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = ((ut32) Rs) + u; + RzILOpPure *op_ADD_5 = ADD(CAST(32, IL_FALSE, Rs), VARL("u")); + RzILOpEffect *op_ASSIGN_6 = SETL("EA", op_ADD_5); + + // Rd = ((st32) ((ut8) mem_load_8(EA))); + RzILOpPure *ml_EA_14 = LOADW(8, VARL("EA")); + RzILOpEffect *op_ASSIGN_17 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, CAST(8, IL_FALSE, ml_EA_14))); + + // nop; + RzILOpEffect *nop_18 = NOP(); + + // seq(Rd = ((st32) ((ut8) mem_load_8(EA)))); + RzILOpEffect *seq_then_20 = op_ASSIGN_17; + + // seq(nop); + RzILOpEffect *seq_else_21 = nop_18; + + // if (! 
(((st32) Pt_new) & 0x1)) {seq(Rd = ((st32) ((ut8) mem_load_8(EA))))} else {seq(nop)}; + RzILOpPure *op_AND_11 = LOGAND(CAST(32, MSB(Pt_new), DUP(Pt_new)), SN(32, 1)); + RzILOpPure *op_INV_12 = INV(NON_ZERO(op_AND_11)); + RzILOpEffect *branch_22 = BRANCH(op_INV_12, seq_then_20, seq_else_21); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_6, branch_22); + return instruction_sequence; +} + +// if (!Pt.new) Rd = memub(Rx++Ii) +RzILOpEffect *hex_il_op_l2_ploadrubfnew_pi(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Pt_new_op = ISA2REG(hi, 't', true); + RzILOpPure *Pt_new = READ_REG(pkt, Pt_new_op, true); + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // EA = ((ut32) Rx); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // s = s; + RzILOpEffect *imm_assign_10 = SETL("s", s); + + // Rx = Rx + s; + RzILOpPure *op_ADD_12 = ADD(READ_REG(pkt, Rx_op, false), VARL("s")); + RzILOpEffect *op_ASSIGN_13 = WRITE_REG(bundle, Rx_op, op_ADD_12); + + // Rd = ((st32) ((ut8) mem_load_8(EA))); + RzILOpPure *ml_EA_16 = LOADW(8, VARL("EA")); + RzILOpEffect *op_ASSIGN_19 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, CAST(8, IL_FALSE, ml_EA_16))); + + // nop; + RzILOpEffect *nop_20 = NOP(); + + // seq(Rx = Rx + s; Rd = ((st32) ((ut8) mem_load_8(EA)))); + RzILOpEffect *seq_then_22 = SEQN(2, op_ASSIGN_13, op_ASSIGN_19); + + // seq(nop); + RzILOpEffect *seq_else_23 = nop_20; + + // if (! 
(((st32) Pt_new) & 0x1)) {seq(Rx = Rx + s; Rd = ((st32) ((ut8) mem_load_8(EA))))} else {seq(nop)}; + RzILOpPure *op_AND_8 = LOGAND(CAST(32, MSB(Pt_new), DUP(Pt_new)), SN(32, 1)); + RzILOpPure *op_INV_9 = INV(NON_ZERO(op_AND_8)); + RzILOpEffect *branch_24 = BRANCH(op_INV_9, seq_then_22, seq_else_23); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_10, op_ASSIGN_3, branch_24); + return instruction_sequence; +} + +// if (Pt) Rd = memub(Rs+Ii) +RzILOpEffect *hex_il_op_l2_ploadrubt_io(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Pt_op = ISA2REG(hi, 't', false); + RzILOpPure *Pt = READ_REG(pkt, Pt_op, false); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = ((ut32) Rs) + u; + RzILOpPure *op_ADD_5 = ADD(CAST(32, IL_FALSE, Rs), VARL("u")); + RzILOpEffect *op_ASSIGN_6 = SETL("EA", op_ADD_5); + + // Rd = ((st32) ((ut8) mem_load_8(EA))); + RzILOpPure *ml_EA_13 = LOADW(8, VARL("EA")); + RzILOpEffect *op_ASSIGN_16 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, CAST(8, IL_FALSE, ml_EA_13))); + + // nop; + RzILOpEffect *nop_17 = NOP(); + + // seq(Rd = ((st32) ((ut8) mem_load_8(EA)))); + RzILOpEffect *seq_then_19 = op_ASSIGN_16; + + // seq(nop); + RzILOpEffect *seq_else_20 = nop_17; + + // if ((((st32) Pt) & 0x1)) {seq(Rd = ((st32) ((ut8) mem_load_8(EA))))} else {seq(nop)}; + RzILOpPure *op_AND_11 = LOGAND(CAST(32, MSB(Pt), DUP(Pt)), SN(32, 1)); + RzILOpEffect *branch_21 = BRANCH(NON_ZERO(op_AND_11), seq_then_19, seq_else_20); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_6, branch_21); + return instruction_sequence; +} + +// if (Pt) Rd = memub(Rx++Ii) +RzILOpEffect *hex_il_op_l2_ploadrubt_pi(HexInsnPktBundle *bundle) { + const HexInsn 
*hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Pt_op = ISA2REG(hi, 't', false); + RzILOpPure *Pt = READ_REG(pkt, Pt_op, false); + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // EA = ((ut32) Rx); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // s = s; + RzILOpEffect *imm_assign_9 = SETL("s", s); + + // Rx = Rx + s; + RzILOpPure *op_ADD_11 = ADD(READ_REG(pkt, Rx_op, false), VARL("s")); + RzILOpEffect *op_ASSIGN_12 = WRITE_REG(bundle, Rx_op, op_ADD_11); + + // Rd = ((st32) ((ut8) mem_load_8(EA))); + RzILOpPure *ml_EA_15 = LOADW(8, VARL("EA")); + RzILOpEffect *op_ASSIGN_18 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, CAST(8, IL_FALSE, ml_EA_15))); + + // nop; + RzILOpEffect *nop_19 = NOP(); + + // seq(Rx = Rx + s; Rd = ((st32) ((ut8) mem_load_8(EA)))); + RzILOpEffect *seq_then_21 = SEQN(2, op_ASSIGN_12, op_ASSIGN_18); + + // seq(nop); + RzILOpEffect *seq_else_22 = nop_19; + + // if ((((st32) Pt) & 0x1)) {seq(Rx = Rx + s; Rd = ((st32) ((ut8) mem_load_8(EA))))} else {seq(nop)}; + RzILOpPure *op_AND_8 = LOGAND(CAST(32, MSB(Pt), DUP(Pt)), SN(32, 1)); + RzILOpEffect *branch_23 = BRANCH(NON_ZERO(op_AND_8), seq_then_21, seq_else_22); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_9, op_ASSIGN_3, branch_23); + return instruction_sequence; +} + +// if (Pt.new) Rd = memub(Rs+Ii) +RzILOpEffect *hex_il_op_l2_ploadrubtnew_io(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Pt_new_op = ISA2REG(hi, 't', true); + RzILOpPure *Pt_new = READ_REG(pkt, Pt_new_op, true); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // u = u; + 
RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = ((ut32) Rs) + u; + RzILOpPure *op_ADD_5 = ADD(CAST(32, IL_FALSE, Rs), VARL("u")); + RzILOpEffect *op_ASSIGN_6 = SETL("EA", op_ADD_5); + + // Rd = ((st32) ((ut8) mem_load_8(EA))); + RzILOpPure *ml_EA_13 = LOADW(8, VARL("EA")); + RzILOpEffect *op_ASSIGN_16 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, CAST(8, IL_FALSE, ml_EA_13))); + + // nop; + RzILOpEffect *nop_17 = NOP(); + + // seq(Rd = ((st32) ((ut8) mem_load_8(EA)))); + RzILOpEffect *seq_then_19 = op_ASSIGN_16; + + // seq(nop); + RzILOpEffect *seq_else_20 = nop_17; + + // if ((((st32) Pt_new) & 0x1)) {seq(Rd = ((st32) ((ut8) mem_load_8(EA))))} else {seq(nop)}; + RzILOpPure *op_AND_11 = LOGAND(CAST(32, MSB(Pt_new), DUP(Pt_new)), SN(32, 1)); + RzILOpEffect *branch_21 = BRANCH(NON_ZERO(op_AND_11), seq_then_19, seq_else_20); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_6, branch_21); + return instruction_sequence; +} + +// if (Pt.new) Rd = memub(Rx++Ii) +RzILOpEffect *hex_il_op_l2_ploadrubtnew_pi(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Pt_new_op = ISA2REG(hi, 't', true); + RzILOpPure *Pt_new = READ_REG(pkt, Pt_new_op, true); + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // EA = ((ut32) Rx); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // s = s; + RzILOpEffect *imm_assign_9 = SETL("s", s); + + // Rx = Rx + s; + RzILOpPure *op_ADD_11 = ADD(READ_REG(pkt, Rx_op, false), VARL("s")); + RzILOpEffect *op_ASSIGN_12 = WRITE_REG(bundle, Rx_op, op_ADD_11); + + // Rd = ((st32) ((ut8) mem_load_8(EA))); + RzILOpPure *ml_EA_15 = LOADW(8, VARL("EA")); + RzILOpEffect *op_ASSIGN_18 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, CAST(8, IL_FALSE, ml_EA_15))); + + // nop; + RzILOpEffect *nop_19 = NOP(); 
+ + // seq(Rx = Rx + s; Rd = ((st32) ((ut8) mem_load_8(EA)))); + RzILOpEffect *seq_then_21 = SEQN(2, op_ASSIGN_12, op_ASSIGN_18); + + // seq(nop); + RzILOpEffect *seq_else_22 = nop_19; + + // if ((((st32) Pt_new) & 0x1)) {seq(Rx = Rx + s; Rd = ((st32) ((ut8) mem_load_8(EA))))} else {seq(nop)}; + RzILOpPure *op_AND_8 = LOGAND(CAST(32, MSB(Pt_new), DUP(Pt_new)), SN(32, 1)); + RzILOpEffect *branch_23 = BRANCH(NON_ZERO(op_AND_8), seq_then_21, seq_else_22); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_9, op_ASSIGN_3, branch_23); + return instruction_sequence; +} + +// if (!Pt) Rd = memuh(Rs+Ii) +RzILOpEffect *hex_il_op_l2_ploadruhf_io(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Pt_op = ISA2REG(hi, 't', false); + RzILOpPure *Pt = READ_REG(pkt, Pt_op, false); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = ((ut32) Rs) + u; + RzILOpPure *op_ADD_5 = ADD(CAST(32, IL_FALSE, Rs), VARL("u")); + RzILOpEffect *op_ASSIGN_6 = SETL("EA", op_ADD_5); + + // Rd = ((st32) ((ut16) mem_load_16(EA))); + RzILOpPure *ml_EA_14 = LOADW(16, VARL("EA")); + RzILOpEffect *op_ASSIGN_17 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, CAST(16, IL_FALSE, ml_EA_14))); + + // nop; + RzILOpEffect *nop_18 = NOP(); + + // seq(Rd = ((st32) ((ut16) mem_load_16(EA)))); + RzILOpEffect *seq_then_20 = op_ASSIGN_17; + + // seq(nop); + RzILOpEffect *seq_else_21 = nop_18; + + // if (! 
(((st32) Pt) & 0x1)) {seq(Rd = ((st32) ((ut16) mem_load_16(EA))))} else {seq(nop)}; + RzILOpPure *op_AND_11 = LOGAND(CAST(32, MSB(Pt), DUP(Pt)), SN(32, 1)); + RzILOpPure *op_INV_12 = INV(NON_ZERO(op_AND_11)); + RzILOpEffect *branch_22 = BRANCH(op_INV_12, seq_then_20, seq_else_21); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_6, branch_22); + return instruction_sequence; +} + +// if (!Pt) Rd = memuh(Rx++Ii) +RzILOpEffect *hex_il_op_l2_ploadruhf_pi(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Pt_op = ISA2REG(hi, 't', false); + RzILOpPure *Pt = READ_REG(pkt, Pt_op, false); + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // EA = ((ut32) Rx); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // s = s; + RzILOpEffect *imm_assign_10 = SETL("s", s); + + // Rx = Rx + s; + RzILOpPure *op_ADD_12 = ADD(READ_REG(pkt, Rx_op, false), VARL("s")); + RzILOpEffect *op_ASSIGN_13 = WRITE_REG(bundle, Rx_op, op_ADD_12); + + // Rd = ((st32) ((ut16) mem_load_16(EA))); + RzILOpPure *ml_EA_16 = LOADW(16, VARL("EA")); + RzILOpEffect *op_ASSIGN_19 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, CAST(16, IL_FALSE, ml_EA_16))); + + // nop; + RzILOpEffect *nop_20 = NOP(); + + // seq(Rx = Rx + s; Rd = ((st32) ((ut16) mem_load_16(EA)))); + RzILOpEffect *seq_then_22 = SEQN(2, op_ASSIGN_13, op_ASSIGN_19); + + // seq(nop); + RzILOpEffect *seq_else_23 = nop_20; + + // if (! 
(((st32) Pt) & 0x1)) {seq(Rx = Rx + s; Rd = ((st32) ((ut16) mem_load_16(EA))))} else {seq(nop)}; + RzILOpPure *op_AND_8 = LOGAND(CAST(32, MSB(Pt), DUP(Pt)), SN(32, 1)); + RzILOpPure *op_INV_9 = INV(NON_ZERO(op_AND_8)); + RzILOpEffect *branch_24 = BRANCH(op_INV_9, seq_then_22, seq_else_23); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_10, op_ASSIGN_3, branch_24); + return instruction_sequence; +} + +// if (!Pt.new) Rd = memuh(Rs+Ii) +RzILOpEffect *hex_il_op_l2_ploadruhfnew_io(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Pt_new_op = ISA2REG(hi, 't', true); + RzILOpPure *Pt_new = READ_REG(pkt, Pt_new_op, true); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = ((ut32) Rs) + u; + RzILOpPure *op_ADD_5 = ADD(CAST(32, IL_FALSE, Rs), VARL("u")); + RzILOpEffect *op_ASSIGN_6 = SETL("EA", op_ADD_5); + + // Rd = ((st32) ((ut16) mem_load_16(EA))); + RzILOpPure *ml_EA_14 = LOADW(16, VARL("EA")); + RzILOpEffect *op_ASSIGN_17 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, CAST(16, IL_FALSE, ml_EA_14))); + + // nop; + RzILOpEffect *nop_18 = NOP(); + + // seq(Rd = ((st32) ((ut16) mem_load_16(EA)))); + RzILOpEffect *seq_then_20 = op_ASSIGN_17; + + // seq(nop); + RzILOpEffect *seq_else_21 = nop_18; + + // if (! 
(((st32) Pt_new) & 0x1)) {seq(Rd = ((st32) ((ut16) mem_load_16(EA))))} else {seq(nop)}; + RzILOpPure *op_AND_11 = LOGAND(CAST(32, MSB(Pt_new), DUP(Pt_new)), SN(32, 1)); + RzILOpPure *op_INV_12 = INV(NON_ZERO(op_AND_11)); + RzILOpEffect *branch_22 = BRANCH(op_INV_12, seq_then_20, seq_else_21); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_6, branch_22); + return instruction_sequence; +} + +// if (!Pt.new) Rd = memuh(Rx++Ii) +RzILOpEffect *hex_il_op_l2_ploadruhfnew_pi(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Pt_new_op = ISA2REG(hi, 't', true); + RzILOpPure *Pt_new = READ_REG(pkt, Pt_new_op, true); + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // EA = ((ut32) Rx); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // s = s; + RzILOpEffect *imm_assign_10 = SETL("s", s); + + // Rx = Rx + s; + RzILOpPure *op_ADD_12 = ADD(READ_REG(pkt, Rx_op, false), VARL("s")); + RzILOpEffect *op_ASSIGN_13 = WRITE_REG(bundle, Rx_op, op_ADD_12); + + // Rd = ((st32) ((ut16) mem_load_16(EA))); + RzILOpPure *ml_EA_16 = LOADW(16, VARL("EA")); + RzILOpEffect *op_ASSIGN_19 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, CAST(16, IL_FALSE, ml_EA_16))); + + // nop; + RzILOpEffect *nop_20 = NOP(); + + // seq(Rx = Rx + s; Rd = ((st32) ((ut16) mem_load_16(EA)))); + RzILOpEffect *seq_then_22 = SEQN(2, op_ASSIGN_13, op_ASSIGN_19); + + // seq(nop); + RzILOpEffect *seq_else_23 = nop_20; + + // if (! 
(((st32) Pt_new) & 0x1)) {seq(Rx = Rx + s; Rd = ((st32) ((ut16) mem_load_16(EA))))} else {seq(nop)}; + RzILOpPure *op_AND_8 = LOGAND(CAST(32, MSB(Pt_new), DUP(Pt_new)), SN(32, 1)); + RzILOpPure *op_INV_9 = INV(NON_ZERO(op_AND_8)); + RzILOpEffect *branch_24 = BRANCH(op_INV_9, seq_then_22, seq_else_23); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_10, op_ASSIGN_3, branch_24); + return instruction_sequence; +} + +// if (Pt) Rd = memuh(Rs+Ii) +RzILOpEffect *hex_il_op_l2_ploadruht_io(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Pt_op = ISA2REG(hi, 't', false); + RzILOpPure *Pt = READ_REG(pkt, Pt_op, false); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = ((ut32) Rs) + u; + RzILOpPure *op_ADD_5 = ADD(CAST(32, IL_FALSE, Rs), VARL("u")); + RzILOpEffect *op_ASSIGN_6 = SETL("EA", op_ADD_5); + + // Rd = ((st32) ((ut16) mem_load_16(EA))); + RzILOpPure *ml_EA_13 = LOADW(16, VARL("EA")); + RzILOpEffect *op_ASSIGN_16 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, CAST(16, IL_FALSE, ml_EA_13))); + + // nop; + RzILOpEffect *nop_17 = NOP(); + + // seq(Rd = ((st32) ((ut16) mem_load_16(EA)))); + RzILOpEffect *seq_then_19 = op_ASSIGN_16; + + // seq(nop); + RzILOpEffect *seq_else_20 = nop_17; + + // if ((((st32) Pt) & 0x1)) {seq(Rd = ((st32) ((ut16) mem_load_16(EA))))} else {seq(nop)}; + RzILOpPure *op_AND_11 = LOGAND(CAST(32, MSB(Pt), DUP(Pt)), SN(32, 1)); + RzILOpEffect *branch_21 = BRANCH(NON_ZERO(op_AND_11), seq_then_19, seq_else_20); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_6, branch_21); + return instruction_sequence; +} + +// if (Pt) Rd = memuh(Rx++Ii) +RzILOpEffect *hex_il_op_l2_ploadruht_pi(HexInsnPktBundle *bundle) { + const 
HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Pt_op = ISA2REG(hi, 't', false); + RzILOpPure *Pt = READ_REG(pkt, Pt_op, false); + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // EA = ((ut32) Rx); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // s = s; + RzILOpEffect *imm_assign_9 = SETL("s", s); + + // Rx = Rx + s; + RzILOpPure *op_ADD_11 = ADD(READ_REG(pkt, Rx_op, false), VARL("s")); + RzILOpEffect *op_ASSIGN_12 = WRITE_REG(bundle, Rx_op, op_ADD_11); + + // Rd = ((st32) ((ut16) mem_load_16(EA))); + RzILOpPure *ml_EA_15 = LOADW(16, VARL("EA")); + RzILOpEffect *op_ASSIGN_18 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, CAST(16, IL_FALSE, ml_EA_15))); + + // nop; + RzILOpEffect *nop_19 = NOP(); + + // seq(Rx = Rx + s; Rd = ((st32) ((ut16) mem_load_16(EA)))); + RzILOpEffect *seq_then_21 = SEQN(2, op_ASSIGN_12, op_ASSIGN_18); + + // seq(nop); + RzILOpEffect *seq_else_22 = nop_19; + + // if ((((st32) Pt) & 0x1)) {seq(Rx = Rx + s; Rd = ((st32) ((ut16) mem_load_16(EA))))} else {seq(nop)}; + RzILOpPure *op_AND_8 = LOGAND(CAST(32, MSB(Pt), DUP(Pt)), SN(32, 1)); + RzILOpEffect *branch_23 = BRANCH(NON_ZERO(op_AND_8), seq_then_21, seq_else_22); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_9, op_ASSIGN_3, branch_23); + return instruction_sequence; +} + +// if (Pt.new) Rd = memuh(Rs+Ii) +RzILOpEffect *hex_il_op_l2_ploadruhtnew_io(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Pt_new_op = ISA2REG(hi, 't', true); + RzILOpPure *Pt_new = READ_REG(pkt, Pt_new_op, true); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // u 
= u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = ((ut32) Rs) + u; + RzILOpPure *op_ADD_5 = ADD(CAST(32, IL_FALSE, Rs), VARL("u")); + RzILOpEffect *op_ASSIGN_6 = SETL("EA", op_ADD_5); + + // Rd = ((st32) ((ut16) mem_load_16(EA))); + RzILOpPure *ml_EA_13 = LOADW(16, VARL("EA")); + RzILOpEffect *op_ASSIGN_16 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, CAST(16, IL_FALSE, ml_EA_13))); + + // nop; + RzILOpEffect *nop_17 = NOP(); + + // seq(Rd = ((st32) ((ut16) mem_load_16(EA)))); + RzILOpEffect *seq_then_19 = op_ASSIGN_16; + + // seq(nop); + RzILOpEffect *seq_else_20 = nop_17; + + // if ((((st32) Pt_new) & 0x1)) {seq(Rd = ((st32) ((ut16) mem_load_16(EA))))} else {seq(nop)}; + RzILOpPure *op_AND_11 = LOGAND(CAST(32, MSB(Pt_new), DUP(Pt_new)), SN(32, 1)); + RzILOpEffect *branch_21 = BRANCH(NON_ZERO(op_AND_11), seq_then_19, seq_else_20); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_6, branch_21); + return instruction_sequence; +} + +// if (Pt.new) Rd = memuh(Rx++Ii) +RzILOpEffect *hex_il_op_l2_ploadruhtnew_pi(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Pt_new_op = ISA2REG(hi, 't', true); + RzILOpPure *Pt_new = READ_REG(pkt, Pt_new_op, true); + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // EA = ((ut32) Rx); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // s = s; + RzILOpEffect *imm_assign_9 = SETL("s", s); + + // Rx = Rx + s; + RzILOpPure *op_ADD_11 = ADD(READ_REG(pkt, Rx_op, false), VARL("s")); + RzILOpEffect *op_ASSIGN_12 = WRITE_REG(bundle, Rx_op, op_ADD_11); + + // Rd = ((st32) ((ut16) mem_load_16(EA))); + RzILOpPure *ml_EA_15 = LOADW(16, VARL("EA")); + RzILOpEffect *op_ASSIGN_18 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, CAST(16, IL_FALSE, ml_EA_15))); + + // nop; + 
RzILOpEffect *nop_19 = NOP(); + + // seq(Rx = Rx + s; Rd = ((st32) ((ut16) mem_load_16(EA)))); + RzILOpEffect *seq_then_21 = SEQN(2, op_ASSIGN_12, op_ASSIGN_18); + + // seq(nop); + RzILOpEffect *seq_else_22 = nop_19; + + // if ((((st32) Pt_new) & 0x1)) {seq(Rx = Rx + s; Rd = ((st32) ((ut16) mem_load_16(EA))))} else {seq(nop)}; + RzILOpPure *op_AND_8 = LOGAND(CAST(32, MSB(Pt_new), DUP(Pt_new)), SN(32, 1)); + RzILOpEffect *branch_23 = BRANCH(NON_ZERO(op_AND_8), seq_then_21, seq_else_22); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_9, op_ASSIGN_3, branch_23); + return instruction_sequence; +} + +#include \ No newline at end of file diff --git a/librz/arch/isa/hexagon/il_ops/hexagon_il_L4_ops.c b/librz/arch/isa/hexagon/il_ops/hexagon_il_L4_ops.c new file mode 100644 index 00000000000..8113b034abb --- /dev/null +++ b/librz/arch/isa/hexagon/il_ops/hexagon_il_L4_ops.c @@ -0,0 +1,4498 @@ +// SPDX-FileCopyrightText: 2021 Rot127 +// SPDX-License-Identifier: LGPL-3.0-only + +// LLVM commit: b6f51787f6c8e77143f0aef6b58ddc7c55741d5c +// LLVM commit date: 2023-11-15 07:10:59 -0800 (ISO 8601 format) +// Date of code generation: 2024-03-16 06:22:39-05:00 +//======================================== +// The following code is generated. +// Do not edit. 
Repository of code generator: +// https://github.com/rizinorg/rz-hexagon + +#include +#include "../hexagon_il.h" +#include +#include + +// memb(Rs+Ii) += Rt +RzILOpEffect *hex_il_op_l4_add_memopb_io(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + // Declare: st32 tmp; + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = ((ut32) Rs) + u; + RzILOpPure *op_ADD_5 = ADD(CAST(32, IL_FALSE, Rs), VARL("u")); + RzILOpEffect *op_ASSIGN_6 = SETL("EA", op_ADD_5); + + // tmp = ((st32) ((st8) mem_load_8(EA))); + RzILOpPure *ml_EA_9 = LOADW(8, VARL("EA")); + RzILOpEffect *op_ASSIGN_12 = SETL("tmp", CAST(32, MSB(CAST(8, MSB(ml_EA_9), DUP(ml_EA_9))), CAST(8, MSB(DUP(ml_EA_9)), DUP(ml_EA_9)))); + + // tmp = tmp + Rt; + RzILOpPure *op_ADD_14 = ADD(VARL("tmp"), Rt); + RzILOpEffect *op_ASSIGN_ADD_15 = SETL("tmp", op_ADD_14); + + // mem_store_ut8(EA, ((ut8) tmp)); + RzILOpEffect *ms_cast_ut8_16_17 = STOREW(VARL("EA"), CAST(8, IL_FALSE, VARL("tmp"))); + + RzILOpEffect *instruction_sequence = SEQN(5, imm_assign_0, op_ASSIGN_6, op_ASSIGN_12, op_ASSIGN_ADD_15, ms_cast_ut8_16_17); + return instruction_sequence; +} + +// memh(Rs+Ii) += Rt +RzILOpEffect *hex_il_op_l4_add_memoph_io(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + // Declare: st32 tmp; + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = ((ut32) Rs) + u; + 
RzILOpPure *op_ADD_5 = ADD(CAST(32, IL_FALSE, Rs), VARL("u")); + RzILOpEffect *op_ASSIGN_6 = SETL("EA", op_ADD_5); + + // tmp = ((st32) ((st16) mem_load_16(EA))); + RzILOpPure *ml_EA_9 = LOADW(16, VARL("EA")); + RzILOpEffect *op_ASSIGN_12 = SETL("tmp", CAST(32, MSB(CAST(16, MSB(ml_EA_9), DUP(ml_EA_9))), CAST(16, MSB(DUP(ml_EA_9)), DUP(ml_EA_9)))); + + // tmp = tmp + Rt; + RzILOpPure *op_ADD_14 = ADD(VARL("tmp"), Rt); + RzILOpEffect *op_ASSIGN_ADD_15 = SETL("tmp", op_ADD_14); + + // mem_store_ut16(EA, ((ut16) tmp)); + RzILOpEffect *ms_cast_ut16_16_17 = STOREW(VARL("EA"), CAST(16, IL_FALSE, VARL("tmp"))); + + RzILOpEffect *instruction_sequence = SEQN(5, imm_assign_0, op_ASSIGN_6, op_ASSIGN_12, op_ASSIGN_ADD_15, ms_cast_ut16_16_17); + return instruction_sequence; +} + +// memw(Rs+Ii) += Rt +RzILOpEffect *hex_il_op_l4_add_memopw_io(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + // Declare: st32 tmp; + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = ((ut32) Rs) + u; + RzILOpPure *op_ADD_5 = ADD(CAST(32, IL_FALSE, Rs), VARL("u")); + RzILOpEffect *op_ASSIGN_6 = SETL("EA", op_ADD_5); + + // tmp = ((st32) mem_load_32(EA)); + RzILOpPure *ml_EA_9 = LOADW(32, VARL("EA")); + RzILOpEffect *op_ASSIGN_11 = SETL("tmp", CAST(32, MSB(ml_EA_9), DUP(ml_EA_9))); + + // tmp = tmp + Rt; + RzILOpPure *op_ADD_13 = ADD(VARL("tmp"), Rt); + RzILOpEffect *op_ASSIGN_ADD_14 = SETL("tmp", op_ADD_13); + + // mem_store_ut32(EA, ((ut32) tmp)); + RzILOpEffect *ms_cast_ut32_15_16 = STOREW(VARL("EA"), CAST(32, IL_FALSE, VARL("tmp"))); + + RzILOpEffect *instruction_sequence = SEQN(5, imm_assign_0, op_ASSIGN_6, op_ASSIGN_11, op_ASSIGN_ADD_14, ms_cast_ut32_15_16); + 
return instruction_sequence; +} + +// memb(Rs+Ii) &= Rt +RzILOpEffect *hex_il_op_l4_and_memopb_io(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + // Declare: st32 tmp; + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = ((ut32) Rs) + u; + RzILOpPure *op_ADD_5 = ADD(CAST(32, IL_FALSE, Rs), VARL("u")); + RzILOpEffect *op_ASSIGN_6 = SETL("EA", op_ADD_5); + + // tmp = ((st32) ((st8) mem_load_8(EA))); + RzILOpPure *ml_EA_9 = LOADW(8, VARL("EA")); + RzILOpEffect *op_ASSIGN_12 = SETL("tmp", CAST(32, MSB(CAST(8, MSB(ml_EA_9), DUP(ml_EA_9))), CAST(8, MSB(DUP(ml_EA_9)), DUP(ml_EA_9)))); + + // tmp = (tmp & Rt); + RzILOpPure *op_AND_14 = LOGAND(VARL("tmp"), Rt); + RzILOpEffect *op_ASSIGN_AND_15 = SETL("tmp", op_AND_14); + + // mem_store_ut8(EA, ((ut8) tmp)); + RzILOpEffect *ms_cast_ut8_16_17 = STOREW(VARL("EA"), CAST(8, IL_FALSE, VARL("tmp"))); + + RzILOpEffect *instruction_sequence = SEQN(5, imm_assign_0, op_ASSIGN_6, op_ASSIGN_12, op_ASSIGN_AND_15, ms_cast_ut8_16_17); + return instruction_sequence; +} + +// memh(Rs+Ii) &= Rt +RzILOpEffect *hex_il_op_l4_and_memoph_io(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + // Declare: st32 tmp; + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = ((ut32) Rs) + u; + RzILOpPure *op_ADD_5 = ADD(CAST(32, IL_FALSE, Rs), VARL("u")); + RzILOpEffect *op_ASSIGN_6 = 
SETL("EA", op_ADD_5); + + // tmp = ((st32) ((st16) mem_load_16(EA))); + RzILOpPure *ml_EA_9 = LOADW(16, VARL("EA")); + RzILOpEffect *op_ASSIGN_12 = SETL("tmp", CAST(32, MSB(CAST(16, MSB(ml_EA_9), DUP(ml_EA_9))), CAST(16, MSB(DUP(ml_EA_9)), DUP(ml_EA_9)))); + + // tmp = (tmp & Rt); + RzILOpPure *op_AND_14 = LOGAND(VARL("tmp"), Rt); + RzILOpEffect *op_ASSIGN_AND_15 = SETL("tmp", op_AND_14); + + // mem_store_ut16(EA, ((ut16) tmp)); + RzILOpEffect *ms_cast_ut16_16_17 = STOREW(VARL("EA"), CAST(16, IL_FALSE, VARL("tmp"))); + + RzILOpEffect *instruction_sequence = SEQN(5, imm_assign_0, op_ASSIGN_6, op_ASSIGN_12, op_ASSIGN_AND_15, ms_cast_ut16_16_17); + return instruction_sequence; +} + +// memw(Rs+Ii) &= Rt +RzILOpEffect *hex_il_op_l4_and_memopw_io(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + // Declare: st32 tmp; + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = ((ut32) Rs) + u; + RzILOpPure *op_ADD_5 = ADD(CAST(32, IL_FALSE, Rs), VARL("u")); + RzILOpEffect *op_ASSIGN_6 = SETL("EA", op_ADD_5); + + // tmp = ((st32) mem_load_32(EA)); + RzILOpPure *ml_EA_9 = LOADW(32, VARL("EA")); + RzILOpEffect *op_ASSIGN_11 = SETL("tmp", CAST(32, MSB(ml_EA_9), DUP(ml_EA_9))); + + // tmp = (tmp & Rt); + RzILOpPure *op_AND_13 = LOGAND(VARL("tmp"), Rt); + RzILOpEffect *op_ASSIGN_AND_14 = SETL("tmp", op_AND_13); + + // mem_store_ut32(EA, ((ut32) tmp)); + RzILOpEffect *ms_cast_ut32_15_16 = STOREW(VARL("EA"), CAST(32, IL_FALSE, VARL("tmp"))); + + RzILOpEffect *instruction_sequence = SEQN(5, imm_assign_0, op_ASSIGN_6, op_ASSIGN_11, op_ASSIGN_AND_14, ms_cast_ut32_15_16); + return instruction_sequence; +} + +// memb(Rs+Ii) += II +RzILOpEffect 
*hex_il_op_l4_iadd_memopb_io(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + // Declare: st32 tmp; + RzILOpPure *U = UN(32, (ut32)ISA2IMM(hi, 'U')); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = ((ut32) Rs) + u; + RzILOpPure *op_ADD_5 = ADD(CAST(32, IL_FALSE, Rs), VARL("u")); + RzILOpEffect *op_ASSIGN_6 = SETL("EA", op_ADD_5); + + // tmp = ((st32) ((st8) mem_load_8(EA))); + RzILOpPure *ml_EA_9 = LOADW(8, VARL("EA")); + RzILOpEffect *op_ASSIGN_12 = SETL("tmp", CAST(32, MSB(CAST(8, MSB(ml_EA_9), DUP(ml_EA_9))), CAST(8, MSB(DUP(ml_EA_9)), DUP(ml_EA_9)))); + + // U = U; + RzILOpEffect *imm_assign_13 = SETL("U", U); + + // tmp = tmp + ((st32) U); + RzILOpPure *op_ADD_16 = ADD(VARL("tmp"), CAST(32, IL_FALSE, VARL("U"))); + RzILOpEffect *op_ASSIGN_ADD_17 = SETL("tmp", op_ADD_16); + + // mem_store_ut8(EA, ((ut8) tmp)); + RzILOpEffect *ms_cast_ut8_18_19 = STOREW(VARL("EA"), CAST(8, IL_FALSE, VARL("tmp"))); + + RzILOpEffect *instruction_sequence = SEQN(6, imm_assign_0, imm_assign_13, op_ASSIGN_6, op_ASSIGN_12, op_ASSIGN_ADD_17, ms_cast_ut8_18_19); + return instruction_sequence; +} + +// memh(Rs+Ii) += II +RzILOpEffect *hex_il_op_l4_iadd_memoph_io(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + // Declare: st32 tmp; + RzILOpPure *U = UN(32, (ut32)ISA2IMM(hi, 'U')); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = ((ut32) Rs) + u; + RzILOpPure *op_ADD_5 = ADD(CAST(32, IL_FALSE, Rs), VARL("u")); + RzILOpEffect *op_ASSIGN_6 = SETL("EA", op_ADD_5); + + // tmp = ((st32) ((st16) 
mem_load_16(EA))); + RzILOpPure *ml_EA_9 = LOADW(16, VARL("EA")); + RzILOpEffect *op_ASSIGN_12 = SETL("tmp", CAST(32, MSB(CAST(16, MSB(ml_EA_9), DUP(ml_EA_9))), CAST(16, MSB(DUP(ml_EA_9)), DUP(ml_EA_9)))); + + // U = U; + RzILOpEffect *imm_assign_13 = SETL("U", U); + + // tmp = tmp + ((st32) U); + RzILOpPure *op_ADD_16 = ADD(VARL("tmp"), CAST(32, IL_FALSE, VARL("U"))); + RzILOpEffect *op_ASSIGN_ADD_17 = SETL("tmp", op_ADD_16); + + // mem_store_ut16(EA, ((ut16) tmp)); + RzILOpEffect *ms_cast_ut16_18_19 = STOREW(VARL("EA"), CAST(16, IL_FALSE, VARL("tmp"))); + + RzILOpEffect *instruction_sequence = SEQN(6, imm_assign_0, imm_assign_13, op_ASSIGN_6, op_ASSIGN_12, op_ASSIGN_ADD_17, ms_cast_ut16_18_19); + return instruction_sequence; +} + +// memw(Rs+Ii) += II +RzILOpEffect *hex_il_op_l4_iadd_memopw_io(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + // Declare: st32 tmp; + RzILOpPure *U = UN(32, (ut32)ISA2IMM(hi, 'U')); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = ((ut32) Rs) + u; + RzILOpPure *op_ADD_5 = ADD(CAST(32, IL_FALSE, Rs), VARL("u")); + RzILOpEffect *op_ASSIGN_6 = SETL("EA", op_ADD_5); + + // tmp = ((st32) mem_load_32(EA)); + RzILOpPure *ml_EA_9 = LOADW(32, VARL("EA")); + RzILOpEffect *op_ASSIGN_11 = SETL("tmp", CAST(32, MSB(ml_EA_9), DUP(ml_EA_9))); + + // U = U; + RzILOpEffect *imm_assign_12 = SETL("U", U); + + // tmp = tmp + ((st32) U); + RzILOpPure *op_ADD_15 = ADD(VARL("tmp"), CAST(32, IL_FALSE, VARL("U"))); + RzILOpEffect *op_ASSIGN_ADD_16 = SETL("tmp", op_ADD_15); + + // mem_store_ut32(EA, ((ut32) tmp)); + RzILOpEffect *ms_cast_ut32_17_18 = STOREW(VARL("EA"), CAST(32, IL_FALSE, VARL("tmp"))); + + RzILOpEffect *instruction_sequence = SEQN(6, imm_assign_0, imm_assign_12, op_ASSIGN_6, op_ASSIGN_11, 
op_ASSIGN_ADD_16, ms_cast_ut32_17_18); + return instruction_sequence; +} + +// memb(Rs+Ii) = clrbit(II) +RzILOpEffect *hex_il_op_l4_iand_memopb_io(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + // Declare: st32 tmp; + RzILOpPure *U = UN(32, (ut32)ISA2IMM(hi, 'U')); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = ((ut32) Rs) + u; + RzILOpPure *op_ADD_5 = ADD(CAST(32, IL_FALSE, Rs), VARL("u")); + RzILOpEffect *op_ASSIGN_6 = SETL("EA", op_ADD_5); + + // tmp = ((st32) ((st8) mem_load_8(EA))); + RzILOpPure *ml_EA_9 = LOADW(8, VARL("EA")); + RzILOpEffect *op_ASSIGN_12 = SETL("tmp", CAST(32, MSB(CAST(8, MSB(ml_EA_9), DUP(ml_EA_9))), CAST(8, MSB(DUP(ml_EA_9)), DUP(ml_EA_9)))); + + // U = U; + RzILOpEffect *imm_assign_14 = SETL("U", U); + + // tmp = (tmp & (~(0x1 << U))); + RzILOpPure *op_LSHIFT_16 = SHIFTL0(SN(32, 1), VARL("U")); + RzILOpPure *op_NOT_17 = LOGNOT(op_LSHIFT_16); + RzILOpPure *op_AND_18 = LOGAND(VARL("tmp"), op_NOT_17); + RzILOpEffect *op_ASSIGN_AND_19 = SETL("tmp", op_AND_18); + + // mem_store_ut8(EA, ((ut8) tmp)); + RzILOpEffect *ms_cast_ut8_20_21 = STOREW(VARL("EA"), CAST(8, IL_FALSE, VARL("tmp"))); + + RzILOpEffect *instruction_sequence = SEQN(6, imm_assign_0, imm_assign_14, op_ASSIGN_6, op_ASSIGN_12, op_ASSIGN_AND_19, ms_cast_ut8_20_21); + return instruction_sequence; +} + +// memh(Rs+Ii) = clrbit(II) +RzILOpEffect *hex_il_op_l4_iand_memoph_io(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + // Declare: st32 tmp; + RzILOpPure *U = UN(32, (ut32)ISA2IMM(hi, 'U')); + + // u = u; + 
RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = ((ut32) Rs) + u; + RzILOpPure *op_ADD_5 = ADD(CAST(32, IL_FALSE, Rs), VARL("u")); + RzILOpEffect *op_ASSIGN_6 = SETL("EA", op_ADD_5); + + // tmp = ((st32) ((st16) mem_load_16(EA))); + RzILOpPure *ml_EA_9 = LOADW(16, VARL("EA")); + RzILOpEffect *op_ASSIGN_12 = SETL("tmp", CAST(32, MSB(CAST(16, MSB(ml_EA_9), DUP(ml_EA_9))), CAST(16, MSB(DUP(ml_EA_9)), DUP(ml_EA_9)))); + + // U = U; + RzILOpEffect *imm_assign_14 = SETL("U", U); + + // tmp = (tmp & (~(0x1 << U))); + RzILOpPure *op_LSHIFT_16 = SHIFTL0(SN(32, 1), VARL("U")); + RzILOpPure *op_NOT_17 = LOGNOT(op_LSHIFT_16); + RzILOpPure *op_AND_18 = LOGAND(VARL("tmp"), op_NOT_17); + RzILOpEffect *op_ASSIGN_AND_19 = SETL("tmp", op_AND_18); + + // mem_store_ut16(EA, ((ut16) tmp)); + RzILOpEffect *ms_cast_ut16_20_21 = STOREW(VARL("EA"), CAST(16, IL_FALSE, VARL("tmp"))); + + RzILOpEffect *instruction_sequence = SEQN(6, imm_assign_0, imm_assign_14, op_ASSIGN_6, op_ASSIGN_12, op_ASSIGN_AND_19, ms_cast_ut16_20_21); + return instruction_sequence; +} + +// memw(Rs+Ii) = clrbit(II) +RzILOpEffect *hex_il_op_l4_iand_memopw_io(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + // Declare: st32 tmp; + RzILOpPure *U = UN(32, (ut32)ISA2IMM(hi, 'U')); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = ((ut32) Rs) + u; + RzILOpPure *op_ADD_5 = ADD(CAST(32, IL_FALSE, Rs), VARL("u")); + RzILOpEffect *op_ASSIGN_6 = SETL("EA", op_ADD_5); + + // tmp = ((st32) mem_load_32(EA)); + RzILOpPure *ml_EA_9 = LOADW(32, VARL("EA")); + RzILOpEffect *op_ASSIGN_11 = SETL("tmp", CAST(32, MSB(ml_EA_9), DUP(ml_EA_9))); + + // U = U; + RzILOpEffect *imm_assign_13 = SETL("U", U); + + // tmp = (tmp & (~(0x1 << U))); + RzILOpPure *op_LSHIFT_15 = SHIFTL0(SN(32, 1), 
VARL("U")); + RzILOpPure *op_NOT_16 = LOGNOT(op_LSHIFT_15); + RzILOpPure *op_AND_17 = LOGAND(VARL("tmp"), op_NOT_16); + RzILOpEffect *op_ASSIGN_AND_18 = SETL("tmp", op_AND_17); + + // mem_store_ut32(EA, ((ut32) tmp)); + RzILOpEffect *ms_cast_ut32_19_20 = STOREW(VARL("EA"), CAST(32, IL_FALSE, VARL("tmp"))); + + RzILOpEffect *instruction_sequence = SEQN(6, imm_assign_0, imm_assign_13, op_ASSIGN_6, op_ASSIGN_11, op_ASSIGN_AND_18, ms_cast_ut32_19_20); + return instruction_sequence; +} + +// memb(Rs+Ii) = setbit(II) +RzILOpEffect *hex_il_op_l4_ior_memopb_io(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + // Declare: st32 tmp; + RzILOpPure *U = UN(32, (ut32)ISA2IMM(hi, 'U')); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = ((ut32) Rs) + u; + RzILOpPure *op_ADD_5 = ADD(CAST(32, IL_FALSE, Rs), VARL("u")); + RzILOpEffect *op_ASSIGN_6 = SETL("EA", op_ADD_5); + + // tmp = ((st32) ((st8) mem_load_8(EA))); + RzILOpPure *ml_EA_9 = LOADW(8, VARL("EA")); + RzILOpEffect *op_ASSIGN_12 = SETL("tmp", CAST(32, MSB(CAST(8, MSB(ml_EA_9), DUP(ml_EA_9))), CAST(8, MSB(DUP(ml_EA_9)), DUP(ml_EA_9)))); + + // U = U; + RzILOpEffect *imm_assign_14 = SETL("U", U); + + // tmp = (tmp | (0x1 << U)); + RzILOpPure *op_LSHIFT_16 = SHIFTL0(SN(32, 1), VARL("U")); + RzILOpPure *op_OR_17 = LOGOR(VARL("tmp"), op_LSHIFT_16); + RzILOpEffect *op_ASSIGN_OR_18 = SETL("tmp", op_OR_17); + + // mem_store_ut8(EA, ((ut8) tmp)); + RzILOpEffect *ms_cast_ut8_19_20 = STOREW(VARL("EA"), CAST(8, IL_FALSE, VARL("tmp"))); + + RzILOpEffect *instruction_sequence = SEQN(6, imm_assign_0, imm_assign_14, op_ASSIGN_6, op_ASSIGN_12, op_ASSIGN_OR_18, ms_cast_ut8_19_20); + return instruction_sequence; +} + +// memh(Rs+Ii) = setbit(II) +RzILOpEffect 
*hex_il_op_l4_ior_memoph_io(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + // Declare: st32 tmp; + RzILOpPure *U = UN(32, (ut32)ISA2IMM(hi, 'U')); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = ((ut32) Rs) + u; + RzILOpPure *op_ADD_5 = ADD(CAST(32, IL_FALSE, Rs), VARL("u")); + RzILOpEffect *op_ASSIGN_6 = SETL("EA", op_ADD_5); + + // tmp = ((st32) ((st16) mem_load_16(EA))); + RzILOpPure *ml_EA_9 = LOADW(16, VARL("EA")); + RzILOpEffect *op_ASSIGN_12 = SETL("tmp", CAST(32, MSB(CAST(16, MSB(ml_EA_9), DUP(ml_EA_9))), CAST(16, MSB(DUP(ml_EA_9)), DUP(ml_EA_9)))); + + // U = U; + RzILOpEffect *imm_assign_14 = SETL("U", U); + + // tmp = (tmp | (0x1 << U)); + RzILOpPure *op_LSHIFT_16 = SHIFTL0(SN(32, 1), VARL("U")); + RzILOpPure *op_OR_17 = LOGOR(VARL("tmp"), op_LSHIFT_16); + RzILOpEffect *op_ASSIGN_OR_18 = SETL("tmp", op_OR_17); + + // mem_store_ut16(EA, ((ut16) tmp)); + RzILOpEffect *ms_cast_ut16_19_20 = STOREW(VARL("EA"), CAST(16, IL_FALSE, VARL("tmp"))); + + RzILOpEffect *instruction_sequence = SEQN(6, imm_assign_0, imm_assign_14, op_ASSIGN_6, op_ASSIGN_12, op_ASSIGN_OR_18, ms_cast_ut16_19_20); + return instruction_sequence; +} + +// memw(Rs+Ii) = setbit(II) +RzILOpEffect *hex_il_op_l4_ior_memopw_io(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + // Declare: st32 tmp; + RzILOpPure *U = UN(32, (ut32)ISA2IMM(hi, 'U')); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = ((ut32) Rs) + u; + RzILOpPure *op_ADD_5 = ADD(CAST(32, IL_FALSE, Rs), VARL("u")); + RzILOpEffect *op_ASSIGN_6 = 
SETL("EA", op_ADD_5); + + // tmp = ((st32) mem_load_32(EA)); + RzILOpPure *ml_EA_9 = LOADW(32, VARL("EA")); + RzILOpEffect *op_ASSIGN_11 = SETL("tmp", CAST(32, MSB(ml_EA_9), DUP(ml_EA_9))); + + // U = U; + RzILOpEffect *imm_assign_13 = SETL("U", U); + + // tmp = (tmp | (0x1 << U)); + RzILOpPure *op_LSHIFT_15 = SHIFTL0(SN(32, 1), VARL("U")); + RzILOpPure *op_OR_16 = LOGOR(VARL("tmp"), op_LSHIFT_15); + RzILOpEffect *op_ASSIGN_OR_17 = SETL("tmp", op_OR_16); + + // mem_store_ut32(EA, ((ut32) tmp)); + RzILOpEffect *ms_cast_ut32_18_19 = STOREW(VARL("EA"), CAST(32, IL_FALSE, VARL("tmp"))); + + RzILOpEffect *instruction_sequence = SEQN(6, imm_assign_0, imm_assign_13, op_ASSIGN_6, op_ASSIGN_11, op_ASSIGN_OR_17, ms_cast_ut32_18_19); + return instruction_sequence; +} + +// memb(Rs+Ii) -= II +RzILOpEffect *hex_il_op_l4_isub_memopb_io(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + // Declare: st32 tmp; + RzILOpPure *U = UN(32, (ut32)ISA2IMM(hi, 'U')); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = ((ut32) Rs) + u; + RzILOpPure *op_ADD_5 = ADD(CAST(32, IL_FALSE, Rs), VARL("u")); + RzILOpEffect *op_ASSIGN_6 = SETL("EA", op_ADD_5); + + // tmp = ((st32) ((st8) mem_load_8(EA))); + RzILOpPure *ml_EA_9 = LOADW(8, VARL("EA")); + RzILOpEffect *op_ASSIGN_12 = SETL("tmp", CAST(32, MSB(CAST(8, MSB(ml_EA_9), DUP(ml_EA_9))), CAST(8, MSB(DUP(ml_EA_9)), DUP(ml_EA_9)))); + + // U = U; + RzILOpEffect *imm_assign_13 = SETL("U", U); + + // tmp = tmp - ((st32) U); + RzILOpPure *op_SUB_16 = SUB(VARL("tmp"), CAST(32, IL_FALSE, VARL("U"))); + RzILOpEffect *op_ASSIGN_SUB_17 = SETL("tmp", op_SUB_16); + + // mem_store_ut8(EA, ((ut8) tmp)); + RzILOpEffect *ms_cast_ut8_18_19 = STOREW(VARL("EA"), CAST(8, IL_FALSE, VARL("tmp"))); + + RzILOpEffect 
*instruction_sequence = SEQN(6, imm_assign_0, imm_assign_13, op_ASSIGN_6, op_ASSIGN_12, op_ASSIGN_SUB_17, ms_cast_ut8_18_19); + return instruction_sequence; +} + +// memh(Rs+Ii) -= II +RzILOpEffect *hex_il_op_l4_isub_memoph_io(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + // Declare: st32 tmp; + RzILOpPure *U = UN(32, (ut32)ISA2IMM(hi, 'U')); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = ((ut32) Rs) + u; + RzILOpPure *op_ADD_5 = ADD(CAST(32, IL_FALSE, Rs), VARL("u")); + RzILOpEffect *op_ASSIGN_6 = SETL("EA", op_ADD_5); + + // tmp = ((st32) ((st16) mem_load_16(EA))); + RzILOpPure *ml_EA_9 = LOADW(16, VARL("EA")); + RzILOpEffect *op_ASSIGN_12 = SETL("tmp", CAST(32, MSB(CAST(16, MSB(ml_EA_9), DUP(ml_EA_9))), CAST(16, MSB(DUP(ml_EA_9)), DUP(ml_EA_9)))); + + // U = U; + RzILOpEffect *imm_assign_13 = SETL("U", U); + + // tmp = tmp - ((st32) U); + RzILOpPure *op_SUB_16 = SUB(VARL("tmp"), CAST(32, IL_FALSE, VARL("U"))); + RzILOpEffect *op_ASSIGN_SUB_17 = SETL("tmp", op_SUB_16); + + // mem_store_ut16(EA, ((ut16) tmp)); + RzILOpEffect *ms_cast_ut16_18_19 = STOREW(VARL("EA"), CAST(16, IL_FALSE, VARL("tmp"))); + + RzILOpEffect *instruction_sequence = SEQN(6, imm_assign_0, imm_assign_13, op_ASSIGN_6, op_ASSIGN_12, op_ASSIGN_SUB_17, ms_cast_ut16_18_19); + return instruction_sequence; +} + +// memw(Rs+Ii) -= II +RzILOpEffect *hex_il_op_l4_isub_memopw_io(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + // Declare: st32 tmp; + RzILOpPure *U = UN(32, (ut32)ISA2IMM(hi, 'U')); + + // u = u; + RzILOpEffect 
*imm_assign_0 = SETL("u", u); + + // EA = ((ut32) Rs) + u; + RzILOpPure *op_ADD_5 = ADD(CAST(32, IL_FALSE, Rs), VARL("u")); + RzILOpEffect *op_ASSIGN_6 = SETL("EA", op_ADD_5); + + // tmp = ((st32) mem_load_32(EA)); + RzILOpPure *ml_EA_9 = LOADW(32, VARL("EA")); + RzILOpEffect *op_ASSIGN_11 = SETL("tmp", CAST(32, MSB(ml_EA_9), DUP(ml_EA_9))); + + // U = U; + RzILOpEffect *imm_assign_12 = SETL("U", U); + + // tmp = tmp - ((st32) U); + RzILOpPure *op_SUB_15 = SUB(VARL("tmp"), CAST(32, IL_FALSE, VARL("U"))); + RzILOpEffect *op_ASSIGN_SUB_16 = SETL("tmp", op_SUB_15); + + // mem_store_ut32(EA, ((ut32) tmp)); + RzILOpEffect *ms_cast_ut32_17_18 = STOREW(VARL("EA"), CAST(32, IL_FALSE, VARL("tmp"))); + + RzILOpEffect *instruction_sequence = SEQN(6, imm_assign_0, imm_assign_12, op_ASSIGN_6, op_ASSIGN_11, op_ASSIGN_SUB_16, ms_cast_ut32_17_18); + return instruction_sequence; +} + +// Ryy = memb_fifo(Re=II) +RzILOpEffect *hex_il_op_l4_loadalignb_ap(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *U = UN(32, (ut32)ISA2IMM(hi, 'U')); + // Declare: ut32 EA; + // Declare: ut64 tmpV; + const HexOp *Ryy_op = ISA2REG(hi, 'y', false); + RzILOpPure *Ryy = READ_REG(pkt, Ryy_op, false); + const HexOp *Re_op = ISA2REG(hi, 'e', false); + + // U = U; + RzILOpEffect *imm_assign_0 = SETL("U", U); + + // EA = U; + RzILOpEffect *op_ASSIGN_3 = SETL("EA", VARL("U")); + + // tmpV = ((ut64) ((ut8) mem_load_8(EA))); + RzILOpPure *ml_EA_6 = LOADW(8, VARL("EA")); + RzILOpEffect *op_ASSIGN_9 = SETL("tmpV", CAST(64, IL_FALSE, CAST(8, IL_FALSE, ml_EA_6))); + + // Ryy = ((st64) ((((ut64) Ryy) >> 0x8) | (tmpV << 0x38))); + RzILOpPure *op_RSHIFT_13 = SHIFTR0(CAST(64, IL_FALSE, Ryy), SN(32, 8)); + RzILOpPure *op_LSHIFT_15 = SHIFTL0(VARL("tmpV"), SN(32, 0x38)); + RzILOpPure *op_OR_16 = LOGOR(op_RSHIFT_13, op_LSHIFT_15); + RzILOpEffect *op_ASSIGN_18 = WRITE_REG(bundle, Ryy_op, CAST(64, IL_FALSE, op_OR_16)); + + // Re = ((st32) U); + RzILOpEffect 
*op_ASSIGN_21 = WRITE_REG(bundle, Re_op, CAST(32, IL_FALSE, VARL("U"))); + + RzILOpEffect *instruction_sequence = SEQN(5, imm_assign_0, op_ASSIGN_3, op_ASSIGN_9, op_ASSIGN_18, op_ASSIGN_21); + return instruction_sequence; +} + +// Ryy = memb_fifo(Rt<insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *U = UN(32, (ut32)ISA2IMM(hi, 'U')); + // Declare: ut32 EA; + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut64 tmpV; + const HexOp *Ryy_op = ISA2REG(hi, 'y', false); + RzILOpPure *Ryy = READ_REG(pkt, Ryy_op, false); + + // U = U; + RzILOpEffect *imm_assign_0 = SETL("U", U); + + // u = u; + RzILOpEffect *imm_assign_4 = SETL("u", u); + + // EA = U + ((ut32) (Rt << u)); + RzILOpPure *op_LSHIFT_6 = SHIFTL0(Rt, VARL("u")); + RzILOpPure *op_ADD_8 = ADD(VARL("U"), CAST(32, IL_FALSE, op_LSHIFT_6)); + RzILOpEffect *op_ASSIGN_9 = SETL("EA", op_ADD_8); + + // tmpV = ((ut64) ((ut8) mem_load_8(EA))); + RzILOpPure *ml_EA_12 = LOADW(8, VARL("EA")); + RzILOpEffect *op_ASSIGN_15 = SETL("tmpV", CAST(64, IL_FALSE, CAST(8, IL_FALSE, ml_EA_12))); + + // Ryy = ((st64) ((((ut64) Ryy) >> 0x8) | (tmpV << 0x38))); + RzILOpPure *op_RSHIFT_19 = SHIFTR0(CAST(64, IL_FALSE, Ryy), SN(32, 8)); + RzILOpPure *op_LSHIFT_21 = SHIFTL0(VARL("tmpV"), SN(32, 0x38)); + RzILOpPure *op_OR_22 = LOGOR(op_RSHIFT_19, op_LSHIFT_21); + RzILOpEffect *op_ASSIGN_24 = WRITE_REG(bundle, Ryy_op, CAST(64, IL_FALSE, op_OR_22)); + + RzILOpEffect *instruction_sequence = SEQN(5, imm_assign_0, imm_assign_4, op_ASSIGN_9, op_ASSIGN_15, op_ASSIGN_24); + return instruction_sequence; +} + +// Ryy = memh_fifo(Re=II) +RzILOpEffect *hex_il_op_l4_loadalignh_ap(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *U = UN(32, (ut32)ISA2IMM(hi, 'U')); + // Declare: ut32 EA; + // Declare: ut64 tmpV; + const HexOp *Ryy_op = ISA2REG(hi, 'y', false); + RzILOpPure 
*Ryy = READ_REG(pkt, Ryy_op, false); + const HexOp *Re_op = ISA2REG(hi, 'e', false); + + // U = U; + RzILOpEffect *imm_assign_0 = SETL("U", U); + + // EA = U; + RzILOpEffect *op_ASSIGN_3 = SETL("EA", VARL("U")); + + // tmpV = ((ut64) ((ut16) mem_load_16(EA))); + RzILOpPure *ml_EA_6 = LOADW(16, VARL("EA")); + RzILOpEffect *op_ASSIGN_9 = SETL("tmpV", CAST(64, IL_FALSE, CAST(16, IL_FALSE, ml_EA_6))); + + // Ryy = ((st64) ((((ut64) Ryy) >> 0x10) | (tmpV << 0x30))); + RzILOpPure *op_RSHIFT_13 = SHIFTR0(CAST(64, IL_FALSE, Ryy), SN(32, 16)); + RzILOpPure *op_LSHIFT_15 = SHIFTL0(VARL("tmpV"), SN(32, 0x30)); + RzILOpPure *op_OR_16 = LOGOR(op_RSHIFT_13, op_LSHIFT_15); + RzILOpEffect *op_ASSIGN_18 = WRITE_REG(bundle, Ryy_op, CAST(64, IL_FALSE, op_OR_16)); + + // Re = ((st32) U); + RzILOpEffect *op_ASSIGN_22 = WRITE_REG(bundle, Re_op, CAST(32, IL_FALSE, VARL("U"))); + + RzILOpEffect *instruction_sequence = SEQN(5, imm_assign_0, op_ASSIGN_3, op_ASSIGN_9, op_ASSIGN_18, op_ASSIGN_22); + return instruction_sequence; +} + +// Ryy = memh_fifo(Rt<insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *U = UN(32, (ut32)ISA2IMM(hi, 'U')); + // Declare: ut32 EA; + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut64 tmpV; + const HexOp *Ryy_op = ISA2REG(hi, 'y', false); + RzILOpPure *Ryy = READ_REG(pkt, Ryy_op, false); + + // U = U; + RzILOpEffect *imm_assign_0 = SETL("U", U); + + // u = u; + RzILOpEffect *imm_assign_4 = SETL("u", u); + + // EA = U + ((ut32) (Rt << u)); + RzILOpPure *op_LSHIFT_6 = SHIFTL0(Rt, VARL("u")); + RzILOpPure *op_ADD_8 = ADD(VARL("U"), CAST(32, IL_FALSE, op_LSHIFT_6)); + RzILOpEffect *op_ASSIGN_9 = SETL("EA", op_ADD_8); + + // tmpV = ((ut64) ((ut16) mem_load_16(EA))); + RzILOpPure *ml_EA_12 = LOADW(16, VARL("EA")); + RzILOpEffect *op_ASSIGN_15 = SETL("tmpV", CAST(64, IL_FALSE, CAST(16, IL_FALSE, ml_EA_12))); + + // Ryy = ((st64) ((((ut64) Ryy) >> 
0x10) | (tmpV << 0x30))); + RzILOpPure *op_RSHIFT_19 = SHIFTR0(CAST(64, IL_FALSE, Ryy), SN(32, 16)); + RzILOpPure *op_LSHIFT_21 = SHIFTL0(VARL("tmpV"), SN(32, 0x30)); + RzILOpPure *op_OR_22 = LOGOR(op_RSHIFT_19, op_LSHIFT_21); + RzILOpEffect *op_ASSIGN_24 = WRITE_REG(bundle, Ryy_op, CAST(64, IL_FALSE, op_OR_22)); + + RzILOpEffect *instruction_sequence = SEQN(5, imm_assign_0, imm_assign_4, op_ASSIGN_9, op_ASSIGN_15, op_ASSIGN_24); + return instruction_sequence; +} + +// Rd = membh(Re=II) +RzILOpEffect *hex_il_op_l4_loadbsw2_ap(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *U = UN(32, (ut32)ISA2IMM(hi, 'U')); + // Declare: ut32 EA; + // Declare: ut16 tmpV; + // Declare: st32 i; + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Re_op = ISA2REG(hi, 'e', false); + + // U = U; + RzILOpEffect *imm_assign_0 = SETL("U", U); + + // EA = U; + RzILOpEffect *op_ASSIGN_3 = SETL("EA", VARL("U")); + + // tmpV = ((ut16) mem_load_16(EA)); + RzILOpPure *ml_EA_7 = LOADW(16, VARL("EA")); + RzILOpEffect *op_ASSIGN_9 = SETL("tmpV", CAST(16, IL_FALSE, ml_EA_7)); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_11 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_14 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp224 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_16 = SETL("h_tmp224", VARL("i")); + + // seq(h_tmp224 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_17 = SEQN(2, op_ASSIGN_hybrid_tmp_16, op_INC_14); + + // Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xffff << i * 0x10)))) | (((ut64) (((st32) ((st8) (((st32) (tmpV >> i * 0x8)) & 0xff))) & 0xffff)) << i * 0x10))); + RzILOpPure *op_MUL_21 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_22 = SHIFTL0(SN(64, 0xffff), op_MUL_21); + RzILOpPure *op_NOT_23 = LOGNOT(op_LSHIFT_22); + RzILOpPure *op_AND_25 = LOGAND(CAST(64, MSB(READ_REG(pkt, Rd_op, true)), READ_REG(pkt, Rd_op, true)), op_NOT_23); + RzILOpPure *op_MUL_27 = MUL(VARL("i"), SN(32, 8)); + 
RzILOpPure *op_RSHIFT_28 = SHIFTR0(VARL("tmpV"), op_MUL_27); + RzILOpPure *op_AND_31 = LOGAND(CAST(32, IL_FALSE, op_RSHIFT_28), SN(32, 0xff)); + RzILOpPure *op_AND_35 = LOGAND(CAST(32, MSB(CAST(8, MSB(op_AND_31), DUP(op_AND_31))), CAST(8, MSB(DUP(op_AND_31)), DUP(op_AND_31))), SN(32, 0xffff)); + RzILOpPure *op_MUL_38 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_39 = SHIFTL0(CAST(64, IL_FALSE, op_AND_35), op_MUL_38); + RzILOpPure *op_OR_41 = LOGOR(CAST(64, IL_FALSE, op_AND_25), op_LSHIFT_39); + RzILOpEffect *op_ASSIGN_43 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, op_OR_41)); + + // seq(h_tmp224; Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xffff << ...; + RzILOpEffect *seq_45 = op_ASSIGN_43; + + // seq(seq(h_tmp224; Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xffff ...; + RzILOpEffect *seq_46 = SEQN(2, seq_45, seq_17); + + // while ((i < 0x2)) { seq(seq(h_tmp224; Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xffff ... }; + RzILOpPure *op_LT_13 = SLT(VARL("i"), SN(32, 2)); + RzILOpEffect *for_47 = REPEAT(op_LT_13, seq_46); + + // seq(i = 0x0; while ((i < 0x2)) { seq(seq(h_tmp224; Rd = ((st32) ...; + RzILOpEffect *seq_48 = SEQN(2, op_ASSIGN_11, for_47); + + // Re = ((st32) U); + RzILOpEffect *op_ASSIGN_51 = WRITE_REG(bundle, Re_op, CAST(32, IL_FALSE, VARL("U"))); + + RzILOpEffect *instruction_sequence = SEQN(5, imm_assign_0, op_ASSIGN_3, op_ASSIGN_9, seq_48, op_ASSIGN_51); + return instruction_sequence; +} + +// Rd = membh(Rt<insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *U = UN(32, (ut32)ISA2IMM(hi, 'U')); + // Declare: ut32 EA; + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut16 tmpV; + // Declare: st32 i; + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // U = U; + RzILOpEffect *imm_assign_0 = SETL("U", U); + + // u = u; + RzILOpEffect *imm_assign_4 = SETL("u", u); + + // EA = U + ((ut32) (Rt << u)); + RzILOpPure *op_LSHIFT_6 = SHIFTL0(Rt, 
VARL("u")); + RzILOpPure *op_ADD_8 = ADD(VARL("U"), CAST(32, IL_FALSE, op_LSHIFT_6)); + RzILOpEffect *op_ASSIGN_9 = SETL("EA", op_ADD_8); + + // tmpV = ((ut16) mem_load_16(EA)); + RzILOpPure *ml_EA_13 = LOADW(16, VARL("EA")); + RzILOpEffect *op_ASSIGN_15 = SETL("tmpV", CAST(16, IL_FALSE, ml_EA_13)); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_17 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_20 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp225 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_22 = SETL("h_tmp225", VARL("i")); + + // seq(h_tmp225 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_23 = SEQN(2, op_ASSIGN_hybrid_tmp_22, op_INC_20); + + // Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xffff << i * 0x10)))) | (((ut64) (((st32) ((st8) (((st32) (tmpV >> i * 0x8)) & 0xff))) & 0xffff)) << i * 0x10))); + RzILOpPure *op_MUL_27 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_28 = SHIFTL0(SN(64, 0xffff), op_MUL_27); + RzILOpPure *op_NOT_29 = LOGNOT(op_LSHIFT_28); + RzILOpPure *op_AND_31 = LOGAND(CAST(64, MSB(READ_REG(pkt, Rd_op, true)), READ_REG(pkt, Rd_op, true)), op_NOT_29); + RzILOpPure *op_MUL_33 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_RSHIFT_34 = SHIFTR0(VARL("tmpV"), op_MUL_33); + RzILOpPure *op_AND_37 = LOGAND(CAST(32, IL_FALSE, op_RSHIFT_34), SN(32, 0xff)); + RzILOpPure *op_AND_41 = LOGAND(CAST(32, MSB(CAST(8, MSB(op_AND_37), DUP(op_AND_37))), CAST(8, MSB(DUP(op_AND_37)), DUP(op_AND_37))), SN(32, 0xffff)); + RzILOpPure *op_MUL_44 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_45 = SHIFTL0(CAST(64, IL_FALSE, op_AND_41), op_MUL_44); + RzILOpPure *op_OR_47 = LOGOR(CAST(64, IL_FALSE, op_AND_31), op_LSHIFT_45); + RzILOpEffect *op_ASSIGN_49 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, op_OR_47)); + + // seq(h_tmp225; Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xffff << ...; + RzILOpEffect *seq_51 = op_ASSIGN_49; + + // seq(seq(h_tmp225; Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xffff ...; + RzILOpEffect *seq_52 = SEQN(2, seq_51, seq_23); + + // 
while ((i < 0x2)) { seq(seq(h_tmp225; Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xffff ... }; + RzILOpPure *op_LT_19 = SLT(VARL("i"), SN(32, 2)); + RzILOpEffect *for_53 = REPEAT(op_LT_19, seq_52); + + // seq(i = 0x0; while ((i < 0x2)) { seq(seq(h_tmp225; Rd = ((st32) ...; + RzILOpEffect *seq_54 = SEQN(2, op_ASSIGN_17, for_53); + + RzILOpEffect *instruction_sequence = SEQN(5, imm_assign_0, imm_assign_4, op_ASSIGN_9, op_ASSIGN_15, seq_54); + return instruction_sequence; +} + +// Rdd = membh(Re=II) +RzILOpEffect *hex_il_op_l4_loadbsw4_ap(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *U = UN(32, (ut32)ISA2IMM(hi, 'U')); + // Declare: ut32 EA; + // Declare: ut32 tmpV; + // Declare: st32 i; + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Re_op = ISA2REG(hi, 'e', false); + + // U = U; + RzILOpEffect *imm_assign_0 = SETL("U", U); + + // EA = U; + RzILOpEffect *op_ASSIGN_3 = SETL("EA", VARL("U")); + + // tmpV = ((ut32) mem_load_32(EA)); + RzILOpPure *ml_EA_7 = LOADW(32, VARL("EA")); + RzILOpEffect *op_ASSIGN_9 = SETL("tmpV", CAST(32, IL_FALSE, ml_EA_7)); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_11 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_14 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp226 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_16 = SETL("h_tmp226", VARL("i")); + + // seq(h_tmp226 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_17 = SEQN(2, op_ASSIGN_hybrid_tmp_16, op_INC_14); + + // Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * 0x10)))) | (((ut64) (((st32) ((st8) ((tmpV >> i * 0x8) & ((ut32) 0xff)))) & 0xffff)) << i * 0x10))); + RzILOpPure *op_MUL_21 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_22 = SHIFTL0(SN(64, 0xffff), op_MUL_21); + RzILOpPure *op_NOT_23 = LOGNOT(op_LSHIFT_22); + RzILOpPure *op_AND_24 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_23); + RzILOpPure *op_MUL_26 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_RSHIFT_27 = 
SHIFTR0(VARL("tmpV"), op_MUL_26); + RzILOpPure *op_AND_30 = LOGAND(op_RSHIFT_27, CAST(32, IL_FALSE, SN(32, 0xff))); + RzILOpPure *op_AND_34 = LOGAND(CAST(32, MSB(CAST(8, IL_FALSE, op_AND_30)), CAST(8, IL_FALSE, DUP(op_AND_30))), SN(32, 0xffff)); + RzILOpPure *op_MUL_37 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_38 = SHIFTL0(CAST(64, IL_FALSE, op_AND_34), op_MUL_37); + RzILOpPure *op_OR_40 = LOGOR(CAST(64, IL_FALSE, op_AND_24), op_LSHIFT_38); + RzILOpEffect *op_ASSIGN_42 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_40)); + + // seq(h_tmp226; Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * 0x1 ...; + RzILOpEffect *seq_44 = op_ASSIGN_42; + + // seq(seq(h_tmp226; Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * ...; + RzILOpEffect *seq_45 = SEQN(2, seq_44, seq_17); + + // while ((i < 0x4)) { seq(seq(h_tmp226; Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * ... }; + RzILOpPure *op_LT_13 = SLT(VARL("i"), SN(32, 4)); + RzILOpEffect *for_46 = REPEAT(op_LT_13, seq_45); + + // seq(i = 0x0; while ((i < 0x4)) { seq(seq(h_tmp226; Rdd = ((st64) ...; + RzILOpEffect *seq_47 = SEQN(2, op_ASSIGN_11, for_46); + + // Re = ((st32) U); + RzILOpEffect *op_ASSIGN_50 = WRITE_REG(bundle, Re_op, CAST(32, IL_FALSE, VARL("U"))); + + RzILOpEffect *instruction_sequence = SEQN(5, imm_assign_0, op_ASSIGN_3, op_ASSIGN_9, seq_47, op_ASSIGN_50); + return instruction_sequence; +} + +// Rdd = membh(Rt<insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *U = UN(32, (ut32)ISA2IMM(hi, 'U')); + // Declare: ut32 EA; + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 tmpV; + // Declare: st32 i; + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + + // U = U; + RzILOpEffect *imm_assign_0 = SETL("U", U); + + // u = u; + RzILOpEffect *imm_assign_4 = SETL("u", u); + + // EA = U + ((ut32) (Rt << u)); + RzILOpPure *op_LSHIFT_6 = SHIFTL0(Rt, VARL("u")); + RzILOpPure *op_ADD_8 = 
ADD(VARL("U"), CAST(32, IL_FALSE, op_LSHIFT_6)); + RzILOpEffect *op_ASSIGN_9 = SETL("EA", op_ADD_8); + + // tmpV = ((ut32) mem_load_32(EA)); + RzILOpPure *ml_EA_13 = LOADW(32, VARL("EA")); + RzILOpEffect *op_ASSIGN_15 = SETL("tmpV", CAST(32, IL_FALSE, ml_EA_13)); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_17 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_20 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp227 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_22 = SETL("h_tmp227", VARL("i")); + + // seq(h_tmp227 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_23 = SEQN(2, op_ASSIGN_hybrid_tmp_22, op_INC_20); + + // Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * 0x10)))) | (((ut64) (((st32) ((st8) ((tmpV >> i * 0x8) & ((ut32) 0xff)))) & 0xffff)) << i * 0x10))); + RzILOpPure *op_MUL_27 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_28 = SHIFTL0(SN(64, 0xffff), op_MUL_27); + RzILOpPure *op_NOT_29 = LOGNOT(op_LSHIFT_28); + RzILOpPure *op_AND_30 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_29); + RzILOpPure *op_MUL_32 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_RSHIFT_33 = SHIFTR0(VARL("tmpV"), op_MUL_32); + RzILOpPure *op_AND_36 = LOGAND(op_RSHIFT_33, CAST(32, IL_FALSE, SN(32, 0xff))); + RzILOpPure *op_AND_40 = LOGAND(CAST(32, MSB(CAST(8, IL_FALSE, op_AND_36)), CAST(8, IL_FALSE, DUP(op_AND_36))), SN(32, 0xffff)); + RzILOpPure *op_MUL_43 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_44 = SHIFTL0(CAST(64, IL_FALSE, op_AND_40), op_MUL_43); + RzILOpPure *op_OR_46 = LOGOR(CAST(64, IL_FALSE, op_AND_30), op_LSHIFT_44); + RzILOpEffect *op_ASSIGN_48 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_46)); + + // seq(h_tmp227; Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * 0x1 ...; + RzILOpEffect *seq_50 = op_ASSIGN_48; + + // seq(seq(h_tmp227; Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * ...; + RzILOpEffect *seq_51 = SEQN(2, seq_50, seq_23); + + // while ((i < 0x4)) { seq(seq(h_tmp227; Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * ... 
}; + RzILOpPure *op_LT_19 = SLT(VARL("i"), SN(32, 4)); + RzILOpEffect *for_52 = REPEAT(op_LT_19, seq_51); + + // seq(i = 0x0; while ((i < 0x4)) { seq(seq(h_tmp227; Rdd = ((st64) ...; + RzILOpEffect *seq_53 = SEQN(2, op_ASSIGN_17, for_52); + + RzILOpEffect *instruction_sequence = SEQN(5, imm_assign_0, imm_assign_4, op_ASSIGN_9, op_ASSIGN_15, seq_53); + return instruction_sequence; +} + +// Rd = memubh(Re=II) +RzILOpEffect *hex_il_op_l4_loadbzw2_ap(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *U = UN(32, (ut32)ISA2IMM(hi, 'U')); + // Declare: ut32 EA; + // Declare: ut16 tmpV; + // Declare: st32 i; + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Re_op = ISA2REG(hi, 'e', false); + + // U = U; + RzILOpEffect *imm_assign_0 = SETL("U", U); + + // EA = U; + RzILOpEffect *op_ASSIGN_3 = SETL("EA", VARL("U")); + + // tmpV = ((ut16) mem_load_16(EA)); + RzILOpPure *ml_EA_7 = LOADW(16, VARL("EA")); + RzILOpEffect *op_ASSIGN_9 = SETL("tmpV", CAST(16, IL_FALSE, ml_EA_7)); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_11 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_14 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp228 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_16 = SETL("h_tmp228", VARL("i")); + + // seq(h_tmp228 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_17 = SEQN(2, op_ASSIGN_hybrid_tmp_16, op_INC_14); + + // Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xffff << i * 0x10)))) | (((ut64) (((st32) ((ut8) (((st32) (tmpV >> i * 0x8)) & 0xff))) & 0xffff)) << i * 0x10))); + RzILOpPure *op_MUL_21 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_22 = SHIFTL0(SN(64, 0xffff), op_MUL_21); + RzILOpPure *op_NOT_23 = LOGNOT(op_LSHIFT_22); + RzILOpPure *op_AND_25 = LOGAND(CAST(64, MSB(READ_REG(pkt, Rd_op, true)), READ_REG(pkt, Rd_op, true)), op_NOT_23); + RzILOpPure *op_MUL_27 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_RSHIFT_28 = SHIFTR0(VARL("tmpV"), op_MUL_27); + RzILOpPure *op_AND_31 
= LOGAND(CAST(32, IL_FALSE, op_RSHIFT_28), SN(32, 0xff)); + RzILOpPure *op_AND_35 = LOGAND(CAST(32, IL_FALSE, CAST(8, IL_FALSE, op_AND_31)), SN(32, 0xffff)); + RzILOpPure *op_MUL_38 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_39 = SHIFTL0(CAST(64, IL_FALSE, op_AND_35), op_MUL_38); + RzILOpPure *op_OR_41 = LOGOR(CAST(64, IL_FALSE, op_AND_25), op_LSHIFT_39); + RzILOpEffect *op_ASSIGN_43 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, op_OR_41)); + + // seq(h_tmp228; Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xffff << ...; + RzILOpEffect *seq_45 = op_ASSIGN_43; + + // seq(seq(h_tmp228; Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xffff ...; + RzILOpEffect *seq_46 = SEQN(2, seq_45, seq_17); + + // while ((i < 0x2)) { seq(seq(h_tmp228; Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xffff ... }; + RzILOpPure *op_LT_13 = SLT(VARL("i"), SN(32, 2)); + RzILOpEffect *for_47 = REPEAT(op_LT_13, seq_46); + + // seq(i = 0x0; while ((i < 0x2)) { seq(seq(h_tmp228; Rd = ((st32) ...; + RzILOpEffect *seq_48 = SEQN(2, op_ASSIGN_11, for_47); + + // Re = ((st32) U); + RzILOpEffect *op_ASSIGN_51 = WRITE_REG(bundle, Re_op, CAST(32, IL_FALSE, VARL("U"))); + + RzILOpEffect *instruction_sequence = SEQN(5, imm_assign_0, op_ASSIGN_3, op_ASSIGN_9, seq_48, op_ASSIGN_51); + return instruction_sequence; +} + +// Rd = memubh(Rt<insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *U = UN(32, (ut32)ISA2IMM(hi, 'U')); + // Declare: ut32 EA; + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut16 tmpV; + // Declare: st32 i; + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // U = U; + RzILOpEffect *imm_assign_0 = SETL("U", U); + + // u = u; + RzILOpEffect *imm_assign_4 = SETL("u", u); + + // EA = U + ((ut32) (Rt << u)); + RzILOpPure *op_LSHIFT_6 = SHIFTL0(Rt, VARL("u")); + RzILOpPure *op_ADD_8 = ADD(VARL("U"), CAST(32, IL_FALSE, op_LSHIFT_6)); + RzILOpEffect *op_ASSIGN_9 = SETL("EA", 
op_ADD_8); + + // tmpV = ((ut16) mem_load_16(EA)); + RzILOpPure *ml_EA_13 = LOADW(16, VARL("EA")); + RzILOpEffect *op_ASSIGN_15 = SETL("tmpV", CAST(16, IL_FALSE, ml_EA_13)); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_17 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_20 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp229 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_22 = SETL("h_tmp229", VARL("i")); + + // seq(h_tmp229 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_23 = SEQN(2, op_ASSIGN_hybrid_tmp_22, op_INC_20); + + // Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xffff << i * 0x10)))) | (((ut64) (((st32) ((ut8) (((st32) (tmpV >> i * 0x8)) & 0xff))) & 0xffff)) << i * 0x10))); + RzILOpPure *op_MUL_27 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_28 = SHIFTL0(SN(64, 0xffff), op_MUL_27); + RzILOpPure *op_NOT_29 = LOGNOT(op_LSHIFT_28); + RzILOpPure *op_AND_31 = LOGAND(CAST(64, MSB(READ_REG(pkt, Rd_op, true)), READ_REG(pkt, Rd_op, true)), op_NOT_29); + RzILOpPure *op_MUL_33 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_RSHIFT_34 = SHIFTR0(VARL("tmpV"), op_MUL_33); + RzILOpPure *op_AND_37 = LOGAND(CAST(32, IL_FALSE, op_RSHIFT_34), SN(32, 0xff)); + RzILOpPure *op_AND_41 = LOGAND(CAST(32, IL_FALSE, CAST(8, IL_FALSE, op_AND_37)), SN(32, 0xffff)); + RzILOpPure *op_MUL_44 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_45 = SHIFTL0(CAST(64, IL_FALSE, op_AND_41), op_MUL_44); + RzILOpPure *op_OR_47 = LOGOR(CAST(64, IL_FALSE, op_AND_31), op_LSHIFT_45); + RzILOpEffect *op_ASSIGN_49 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, op_OR_47)); + + // seq(h_tmp229; Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xffff << ...; + RzILOpEffect *seq_51 = op_ASSIGN_49; + + // seq(seq(h_tmp229; Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xffff ...; + RzILOpEffect *seq_52 = SEQN(2, seq_51, seq_23); + + // while ((i < 0x2)) { seq(seq(h_tmp229; Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xffff ... 
}; + RzILOpPure *op_LT_19 = SLT(VARL("i"), SN(32, 2)); + RzILOpEffect *for_53 = REPEAT(op_LT_19, seq_52); + + // seq(i = 0x0; while ((i < 0x2)) { seq(seq(h_tmp229; Rd = ((st32) ...; + RzILOpEffect *seq_54 = SEQN(2, op_ASSIGN_17, for_53); + + RzILOpEffect *instruction_sequence = SEQN(5, imm_assign_0, imm_assign_4, op_ASSIGN_9, op_ASSIGN_15, seq_54); + return instruction_sequence; +} + +// Rdd = memubh(Re=II) +RzILOpEffect *hex_il_op_l4_loadbzw4_ap(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *U = UN(32, (ut32)ISA2IMM(hi, 'U')); + // Declare: ut32 EA; + // Declare: ut32 tmpV; + // Declare: st32 i; + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Re_op = ISA2REG(hi, 'e', false); + + // U = U; + RzILOpEffect *imm_assign_0 = SETL("U", U); + + // EA = U; + RzILOpEffect *op_ASSIGN_3 = SETL("EA", VARL("U")); + + // tmpV = ((ut32) mem_load_32(EA)); + RzILOpPure *ml_EA_7 = LOADW(32, VARL("EA")); + RzILOpEffect *op_ASSIGN_9 = SETL("tmpV", CAST(32, IL_FALSE, ml_EA_7)); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_11 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_14 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp230 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_16 = SETL("h_tmp230", VARL("i")); + + // seq(h_tmp230 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_17 = SEQN(2, op_ASSIGN_hybrid_tmp_16, op_INC_14); + + // Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * 0x10)))) | (((ut64) (((st32) ((ut8) ((tmpV >> i * 0x8) & ((ut32) 0xff)))) & 0xffff)) << i * 0x10))); + RzILOpPure *op_MUL_21 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_22 = SHIFTL0(SN(64, 0xffff), op_MUL_21); + RzILOpPure *op_NOT_23 = LOGNOT(op_LSHIFT_22); + RzILOpPure *op_AND_24 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_23); + RzILOpPure *op_MUL_26 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_RSHIFT_27 = SHIFTR0(VARL("tmpV"), op_MUL_26); + RzILOpPure *op_AND_30 = LOGAND(op_RSHIFT_27, CAST(32, IL_FALSE, SN(32, 
0xff))); + RzILOpPure *op_AND_34 = LOGAND(CAST(32, IL_FALSE, CAST(8, IL_FALSE, op_AND_30)), SN(32, 0xffff)); + RzILOpPure *op_MUL_37 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_38 = SHIFTL0(CAST(64, IL_FALSE, op_AND_34), op_MUL_37); + RzILOpPure *op_OR_40 = LOGOR(CAST(64, IL_FALSE, op_AND_24), op_LSHIFT_38); + RzILOpEffect *op_ASSIGN_42 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_40)); + + // seq(h_tmp230; Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * 0x1 ...; + RzILOpEffect *seq_44 = op_ASSIGN_42; + + // seq(seq(h_tmp230; Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * ...; + RzILOpEffect *seq_45 = SEQN(2, seq_44, seq_17); + + // while ((i < 0x4)) { seq(seq(h_tmp230; Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * ... }; + RzILOpPure *op_LT_13 = SLT(VARL("i"), SN(32, 4)); + RzILOpEffect *for_46 = REPEAT(op_LT_13, seq_45); + + // seq(i = 0x0; while ((i < 0x4)) { seq(seq(h_tmp230; Rdd = ((st64) ...; + RzILOpEffect *seq_47 = SEQN(2, op_ASSIGN_11, for_46); + + // Re = ((st32) U); + RzILOpEffect *op_ASSIGN_50 = WRITE_REG(bundle, Re_op, CAST(32, IL_FALSE, VARL("U"))); + + RzILOpEffect *instruction_sequence = SEQN(5, imm_assign_0, op_ASSIGN_3, op_ASSIGN_9, seq_47, op_ASSIGN_50); + return instruction_sequence; +} + +// Rdd = memubh(Rt<insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *U = UN(32, (ut32)ISA2IMM(hi, 'U')); + // Declare: ut32 EA; + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 tmpV; + // Declare: st32 i; + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + + // U = U; + RzILOpEffect *imm_assign_0 = SETL("U", U); + + // u = u; + RzILOpEffect *imm_assign_4 = SETL("u", u); + + // EA = U + ((ut32) (Rt << u)); + RzILOpPure *op_LSHIFT_6 = SHIFTL0(Rt, VARL("u")); + RzILOpPure *op_ADD_8 = ADD(VARL("U"), CAST(32, IL_FALSE, op_LSHIFT_6)); + RzILOpEffect *op_ASSIGN_9 = SETL("EA", op_ADD_8); + + // tmpV = ((ut32) mem_load_32(EA)); + 
RzILOpPure *ml_EA_13 = LOADW(32, VARL("EA")); + RzILOpEffect *op_ASSIGN_15 = SETL("tmpV", CAST(32, IL_FALSE, ml_EA_13)); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_17 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_20 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp231 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_22 = SETL("h_tmp231", VARL("i")); + + // seq(h_tmp231 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_23 = SEQN(2, op_ASSIGN_hybrid_tmp_22, op_INC_20); + + // Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * 0x10)))) | (((ut64) (((st32) ((ut8) ((tmpV >> i * 0x8) & ((ut32) 0xff)))) & 0xffff)) << i * 0x10))); + RzILOpPure *op_MUL_27 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_28 = SHIFTL0(SN(64, 0xffff), op_MUL_27); + RzILOpPure *op_NOT_29 = LOGNOT(op_LSHIFT_28); + RzILOpPure *op_AND_30 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_29); + RzILOpPure *op_MUL_32 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_RSHIFT_33 = SHIFTR0(VARL("tmpV"), op_MUL_32); + RzILOpPure *op_AND_36 = LOGAND(op_RSHIFT_33, CAST(32, IL_FALSE, SN(32, 0xff))); + RzILOpPure *op_AND_40 = LOGAND(CAST(32, IL_FALSE, CAST(8, IL_FALSE, op_AND_36)), SN(32, 0xffff)); + RzILOpPure *op_MUL_43 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_44 = SHIFTL0(CAST(64, IL_FALSE, op_AND_40), op_MUL_43); + RzILOpPure *op_OR_46 = LOGOR(CAST(64, IL_FALSE, op_AND_30), op_LSHIFT_44); + RzILOpEffect *op_ASSIGN_48 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_46)); + + // seq(h_tmp231; Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * 0x1 ...; + RzILOpEffect *seq_50 = op_ASSIGN_48; + + // seq(seq(h_tmp231; Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * ...; + RzILOpEffect *seq_51 = SEQN(2, seq_50, seq_23); + + // while ((i < 0x4)) { seq(seq(h_tmp231; Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * ... 
}; + RzILOpPure *op_LT_19 = SLT(VARL("i"), SN(32, 4)); + RzILOpEffect *for_52 = REPEAT(op_LT_19, seq_51); + + // seq(i = 0x0; while ((i < 0x4)) { seq(seq(h_tmp231; Rdd = ((st64) ...; + RzILOpEffect *seq_53 = SEQN(2, op_ASSIGN_17, for_52); + + RzILOpEffect *instruction_sequence = SEQN(5, imm_assign_0, imm_assign_4, op_ASSIGN_9, op_ASSIGN_15, seq_53); + return instruction_sequence; +} + +// Rdd = memd_aq(Rs) +RzILOpEffect *hex_il_op_l4_loadd_aq(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + + // EA = ((ut32) Rs); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, Rs)); + + // Rdd = ((st64) ((ut64) mem_load_64(EA))); + RzILOpPure *ml_EA_6 = LOADW(64, VARL("EA")); + RzILOpEffect *op_ASSIGN_9 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, CAST(64, IL_FALSE, ml_EA_6))); + + RzILOpEffect *instruction_sequence = SEQN(2, op_ASSIGN_3, op_ASSIGN_9); + return instruction_sequence; +} + +// Rdd = memd_locked(Rs) +RzILOpEffect *hex_il_op_l4_loadd_locked(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Rd = memb(Re=II) +RzILOpEffect *hex_il_op_l4_loadrb_ap(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + // READ + RzILOpPure *U = UN(32, (ut32)ISA2IMM(hi, 'U')); + // Declare: ut32 EA; + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Re_op = ISA2REG(hi, 'e', false); + + // U = U; + RzILOpEffect *imm_assign_0 = SETL("U", U); + + // EA = U; + RzILOpEffect *op_ASSIGN_3 = SETL("EA", VARL("U")); + + // Rd = ((st32) ((st8) mem_load_8(EA))); + RzILOpPure *ml_EA_6 = LOADW(8, VARL("EA")); + RzILOpEffect *op_ASSIGN_9 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(CAST(8, MSB(ml_EA_6), DUP(ml_EA_6))), CAST(8, MSB(DUP(ml_EA_6)), DUP(ml_EA_6)))); + + // Re = ((st32) U); + RzILOpEffect *op_ASSIGN_12 = WRITE_REG(bundle, Re_op, CAST(32, 
IL_FALSE, VARL("U"))); + + RzILOpEffect *instruction_sequence = SEQN(4, imm_assign_0, op_ASSIGN_3, op_ASSIGN_9, op_ASSIGN_12); + return instruction_sequence; +} + +// Rd = memb(Rs+Rt<insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // u = u; + RzILOpEffect *imm_assign_3 = SETL("u", u); + + // EA = ((ut32) Rs + (Rt << u)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(Rt, VARL("u")); + RzILOpPure *op_ADD_6 = ADD(Rs, op_LSHIFT_5); + RzILOpEffect *op_ASSIGN_8 = SETL("EA", CAST(32, IL_FALSE, op_ADD_6)); + + // Rd = ((st32) ((st8) mem_load_8(EA))); + RzILOpPure *ml_EA_11 = LOADW(8, VARL("EA")); + RzILOpEffect *op_ASSIGN_14 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(CAST(8, MSB(ml_EA_11), DUP(ml_EA_11))), CAST(8, MSB(DUP(ml_EA_11)), DUP(ml_EA_11)))); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_3, op_ASSIGN_8, op_ASSIGN_14); + return instruction_sequence; +} + +// Rd = memb(Rt<insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *U = UN(32, (ut32)ISA2IMM(hi, 'U')); + // Declare: ut32 EA; + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // U = U; + RzILOpEffect *imm_assign_0 = SETL("U", U); + + // u = u; + RzILOpEffect *imm_assign_4 = SETL("u", u); + + // EA = U + ((ut32) (Rt << u)); + RzILOpPure *op_LSHIFT_6 = SHIFTL0(Rt, VARL("u")); + RzILOpPure *op_ADD_8 = ADD(VARL("U"), CAST(32, IL_FALSE, op_LSHIFT_6)); + RzILOpEffect *op_ASSIGN_9 = SETL("EA", op_ADD_8); + + // Rd = ((st32) ((st8) mem_load_8(EA))); + RzILOpPure *ml_EA_12 = LOADW(8, VARL("EA")); + RzILOpEffect *op_ASSIGN_15 = WRITE_REG(bundle, Rd_op, CAST(32, 
MSB(CAST(8, MSB(ml_EA_12), DUP(ml_EA_12))), CAST(8, MSB(DUP(ml_EA_12)), DUP(ml_EA_12)))); + + RzILOpEffect *instruction_sequence = SEQN(4, imm_assign_0, imm_assign_4, op_ASSIGN_9, op_ASSIGN_15); + return instruction_sequence; +} + +// Rdd = memd(Re=II) +RzILOpEffect *hex_il_op_l4_loadrd_ap(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + // READ + RzILOpPure *U = UN(32, (ut32)ISA2IMM(hi, 'U')); + // Declare: ut32 EA; + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Re_op = ISA2REG(hi, 'e', false); + + // U = U; + RzILOpEffect *imm_assign_0 = SETL("U", U); + + // EA = U; + RzILOpEffect *op_ASSIGN_3 = SETL("EA", VARL("U")); + + // Rdd = ((st64) ((ut64) mem_load_64(EA))); + RzILOpPure *ml_EA_6 = LOADW(64, VARL("EA")); + RzILOpEffect *op_ASSIGN_9 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, CAST(64, IL_FALSE, ml_EA_6))); + + // Re = ((st32) U); + RzILOpEffect *op_ASSIGN_12 = WRITE_REG(bundle, Re_op, CAST(32, IL_FALSE, VARL("U"))); + + RzILOpEffect *instruction_sequence = SEQN(4, imm_assign_0, op_ASSIGN_3, op_ASSIGN_9, op_ASSIGN_12); + return instruction_sequence; +} + +// Rdd = memd(Rs+Rt<insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + + // u = u; + RzILOpEffect *imm_assign_3 = SETL("u", u); + + // EA = ((ut32) Rs + (Rt << u)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(Rt, VARL("u")); + RzILOpPure *op_ADD_6 = ADD(Rs, op_LSHIFT_5); + RzILOpEffect *op_ASSIGN_8 = SETL("EA", CAST(32, IL_FALSE, op_ADD_6)); + + // Rdd = ((st64) ((ut64) mem_load_64(EA))); + RzILOpPure *ml_EA_11 = LOADW(64, VARL("EA")); + RzILOpEffect *op_ASSIGN_14 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, CAST(64, IL_FALSE, ml_EA_11))); + + RzILOpEffect 
*instruction_sequence = SEQN(3, imm_assign_3, op_ASSIGN_8, op_ASSIGN_14); + return instruction_sequence; +} + +// Rdd = memd(Rt<insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *U = UN(32, (ut32)ISA2IMM(hi, 'U')); + // Declare: ut32 EA; + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + + // U = U; + RzILOpEffect *imm_assign_0 = SETL("U", U); + + // u = u; + RzILOpEffect *imm_assign_4 = SETL("u", u); + + // EA = U + ((ut32) (Rt << u)); + RzILOpPure *op_LSHIFT_6 = SHIFTL0(Rt, VARL("u")); + RzILOpPure *op_ADD_8 = ADD(VARL("U"), CAST(32, IL_FALSE, op_LSHIFT_6)); + RzILOpEffect *op_ASSIGN_9 = SETL("EA", op_ADD_8); + + // Rdd = ((st64) ((ut64) mem_load_64(EA))); + RzILOpPure *ml_EA_12 = LOADW(64, VARL("EA")); + RzILOpEffect *op_ASSIGN_15 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, CAST(64, IL_FALSE, ml_EA_12))); + + RzILOpEffect *instruction_sequence = SEQN(4, imm_assign_0, imm_assign_4, op_ASSIGN_9, op_ASSIGN_15); + return instruction_sequence; +} + +// Rd = memh(Re=II) +RzILOpEffect *hex_il_op_l4_loadrh_ap(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + // READ + RzILOpPure *U = UN(32, (ut32)ISA2IMM(hi, 'U')); + // Declare: ut32 EA; + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Re_op = ISA2REG(hi, 'e', false); + + // U = U; + RzILOpEffect *imm_assign_0 = SETL("U", U); + + // EA = U; + RzILOpEffect *op_ASSIGN_3 = SETL("EA", VARL("U")); + + // Rd = ((st32) ((st16) mem_load_16(EA))); + RzILOpPure *ml_EA_6 = LOADW(16, VARL("EA")); + RzILOpEffect *op_ASSIGN_9 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(CAST(16, MSB(ml_EA_6), DUP(ml_EA_6))), CAST(16, MSB(DUP(ml_EA_6)), DUP(ml_EA_6)))); + + // Re = ((st32) U); + RzILOpEffect *op_ASSIGN_12 = WRITE_REG(bundle, Re_op, CAST(32, IL_FALSE, VARL("U"))); + + RzILOpEffect *instruction_sequence = SEQN(4, imm_assign_0, op_ASSIGN_3, op_ASSIGN_9, 
op_ASSIGN_12); + return instruction_sequence; +} + +// Rd = memh(Rs+Rt<insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // u = u; + RzILOpEffect *imm_assign_3 = SETL("u", u); + + // EA = ((ut32) Rs + (Rt << u)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(Rt, VARL("u")); + RzILOpPure *op_ADD_6 = ADD(Rs, op_LSHIFT_5); + RzILOpEffect *op_ASSIGN_8 = SETL("EA", CAST(32, IL_FALSE, op_ADD_6)); + + // Rd = ((st32) ((st16) mem_load_16(EA))); + RzILOpPure *ml_EA_11 = LOADW(16, VARL("EA")); + RzILOpEffect *op_ASSIGN_14 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(CAST(16, MSB(ml_EA_11), DUP(ml_EA_11))), CAST(16, MSB(DUP(ml_EA_11)), DUP(ml_EA_11)))); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_3, op_ASSIGN_8, op_ASSIGN_14); + return instruction_sequence; +} + +// Rd = memh(Rt<insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *U = UN(32, (ut32)ISA2IMM(hi, 'U')); + // Declare: ut32 EA; + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // U = U; + RzILOpEffect *imm_assign_0 = SETL("U", U); + + // u = u; + RzILOpEffect *imm_assign_4 = SETL("u", u); + + // EA = U + ((ut32) (Rt << u)); + RzILOpPure *op_LSHIFT_6 = SHIFTL0(Rt, VARL("u")); + RzILOpPure *op_ADD_8 = ADD(VARL("U"), CAST(32, IL_FALSE, op_LSHIFT_6)); + RzILOpEffect *op_ASSIGN_9 = SETL("EA", op_ADD_8); + + // Rd = ((st32) ((st16) mem_load_16(EA))); + RzILOpPure *ml_EA_12 = LOADW(16, VARL("EA")); + RzILOpEffect *op_ASSIGN_15 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(CAST(16, MSB(ml_EA_12), DUP(ml_EA_12))), CAST(16, MSB(DUP(ml_EA_12)), DUP(ml_EA_12)))); + + 
RzILOpEffect *instruction_sequence = SEQN(4, imm_assign_0, imm_assign_4, op_ASSIGN_9, op_ASSIGN_15); + return instruction_sequence; +} + +// Rd = memw(Re=II) +RzILOpEffect *hex_il_op_l4_loadri_ap(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + // READ + RzILOpPure *U = UN(32, (ut32)ISA2IMM(hi, 'U')); + // Declare: ut32 EA; + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Re_op = ISA2REG(hi, 'e', false); + + // U = U; + RzILOpEffect *imm_assign_0 = SETL("U", U); + + // EA = U; + RzILOpEffect *op_ASSIGN_3 = SETL("EA", VARL("U")); + + // Rd = ((st32) ((ut32) mem_load_32(EA))); + RzILOpPure *ml_EA_6 = LOADW(32, VARL("EA")); + RzILOpEffect *op_ASSIGN_9 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, CAST(32, IL_FALSE, ml_EA_6))); + + // Re = ((st32) U); + RzILOpEffect *op_ASSIGN_12 = WRITE_REG(bundle, Re_op, CAST(32, IL_FALSE, VARL("U"))); + + RzILOpEffect *instruction_sequence = SEQN(4, imm_assign_0, op_ASSIGN_3, op_ASSIGN_9, op_ASSIGN_12); + return instruction_sequence; +} + +// Rd = memw(Rs+Rt<insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // u = u; + RzILOpEffect *imm_assign_3 = SETL("u", u); + + // EA = ((ut32) Rs + (Rt << u)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(Rt, VARL("u")); + RzILOpPure *op_ADD_6 = ADD(Rs, op_LSHIFT_5); + RzILOpEffect *op_ASSIGN_8 = SETL("EA", CAST(32, IL_FALSE, op_ADD_6)); + + // Rd = ((st32) ((ut32) mem_load_32(EA))); + RzILOpPure *ml_EA_11 = LOADW(32, VARL("EA")); + RzILOpEffect *op_ASSIGN_14 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, CAST(32, IL_FALSE, ml_EA_11))); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_3, op_ASSIGN_8, op_ASSIGN_14); + return instruction_sequence; +} + +// Rd = 
memw(Rt<insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *U = UN(32, (ut32)ISA2IMM(hi, 'U')); + // Declare: ut32 EA; + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // U = U; + RzILOpEffect *imm_assign_0 = SETL("U", U); + + // u = u; + RzILOpEffect *imm_assign_4 = SETL("u", u); + + // EA = U + ((ut32) (Rt << u)); + RzILOpPure *op_LSHIFT_6 = SHIFTL0(Rt, VARL("u")); + RzILOpPure *op_ADD_8 = ADD(VARL("U"), CAST(32, IL_FALSE, op_LSHIFT_6)); + RzILOpEffect *op_ASSIGN_9 = SETL("EA", op_ADD_8); + + // Rd = ((st32) ((ut32) mem_load_32(EA))); + RzILOpPure *ml_EA_12 = LOADW(32, VARL("EA")); + RzILOpEffect *op_ASSIGN_15 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, CAST(32, IL_FALSE, ml_EA_12))); + + RzILOpEffect *instruction_sequence = SEQN(4, imm_assign_0, imm_assign_4, op_ASSIGN_9, op_ASSIGN_15); + return instruction_sequence; +} + +// Rd = memub(Re=II) +RzILOpEffect *hex_il_op_l4_loadrub_ap(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + // READ + RzILOpPure *U = UN(32, (ut32)ISA2IMM(hi, 'U')); + // Declare: ut32 EA; + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Re_op = ISA2REG(hi, 'e', false); + + // U = U; + RzILOpEffect *imm_assign_0 = SETL("U", U); + + // EA = U; + RzILOpEffect *op_ASSIGN_3 = SETL("EA", VARL("U")); + + // Rd = ((st32) ((ut8) mem_load_8(EA))); + RzILOpPure *ml_EA_6 = LOADW(8, VARL("EA")); + RzILOpEffect *op_ASSIGN_9 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, CAST(8, IL_FALSE, ml_EA_6))); + + // Re = ((st32) U); + RzILOpEffect *op_ASSIGN_12 = WRITE_REG(bundle, Re_op, CAST(32, IL_FALSE, VARL("U"))); + + RzILOpEffect *instruction_sequence = SEQN(4, imm_assign_0, op_ASSIGN_3, op_ASSIGN_9, op_ASSIGN_12); + return instruction_sequence; +} + +// Rd = memub(Rs+Rt<insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 
's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // u = u; + RzILOpEffect *imm_assign_3 = SETL("u", u); + + // EA = ((ut32) Rs + (Rt << u)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(Rt, VARL("u")); + RzILOpPure *op_ADD_6 = ADD(Rs, op_LSHIFT_5); + RzILOpEffect *op_ASSIGN_8 = SETL("EA", CAST(32, IL_FALSE, op_ADD_6)); + + // Rd = ((st32) ((ut8) mem_load_8(EA))); + RzILOpPure *ml_EA_11 = LOADW(8, VARL("EA")); + RzILOpEffect *op_ASSIGN_14 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, CAST(8, IL_FALSE, ml_EA_11))); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_3, op_ASSIGN_8, op_ASSIGN_14); + return instruction_sequence; +} + +// Rd = memub(Rt<insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *U = UN(32, (ut32)ISA2IMM(hi, 'U')); + // Declare: ut32 EA; + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // U = U; + RzILOpEffect *imm_assign_0 = SETL("U", U); + + // u = u; + RzILOpEffect *imm_assign_4 = SETL("u", u); + + // EA = U + ((ut32) (Rt << u)); + RzILOpPure *op_LSHIFT_6 = SHIFTL0(Rt, VARL("u")); + RzILOpPure *op_ADD_8 = ADD(VARL("U"), CAST(32, IL_FALSE, op_LSHIFT_6)); + RzILOpEffect *op_ASSIGN_9 = SETL("EA", op_ADD_8); + + // Rd = ((st32) ((ut8) mem_load_8(EA))); + RzILOpPure *ml_EA_12 = LOADW(8, VARL("EA")); + RzILOpEffect *op_ASSIGN_15 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, CAST(8, IL_FALSE, ml_EA_12))); + + RzILOpEffect *instruction_sequence = SEQN(4, imm_assign_0, imm_assign_4, op_ASSIGN_9, op_ASSIGN_15); + return instruction_sequence; +} + +// Rd = memuh(Re=II) +RzILOpEffect *hex_il_op_l4_loadruh_ap(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + // READ + RzILOpPure *U 
= UN(32, (ut32)ISA2IMM(hi, 'U')); + // Declare: ut32 EA; + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Re_op = ISA2REG(hi, 'e', false); + + // U = U; + RzILOpEffect *imm_assign_0 = SETL("U", U); + + // EA = U; + RzILOpEffect *op_ASSIGN_3 = SETL("EA", VARL("U")); + + // Rd = ((st32) ((ut16) mem_load_16(EA))); + RzILOpPure *ml_EA_6 = LOADW(16, VARL("EA")); + RzILOpEffect *op_ASSIGN_9 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, CAST(16, IL_FALSE, ml_EA_6))); + + // Re = ((st32) U); + RzILOpEffect *op_ASSIGN_12 = WRITE_REG(bundle, Re_op, CAST(32, IL_FALSE, VARL("U"))); + + RzILOpEffect *instruction_sequence = SEQN(4, imm_assign_0, op_ASSIGN_3, op_ASSIGN_9, op_ASSIGN_12); + return instruction_sequence; +} + +// Rd = memuh(Rs+Rt<insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // u = u; + RzILOpEffect *imm_assign_3 = SETL("u", u); + + // EA = ((ut32) Rs + (Rt << u)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(Rt, VARL("u")); + RzILOpPure *op_ADD_6 = ADD(Rs, op_LSHIFT_5); + RzILOpEffect *op_ASSIGN_8 = SETL("EA", CAST(32, IL_FALSE, op_ADD_6)); + + // Rd = ((st32) ((ut16) mem_load_16(EA))); + RzILOpPure *ml_EA_11 = LOADW(16, VARL("EA")); + RzILOpEffect *op_ASSIGN_14 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, CAST(16, IL_FALSE, ml_EA_11))); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_3, op_ASSIGN_8, op_ASSIGN_14); + return instruction_sequence; +} + +// Rd = memuh(Rt<insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *U = UN(32, (ut32)ISA2IMM(hi, 'U')); + // Declare: ut32 EA; + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const 
HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // U = U; + RzILOpEffect *imm_assign_0 = SETL("U", U); + + // u = u; + RzILOpEffect *imm_assign_4 = SETL("u", u); + + // EA = U + ((ut32) (Rt << u)); + RzILOpPure *op_LSHIFT_6 = SHIFTL0(Rt, VARL("u")); + RzILOpPure *op_ADD_8 = ADD(VARL("U"), CAST(32, IL_FALSE, op_LSHIFT_6)); + RzILOpEffect *op_ASSIGN_9 = SETL("EA", op_ADD_8); + + // Rd = ((st32) ((ut16) mem_load_16(EA))); + RzILOpPure *ml_EA_12 = LOADW(16, VARL("EA")); + RzILOpEffect *op_ASSIGN_15 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, CAST(16, IL_FALSE, ml_EA_12))); + + RzILOpEffect *instruction_sequence = SEQN(4, imm_assign_0, imm_assign_4, op_ASSIGN_9, op_ASSIGN_15); + return instruction_sequence; +} + +// memb(Rs+Ii) |= Rt +RzILOpEffect *hex_il_op_l4_or_memopb_io(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + // Declare: st32 tmp; + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = ((ut32) Rs) + u; + RzILOpPure *op_ADD_5 = ADD(CAST(32, IL_FALSE, Rs), VARL("u")); + RzILOpEffect *op_ASSIGN_6 = SETL("EA", op_ADD_5); + + // tmp = ((st32) ((st8) mem_load_8(EA))); + RzILOpPure *ml_EA_9 = LOADW(8, VARL("EA")); + RzILOpEffect *op_ASSIGN_12 = SETL("tmp", CAST(32, MSB(CAST(8, MSB(ml_EA_9), DUP(ml_EA_9))), CAST(8, MSB(DUP(ml_EA_9)), DUP(ml_EA_9)))); + + // tmp = (tmp | Rt); + RzILOpPure *op_OR_14 = LOGOR(VARL("tmp"), Rt); + RzILOpEffect *op_ASSIGN_OR_15 = SETL("tmp", op_OR_14); + + // mem_store_ut8(EA, ((ut8) tmp)); + RzILOpEffect *ms_cast_ut8_16_17 = STOREW(VARL("EA"), CAST(8, IL_FALSE, VARL("tmp"))); + + RzILOpEffect *instruction_sequence = SEQN(5, imm_assign_0, op_ASSIGN_6, op_ASSIGN_12, op_ASSIGN_OR_15, ms_cast_ut8_16_17); + return 
instruction_sequence; +} + +// memh(Rs+Ii) |= Rt +RzILOpEffect *hex_il_op_l4_or_memoph_io(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + // Declare: st32 tmp; + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = ((ut32) Rs) + u; + RzILOpPure *op_ADD_5 = ADD(CAST(32, IL_FALSE, Rs), VARL("u")); + RzILOpEffect *op_ASSIGN_6 = SETL("EA", op_ADD_5); + + // tmp = ((st32) ((st16) mem_load_16(EA))); + RzILOpPure *ml_EA_9 = LOADW(16, VARL("EA")); + RzILOpEffect *op_ASSIGN_12 = SETL("tmp", CAST(32, MSB(CAST(16, MSB(ml_EA_9), DUP(ml_EA_9))), CAST(16, MSB(DUP(ml_EA_9)), DUP(ml_EA_9)))); + + // tmp = (tmp | Rt); + RzILOpPure *op_OR_14 = LOGOR(VARL("tmp"), Rt); + RzILOpEffect *op_ASSIGN_OR_15 = SETL("tmp", op_OR_14); + + // mem_store_ut16(EA, ((ut16) tmp)); + RzILOpEffect *ms_cast_ut16_16_17 = STOREW(VARL("EA"), CAST(16, IL_FALSE, VARL("tmp"))); + + RzILOpEffect *instruction_sequence = SEQN(5, imm_assign_0, op_ASSIGN_6, op_ASSIGN_12, op_ASSIGN_OR_15, ms_cast_ut16_16_17); + return instruction_sequence; +} + +// memw(Rs+Ii) |= Rt +RzILOpEffect *hex_il_op_l4_or_memopw_io(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + // Declare: st32 tmp; + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = ((ut32) Rs) + u; + RzILOpPure *op_ADD_5 = ADD(CAST(32, IL_FALSE, Rs), VARL("u")); + RzILOpEffect *op_ASSIGN_6 = SETL("EA", 
op_ADD_5); + + // tmp = ((st32) mem_load_32(EA)); + RzILOpPure *ml_EA_9 = LOADW(32, VARL("EA")); + RzILOpEffect *op_ASSIGN_11 = SETL("tmp", CAST(32, MSB(ml_EA_9), DUP(ml_EA_9))); + + // tmp = (tmp | Rt); + RzILOpPure *op_OR_13 = LOGOR(VARL("tmp"), Rt); + RzILOpEffect *op_ASSIGN_OR_14 = SETL("tmp", op_OR_13); + + // mem_store_ut32(EA, ((ut32) tmp)); + RzILOpEffect *ms_cast_ut32_15_16 = STOREW(VARL("EA"), CAST(32, IL_FALSE, VARL("tmp"))); + + RzILOpEffect *instruction_sequence = SEQN(5, imm_assign_0, op_ASSIGN_6, op_ASSIGN_11, op_ASSIGN_OR_14, ms_cast_ut32_15_16); + return instruction_sequence; +} + +// if (!Pt) Rd = memb(Ii) +RzILOpEffect *hex_il_op_l4_ploadrbf_abs(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Pt_op = ISA2REG(hi, 't', false); + RzILOpPure *Pt = READ_REG(pkt, Pt_op, false); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = u; + RzILOpEffect *op_ASSIGN_3 = SETL("EA", VARL("u")); + + // Rd = ((st32) ((st8) mem_load_8(EA))); + RzILOpPure *ml_EA_11 = LOADW(8, VARL("EA")); + RzILOpEffect *op_ASSIGN_14 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(CAST(8, MSB(ml_EA_11), DUP(ml_EA_11))), CAST(8, MSB(DUP(ml_EA_11)), DUP(ml_EA_11)))); + + // nop; + RzILOpEffect *nop_15 = NOP(); + + // seq(Rd = ((st32) ((st8) mem_load_8(EA)))); + RzILOpEffect *seq_then_17 = op_ASSIGN_14; + + // seq(nop); + RzILOpEffect *seq_else_18 = nop_15; + + // if (! 
(((st32) Pt) & 0x1)) {seq(Rd = ((st32) ((st8) mem_load_8(EA))))} else {seq(nop)}; + RzILOpPure *op_AND_8 = LOGAND(CAST(32, MSB(Pt), DUP(Pt)), SN(32, 1)); + RzILOpPure *op_INV_9 = INV(NON_ZERO(op_AND_8)); + RzILOpEffect *branch_19 = BRANCH(op_INV_9, seq_then_17, seq_else_18); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_3, branch_19); + return instruction_sequence; +} + +// if (!Pv) Rd = memb(Rs+Rt<insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Pv_op = ISA2REG(hi, 'v', false); + RzILOpPure *Pv = READ_REG(pkt, Pv_op, false); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // u = u; + RzILOpEffect *imm_assign_3 = SETL("u", u); + + // EA = ((ut32) Rs + (Rt << u)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(Rt, VARL("u")); + RzILOpPure *op_ADD_6 = ADD(Rs, op_LSHIFT_5); + RzILOpEffect *op_ASSIGN_8 = SETL("EA", CAST(32, IL_FALSE, op_ADD_6)); + + // Rd = ((st32) ((st8) mem_load_8(EA))); + RzILOpPure *ml_EA_16 = LOADW(8, VARL("EA")); + RzILOpEffect *op_ASSIGN_19 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(CAST(8, MSB(ml_EA_16), DUP(ml_EA_16))), CAST(8, MSB(DUP(ml_EA_16)), DUP(ml_EA_16)))); + + // nop; + RzILOpEffect *nop_20 = NOP(); + + // seq(Rd = ((st32) ((st8) mem_load_8(EA)))); + RzILOpEffect *seq_then_22 = op_ASSIGN_19; + + // seq(nop); + RzILOpEffect *seq_else_23 = nop_20; + + // if (! 
(((st32) Pv) & 0x1)) {seq(Rd = ((st32) ((st8) mem_load_8(EA))))} else {seq(nop)}; + RzILOpPure *op_AND_13 = LOGAND(CAST(32, MSB(Pv), DUP(Pv)), SN(32, 1)); + RzILOpPure *op_INV_14 = INV(NON_ZERO(op_AND_13)); + RzILOpEffect *branch_24 = BRANCH(op_INV_14, seq_then_22, seq_else_23); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_3, op_ASSIGN_8, branch_24); + return instruction_sequence; +} + +// if (!Pt.new) Rd = memb(Ii) +RzILOpEffect *hex_il_op_l4_ploadrbfnew_abs(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Pt_new_op = ISA2REG(hi, 't', true); + RzILOpPure *Pt_new = READ_REG(pkt, Pt_new_op, true); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = u; + RzILOpEffect *op_ASSIGN_3 = SETL("EA", VARL("u")); + + // Rd = ((st32) ((st8) mem_load_8(EA))); + RzILOpPure *ml_EA_11 = LOADW(8, VARL("EA")); + RzILOpEffect *op_ASSIGN_14 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(CAST(8, MSB(ml_EA_11), DUP(ml_EA_11))), CAST(8, MSB(DUP(ml_EA_11)), DUP(ml_EA_11)))); + + // nop; + RzILOpEffect *nop_15 = NOP(); + + // seq(Rd = ((st32) ((st8) mem_load_8(EA)))); + RzILOpEffect *seq_then_17 = op_ASSIGN_14; + + // seq(nop); + RzILOpEffect *seq_else_18 = nop_15; + + // if (! 
(((st32) Pt_new) & 0x1)) {seq(Rd = ((st32) ((st8) mem_load_8(EA))))} else {seq(nop)}; + RzILOpPure *op_AND_8 = LOGAND(CAST(32, MSB(Pt_new), DUP(Pt_new)), SN(32, 1)); + RzILOpPure *op_INV_9 = INV(NON_ZERO(op_AND_8)); + RzILOpEffect *branch_19 = BRANCH(op_INV_9, seq_then_17, seq_else_18); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_3, branch_19); + return instruction_sequence; +} + +// if (!Pv.new) Rd = memb(Rs+Rt<insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Pv_new_op = ISA2REG(hi, 'v', true); + RzILOpPure *Pv_new = READ_REG(pkt, Pv_new_op, true); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // u = u; + RzILOpEffect *imm_assign_3 = SETL("u", u); + + // EA = ((ut32) Rs + (Rt << u)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(Rt, VARL("u")); + RzILOpPure *op_ADD_6 = ADD(Rs, op_LSHIFT_5); + RzILOpEffect *op_ASSIGN_8 = SETL("EA", CAST(32, IL_FALSE, op_ADD_6)); + + // Rd = ((st32) ((st8) mem_load_8(EA))); + RzILOpPure *ml_EA_16 = LOADW(8, VARL("EA")); + RzILOpEffect *op_ASSIGN_19 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(CAST(8, MSB(ml_EA_16), DUP(ml_EA_16))), CAST(8, MSB(DUP(ml_EA_16)), DUP(ml_EA_16)))); + + // nop; + RzILOpEffect *nop_20 = NOP(); + + // seq(Rd = ((st32) ((st8) mem_load_8(EA)))); + RzILOpEffect *seq_then_22 = op_ASSIGN_19; + + // seq(nop); + RzILOpEffect *seq_else_23 = nop_20; + + // if (! 
(((st32) Pv_new) & 0x1)) {seq(Rd = ((st32) ((st8) mem_load_8(EA))))} else {seq(nop)}; + RzILOpPure *op_AND_13 = LOGAND(CAST(32, MSB(Pv_new), DUP(Pv_new)), SN(32, 1)); + RzILOpPure *op_INV_14 = INV(NON_ZERO(op_AND_13)); + RzILOpEffect *branch_24 = BRANCH(op_INV_14, seq_then_22, seq_else_23); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_3, op_ASSIGN_8, branch_24); + return instruction_sequence; +} + +// if (Pt) Rd = memb(Ii) +RzILOpEffect *hex_il_op_l4_ploadrbt_abs(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Pt_op = ISA2REG(hi, 't', false); + RzILOpPure *Pt = READ_REG(pkt, Pt_op, false); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = u; + RzILOpEffect *op_ASSIGN_3 = SETL("EA", VARL("u")); + + // Rd = ((st32) ((st8) mem_load_8(EA))); + RzILOpPure *ml_EA_10 = LOADW(8, VARL("EA")); + RzILOpEffect *op_ASSIGN_13 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(CAST(8, MSB(ml_EA_10), DUP(ml_EA_10))), CAST(8, MSB(DUP(ml_EA_10)), DUP(ml_EA_10)))); + + // nop; + RzILOpEffect *nop_14 = NOP(); + + // seq(Rd = ((st32) ((st8) mem_load_8(EA)))); + RzILOpEffect *seq_then_16 = op_ASSIGN_13; + + // seq(nop); + RzILOpEffect *seq_else_17 = nop_14; + + // if ((((st32) Pt) & 0x1)) {seq(Rd = ((st32) ((st8) mem_load_8(EA))))} else {seq(nop)}; + RzILOpPure *op_AND_8 = LOGAND(CAST(32, MSB(Pt), DUP(Pt)), SN(32, 1)); + RzILOpEffect *branch_18 = BRANCH(NON_ZERO(op_AND_8), seq_then_16, seq_else_17); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_3, branch_18); + return instruction_sequence; +} + +// if (Pv) Rd = memb(Rs+Rt<insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = 
READ_REG(pkt, Rt_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Pv_op = ISA2REG(hi, 'v', false); + RzILOpPure *Pv = READ_REG(pkt, Pv_op, false); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // u = u; + RzILOpEffect *imm_assign_3 = SETL("u", u); + + // EA = ((ut32) Rs + (Rt << u)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(Rt, VARL("u")); + RzILOpPure *op_ADD_6 = ADD(Rs, op_LSHIFT_5); + RzILOpEffect *op_ASSIGN_8 = SETL("EA", CAST(32, IL_FALSE, op_ADD_6)); + + // Rd = ((st32) ((st8) mem_load_8(EA))); + RzILOpPure *ml_EA_15 = LOADW(8, VARL("EA")); + RzILOpEffect *op_ASSIGN_18 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(CAST(8, MSB(ml_EA_15), DUP(ml_EA_15))), CAST(8, MSB(DUP(ml_EA_15)), DUP(ml_EA_15)))); + + // nop; + RzILOpEffect *nop_19 = NOP(); + + // seq(Rd = ((st32) ((st8) mem_load_8(EA)))); + RzILOpEffect *seq_then_21 = op_ASSIGN_18; + + // seq(nop); + RzILOpEffect *seq_else_22 = nop_19; + + // if ((((st32) Pv) & 0x1)) {seq(Rd = ((st32) ((st8) mem_load_8(EA))))} else {seq(nop)}; + RzILOpPure *op_AND_13 = LOGAND(CAST(32, MSB(Pv), DUP(Pv)), SN(32, 1)); + RzILOpEffect *branch_23 = BRANCH(NON_ZERO(op_AND_13), seq_then_21, seq_else_22); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_3, op_ASSIGN_8, branch_23); + return instruction_sequence; +} + +// if (Pt.new) Rd = memb(Ii) +RzILOpEffect *hex_il_op_l4_ploadrbtnew_abs(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Pt_new_op = ISA2REG(hi, 't', true); + RzILOpPure *Pt_new = READ_REG(pkt, Pt_new_op, true); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = u; + RzILOpEffect *op_ASSIGN_3 = SETL("EA", VARL("u")); + + // Rd = ((st32) ((st8) mem_load_8(EA))); + RzILOpPure *ml_EA_10 = LOADW(8, VARL("EA")); + RzILOpEffect *op_ASSIGN_13 = WRITE_REG(bundle, Rd_op, CAST(32, 
MSB(CAST(8, MSB(ml_EA_10), DUP(ml_EA_10))), CAST(8, MSB(DUP(ml_EA_10)), DUP(ml_EA_10)))); + + // nop; + RzILOpEffect *nop_14 = NOP(); + + // seq(Rd = ((st32) ((st8) mem_load_8(EA)))); + RzILOpEffect *seq_then_16 = op_ASSIGN_13; + + // seq(nop); + RzILOpEffect *seq_else_17 = nop_14; + + // if ((((st32) Pt_new) & 0x1)) {seq(Rd = ((st32) ((st8) mem_load_8(EA))))} else {seq(nop)}; + RzILOpPure *op_AND_8 = LOGAND(CAST(32, MSB(Pt_new), DUP(Pt_new)), SN(32, 1)); + RzILOpEffect *branch_18 = BRANCH(NON_ZERO(op_AND_8), seq_then_16, seq_else_17); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_3, branch_18); + return instruction_sequence; +} + +// if (Pv.new) Rd = memb(Rs+Rt<insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Pv_new_op = ISA2REG(hi, 'v', true); + RzILOpPure *Pv_new = READ_REG(pkt, Pv_new_op, true); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // u = u; + RzILOpEffect *imm_assign_3 = SETL("u", u); + + // EA = ((ut32) Rs + (Rt << u)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(Rt, VARL("u")); + RzILOpPure *op_ADD_6 = ADD(Rs, op_LSHIFT_5); + RzILOpEffect *op_ASSIGN_8 = SETL("EA", CAST(32, IL_FALSE, op_ADD_6)); + + // Rd = ((st32) ((st8) mem_load_8(EA))); + RzILOpPure *ml_EA_15 = LOADW(8, VARL("EA")); + RzILOpEffect *op_ASSIGN_18 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(CAST(8, MSB(ml_EA_15), DUP(ml_EA_15))), CAST(8, MSB(DUP(ml_EA_15)), DUP(ml_EA_15)))); + + // nop; + RzILOpEffect *nop_19 = NOP(); + + // seq(Rd = ((st32) ((st8) mem_load_8(EA)))); + RzILOpEffect *seq_then_21 = op_ASSIGN_18; + + // seq(nop); + RzILOpEffect *seq_else_22 = nop_19; + + // if ((((st32) Pv_new) & 0x1)) {seq(Rd = ((st32) ((st8) mem_load_8(EA))))} else {seq(nop)}; + RzILOpPure *op_AND_13 = 
LOGAND(CAST(32, MSB(Pv_new), DUP(Pv_new)), SN(32, 1)); + RzILOpEffect *branch_23 = BRANCH(NON_ZERO(op_AND_13), seq_then_21, seq_else_22); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_3, op_ASSIGN_8, branch_23); + return instruction_sequence; +} + +// if (!Pt) Rdd = memd(Ii) +RzILOpEffect *hex_il_op_l4_ploadrdf_abs(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Pt_op = ISA2REG(hi, 't', false); + RzILOpPure *Pt = READ_REG(pkt, Pt_op, false); + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = u; + RzILOpEffect *op_ASSIGN_3 = SETL("EA", VARL("u")); + + // Rdd = ((st64) ((ut64) mem_load_64(EA))); + RzILOpPure *ml_EA_11 = LOADW(64, VARL("EA")); + RzILOpEffect *op_ASSIGN_14 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, CAST(64, IL_FALSE, ml_EA_11))); + + // nop; + RzILOpEffect *nop_15 = NOP(); + + // seq(Rdd = ((st64) ((ut64) mem_load_64(EA)))); + RzILOpEffect *seq_then_17 = op_ASSIGN_14; + + // seq(nop); + RzILOpEffect *seq_else_18 = nop_15; + + // if (! 
(((st32) Pt) & 0x1)) {seq(Rdd = ((st64) ((ut64) mem_load_64(EA))))} else {seq(nop)}; + RzILOpPure *op_AND_8 = LOGAND(CAST(32, MSB(Pt), DUP(Pt)), SN(32, 1)); + RzILOpPure *op_INV_9 = INV(NON_ZERO(op_AND_8)); + RzILOpEffect *branch_19 = BRANCH(op_INV_9, seq_then_17, seq_else_18); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_3, branch_19); + return instruction_sequence; +} + +// if (!Pv) Rdd = memd(Rs+Rt<insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Pv_op = ISA2REG(hi, 'v', false); + RzILOpPure *Pv = READ_REG(pkt, Pv_op, false); + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + + // u = u; + RzILOpEffect *imm_assign_3 = SETL("u", u); + + // EA = ((ut32) Rs + (Rt << u)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(Rt, VARL("u")); + RzILOpPure *op_ADD_6 = ADD(Rs, op_LSHIFT_5); + RzILOpEffect *op_ASSIGN_8 = SETL("EA", CAST(32, IL_FALSE, op_ADD_6)); + + // Rdd = ((st64) ((ut64) mem_load_64(EA))); + RzILOpPure *ml_EA_16 = LOADW(64, VARL("EA")); + RzILOpEffect *op_ASSIGN_19 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, CAST(64, IL_FALSE, ml_EA_16))); + + // nop; + RzILOpEffect *nop_20 = NOP(); + + // seq(Rdd = ((st64) ((ut64) mem_load_64(EA)))); + RzILOpEffect *seq_then_22 = op_ASSIGN_19; + + // seq(nop); + RzILOpEffect *seq_else_23 = nop_20; + + // if (! 
(((st32) Pv) & 0x1)) {seq(Rdd = ((st64) ((ut64) mem_load_64(EA))))} else {seq(nop)}; + RzILOpPure *op_AND_13 = LOGAND(CAST(32, MSB(Pv), DUP(Pv)), SN(32, 1)); + RzILOpPure *op_INV_14 = INV(NON_ZERO(op_AND_13)); + RzILOpEffect *branch_24 = BRANCH(op_INV_14, seq_then_22, seq_else_23); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_3, op_ASSIGN_8, branch_24); + return instruction_sequence; +} + +// if (!Pt.new) Rdd = memd(Ii) +RzILOpEffect *hex_il_op_l4_ploadrdfnew_abs(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Pt_new_op = ISA2REG(hi, 't', true); + RzILOpPure *Pt_new = READ_REG(pkt, Pt_new_op, true); + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = u; + RzILOpEffect *op_ASSIGN_3 = SETL("EA", VARL("u")); + + // Rdd = ((st64) ((ut64) mem_load_64(EA))); + RzILOpPure *ml_EA_11 = LOADW(64, VARL("EA")); + RzILOpEffect *op_ASSIGN_14 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, CAST(64, IL_FALSE, ml_EA_11))); + + // nop; + RzILOpEffect *nop_15 = NOP(); + + // seq(Rdd = ((st64) ((ut64) mem_load_64(EA)))); + RzILOpEffect *seq_then_17 = op_ASSIGN_14; + + // seq(nop); + RzILOpEffect *seq_else_18 = nop_15; + + // if (! 
(((st32) Pt_new) & 0x1)) {seq(Rdd = ((st64) ((ut64) mem_load_64(EA))))} else {seq(nop)}; + RzILOpPure *op_AND_8 = LOGAND(CAST(32, MSB(Pt_new), DUP(Pt_new)), SN(32, 1)); + RzILOpPure *op_INV_9 = INV(NON_ZERO(op_AND_8)); + RzILOpEffect *branch_19 = BRANCH(op_INV_9, seq_then_17, seq_else_18); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_3, branch_19); + return instruction_sequence; +} + +// if (!Pv.new) Rdd = memd(Rs+Rt<insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Pv_new_op = ISA2REG(hi, 'v', true); + RzILOpPure *Pv_new = READ_REG(pkt, Pv_new_op, true); + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + + // u = u; + RzILOpEffect *imm_assign_3 = SETL("u", u); + + // EA = ((ut32) Rs + (Rt << u)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(Rt, VARL("u")); + RzILOpPure *op_ADD_6 = ADD(Rs, op_LSHIFT_5); + RzILOpEffect *op_ASSIGN_8 = SETL("EA", CAST(32, IL_FALSE, op_ADD_6)); + + // Rdd = ((st64) ((ut64) mem_load_64(EA))); + RzILOpPure *ml_EA_16 = LOADW(64, VARL("EA")); + RzILOpEffect *op_ASSIGN_19 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, CAST(64, IL_FALSE, ml_EA_16))); + + // nop; + RzILOpEffect *nop_20 = NOP(); + + // seq(Rdd = ((st64) ((ut64) mem_load_64(EA)))); + RzILOpEffect *seq_then_22 = op_ASSIGN_19; + + // seq(nop); + RzILOpEffect *seq_else_23 = nop_20; + + // if (! 
(((st32) Pv_new) & 0x1)) {seq(Rdd = ((st64) ((ut64) mem_load_64(EA))))} else {seq(nop)}; + RzILOpPure *op_AND_13 = LOGAND(CAST(32, MSB(Pv_new), DUP(Pv_new)), SN(32, 1)); + RzILOpPure *op_INV_14 = INV(NON_ZERO(op_AND_13)); + RzILOpEffect *branch_24 = BRANCH(op_INV_14, seq_then_22, seq_else_23); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_3, op_ASSIGN_8, branch_24); + return instruction_sequence; +} + +// if (Pt) Rdd = memd(Ii) +RzILOpEffect *hex_il_op_l4_ploadrdt_abs(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Pt_op = ISA2REG(hi, 't', false); + RzILOpPure *Pt = READ_REG(pkt, Pt_op, false); + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = u; + RzILOpEffect *op_ASSIGN_3 = SETL("EA", VARL("u")); + + // Rdd = ((st64) ((ut64) mem_load_64(EA))); + RzILOpPure *ml_EA_10 = LOADW(64, VARL("EA")); + RzILOpEffect *op_ASSIGN_13 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, CAST(64, IL_FALSE, ml_EA_10))); + + // nop; + RzILOpEffect *nop_14 = NOP(); + + // seq(Rdd = ((st64) ((ut64) mem_load_64(EA)))); + RzILOpEffect *seq_then_16 = op_ASSIGN_13; + + // seq(nop); + RzILOpEffect *seq_else_17 = nop_14; + + // if ((((st32) Pt) & 0x1)) {seq(Rdd = ((st64) ((ut64) mem_load_64(EA))))} else {seq(nop)}; + RzILOpPure *op_AND_8 = LOGAND(CAST(32, MSB(Pt), DUP(Pt)), SN(32, 1)); + RzILOpEffect *branch_18 = BRANCH(NON_ZERO(op_AND_8), seq_then_16, seq_else_17); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_3, branch_18); + return instruction_sequence; +} + +// if (Pv) Rdd = memd(Rs+Rt<insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + 
RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Pv_op = ISA2REG(hi, 'v', false); + RzILOpPure *Pv = READ_REG(pkt, Pv_op, false); + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + + // u = u; + RzILOpEffect *imm_assign_3 = SETL("u", u); + + // EA = ((ut32) Rs + (Rt << u)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(Rt, VARL("u")); + RzILOpPure *op_ADD_6 = ADD(Rs, op_LSHIFT_5); + RzILOpEffect *op_ASSIGN_8 = SETL("EA", CAST(32, IL_FALSE, op_ADD_6)); + + // Rdd = ((st64) ((ut64) mem_load_64(EA))); + RzILOpPure *ml_EA_15 = LOADW(64, VARL("EA")); + RzILOpEffect *op_ASSIGN_18 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, CAST(64, IL_FALSE, ml_EA_15))); + + // nop; + RzILOpEffect *nop_19 = NOP(); + + // seq(Rdd = ((st64) ((ut64) mem_load_64(EA)))); + RzILOpEffect *seq_then_21 = op_ASSIGN_18; + + // seq(nop); + RzILOpEffect *seq_else_22 = nop_19; + + // if ((((st32) Pv) & 0x1)) {seq(Rdd = ((st64) ((ut64) mem_load_64(EA))))} else {seq(nop)}; + RzILOpPure *op_AND_13 = LOGAND(CAST(32, MSB(Pv), DUP(Pv)), SN(32, 1)); + RzILOpEffect *branch_23 = BRANCH(NON_ZERO(op_AND_13), seq_then_21, seq_else_22); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_3, op_ASSIGN_8, branch_23); + return instruction_sequence; +} + +// if (Pt.new) Rdd = memd(Ii) +RzILOpEffect *hex_il_op_l4_ploadrdtnew_abs(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Pt_new_op = ISA2REG(hi, 't', true); + RzILOpPure *Pt_new = READ_REG(pkt, Pt_new_op, true); + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = u; + RzILOpEffect *op_ASSIGN_3 = SETL("EA", VARL("u")); + + // Rdd = ((st64) ((ut64) mem_load_64(EA))); + RzILOpPure *ml_EA_10 = LOADW(64, VARL("EA")); + RzILOpEffect *op_ASSIGN_13 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, CAST(64, IL_FALSE, ml_EA_10))); + + // nop; + RzILOpEffect 
*nop_14 = NOP(); + + // seq(Rdd = ((st64) ((ut64) mem_load_64(EA)))); + RzILOpEffect *seq_then_16 = op_ASSIGN_13; + + // seq(nop); + RzILOpEffect *seq_else_17 = nop_14; + + // if ((((st32) Pt_new) & 0x1)) {seq(Rdd = ((st64) ((ut64) mem_load_64(EA))))} else {seq(nop)}; + RzILOpPure *op_AND_8 = LOGAND(CAST(32, MSB(Pt_new), DUP(Pt_new)), SN(32, 1)); + RzILOpEffect *branch_18 = BRANCH(NON_ZERO(op_AND_8), seq_then_16, seq_else_17); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_3, branch_18); + return instruction_sequence; +} + +// if (Pv.new) Rdd = memd(Rs+Rt<insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Pv_new_op = ISA2REG(hi, 'v', true); + RzILOpPure *Pv_new = READ_REG(pkt, Pv_new_op, true); + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + + // u = u; + RzILOpEffect *imm_assign_3 = SETL("u", u); + + // EA = ((ut32) Rs + (Rt << u)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(Rt, VARL("u")); + RzILOpPure *op_ADD_6 = ADD(Rs, op_LSHIFT_5); + RzILOpEffect *op_ASSIGN_8 = SETL("EA", CAST(32, IL_FALSE, op_ADD_6)); + + // Rdd = ((st64) ((ut64) mem_load_64(EA))); + RzILOpPure *ml_EA_15 = LOADW(64, VARL("EA")); + RzILOpEffect *op_ASSIGN_18 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, CAST(64, IL_FALSE, ml_EA_15))); + + // nop; + RzILOpEffect *nop_19 = NOP(); + + // seq(Rdd = ((st64) ((ut64) mem_load_64(EA)))); + RzILOpEffect *seq_then_21 = op_ASSIGN_18; + + // seq(nop); + RzILOpEffect *seq_else_22 = nop_19; + + // if ((((st32) Pv_new) & 0x1)) {seq(Rdd = ((st64) ((ut64) mem_load_64(EA))))} else {seq(nop)}; + RzILOpPure *op_AND_13 = LOGAND(CAST(32, MSB(Pv_new), DUP(Pv_new)), SN(32, 1)); + RzILOpEffect *branch_23 = BRANCH(NON_ZERO(op_AND_13), seq_then_21, seq_else_22); + + 
RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_3, op_ASSIGN_8, branch_23); + return instruction_sequence; +} + +// if (!Pt) Rd = memh(Ii) +RzILOpEffect *hex_il_op_l4_ploadrhf_abs(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Pt_op = ISA2REG(hi, 't', false); + RzILOpPure *Pt = READ_REG(pkt, Pt_op, false); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = u; + RzILOpEffect *op_ASSIGN_3 = SETL("EA", VARL("u")); + + // Rd = ((st32) ((st16) mem_load_16(EA))); + RzILOpPure *ml_EA_11 = LOADW(16, VARL("EA")); + RzILOpEffect *op_ASSIGN_14 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(CAST(16, MSB(ml_EA_11), DUP(ml_EA_11))), CAST(16, MSB(DUP(ml_EA_11)), DUP(ml_EA_11)))); + + // nop; + RzILOpEffect *nop_15 = NOP(); + + // seq(Rd = ((st32) ((st16) mem_load_16(EA)))); + RzILOpEffect *seq_then_17 = op_ASSIGN_14; + + // seq(nop); + RzILOpEffect *seq_else_18 = nop_15; + + // if (! 
(((st32) Pt) & 0x1)) {seq(Rd = ((st32) ((st16) mem_load_16(EA))))} else {seq(nop)}; + RzILOpPure *op_AND_8 = LOGAND(CAST(32, MSB(Pt), DUP(Pt)), SN(32, 1)); + RzILOpPure *op_INV_9 = INV(NON_ZERO(op_AND_8)); + RzILOpEffect *branch_19 = BRANCH(op_INV_9, seq_then_17, seq_else_18); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_3, branch_19); + return instruction_sequence; +} + +// if (!Pv) Rd = memh(Rs+Rt<insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Pv_op = ISA2REG(hi, 'v', false); + RzILOpPure *Pv = READ_REG(pkt, Pv_op, false); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // u = u; + RzILOpEffect *imm_assign_3 = SETL("u", u); + + // EA = ((ut32) Rs + (Rt << u)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(Rt, VARL("u")); + RzILOpPure *op_ADD_6 = ADD(Rs, op_LSHIFT_5); + RzILOpEffect *op_ASSIGN_8 = SETL("EA", CAST(32, IL_FALSE, op_ADD_6)); + + // Rd = ((st32) ((st16) mem_load_16(EA))); + RzILOpPure *ml_EA_16 = LOADW(16, VARL("EA")); + RzILOpEffect *op_ASSIGN_19 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(CAST(16, MSB(ml_EA_16), DUP(ml_EA_16))), CAST(16, MSB(DUP(ml_EA_16)), DUP(ml_EA_16)))); + + // nop; + RzILOpEffect *nop_20 = NOP(); + + // seq(Rd = ((st32) ((st16) mem_load_16(EA)))); + RzILOpEffect *seq_then_22 = op_ASSIGN_19; + + // seq(nop); + RzILOpEffect *seq_else_23 = nop_20; + + // if (! 
(((st32) Pv) & 0x1)) {seq(Rd = ((st32) ((st16) mem_load_16(EA))))} else {seq(nop)}; + RzILOpPure *op_AND_13 = LOGAND(CAST(32, MSB(Pv), DUP(Pv)), SN(32, 1)); + RzILOpPure *op_INV_14 = INV(NON_ZERO(op_AND_13)); + RzILOpEffect *branch_24 = BRANCH(op_INV_14, seq_then_22, seq_else_23); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_3, op_ASSIGN_8, branch_24); + return instruction_sequence; +} + +// if (!Pt.new) Rd = memh(Ii) +RzILOpEffect *hex_il_op_l4_ploadrhfnew_abs(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Pt_new_op = ISA2REG(hi, 't', true); + RzILOpPure *Pt_new = READ_REG(pkt, Pt_new_op, true); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = u; + RzILOpEffect *op_ASSIGN_3 = SETL("EA", VARL("u")); + + // Rd = ((st32) ((st16) mem_load_16(EA))); + RzILOpPure *ml_EA_11 = LOADW(16, VARL("EA")); + RzILOpEffect *op_ASSIGN_14 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(CAST(16, MSB(ml_EA_11), DUP(ml_EA_11))), CAST(16, MSB(DUP(ml_EA_11)), DUP(ml_EA_11)))); + + // nop; + RzILOpEffect *nop_15 = NOP(); + + // seq(Rd = ((st32) ((st16) mem_load_16(EA)))); + RzILOpEffect *seq_then_17 = op_ASSIGN_14; + + // seq(nop); + RzILOpEffect *seq_else_18 = nop_15; + + // if (! 
(((st32) Pt_new) & 0x1)) {seq(Rd = ((st32) ((st16) mem_load_16(EA))))} else {seq(nop)}; + RzILOpPure *op_AND_8 = LOGAND(CAST(32, MSB(Pt_new), DUP(Pt_new)), SN(32, 1)); + RzILOpPure *op_INV_9 = INV(NON_ZERO(op_AND_8)); + RzILOpEffect *branch_19 = BRANCH(op_INV_9, seq_then_17, seq_else_18); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_3, branch_19); + return instruction_sequence; +} + +// if (!Pv.new) Rd = memh(Rs+Rt<insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Pv_new_op = ISA2REG(hi, 'v', true); + RzILOpPure *Pv_new = READ_REG(pkt, Pv_new_op, true); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // u = u; + RzILOpEffect *imm_assign_3 = SETL("u", u); + + // EA = ((ut32) Rs + (Rt << u)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(Rt, VARL("u")); + RzILOpPure *op_ADD_6 = ADD(Rs, op_LSHIFT_5); + RzILOpEffect *op_ASSIGN_8 = SETL("EA", CAST(32, IL_FALSE, op_ADD_6)); + + // Rd = ((st32) ((st16) mem_load_16(EA))); + RzILOpPure *ml_EA_16 = LOADW(16, VARL("EA")); + RzILOpEffect *op_ASSIGN_19 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(CAST(16, MSB(ml_EA_16), DUP(ml_EA_16))), CAST(16, MSB(DUP(ml_EA_16)), DUP(ml_EA_16)))); + + // nop; + RzILOpEffect *nop_20 = NOP(); + + // seq(Rd = ((st32) ((st16) mem_load_16(EA)))); + RzILOpEffect *seq_then_22 = op_ASSIGN_19; + + // seq(nop); + RzILOpEffect *seq_else_23 = nop_20; + + // if (! 
(((st32) Pv_new) & 0x1)) {seq(Rd = ((st32) ((st16) mem_load_16(EA))))} else {seq(nop)}; + RzILOpPure *op_AND_13 = LOGAND(CAST(32, MSB(Pv_new), DUP(Pv_new)), SN(32, 1)); + RzILOpPure *op_INV_14 = INV(NON_ZERO(op_AND_13)); + RzILOpEffect *branch_24 = BRANCH(op_INV_14, seq_then_22, seq_else_23); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_3, op_ASSIGN_8, branch_24); + return instruction_sequence; +} + +// if (Pt) Rd = memh(Ii) +RzILOpEffect *hex_il_op_l4_ploadrht_abs(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Pt_op = ISA2REG(hi, 't', false); + RzILOpPure *Pt = READ_REG(pkt, Pt_op, false); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = u; + RzILOpEffect *op_ASSIGN_3 = SETL("EA", VARL("u")); + + // Rd = ((st32) ((st16) mem_load_16(EA))); + RzILOpPure *ml_EA_10 = LOADW(16, VARL("EA")); + RzILOpEffect *op_ASSIGN_13 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(CAST(16, MSB(ml_EA_10), DUP(ml_EA_10))), CAST(16, MSB(DUP(ml_EA_10)), DUP(ml_EA_10)))); + + // nop; + RzILOpEffect *nop_14 = NOP(); + + // seq(Rd = ((st32) ((st16) mem_load_16(EA)))); + RzILOpEffect *seq_then_16 = op_ASSIGN_13; + + // seq(nop); + RzILOpEffect *seq_else_17 = nop_14; + + // if ((((st32) Pt) & 0x1)) {seq(Rd = ((st32) ((st16) mem_load_16(EA))))} else {seq(nop)}; + RzILOpPure *op_AND_8 = LOGAND(CAST(32, MSB(Pt), DUP(Pt)), SN(32, 1)); + RzILOpEffect *branch_18 = BRANCH(NON_ZERO(op_AND_8), seq_then_16, seq_else_17); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_3, branch_18); + return instruction_sequence; +} + +// if (Pv) Rd = memh(Rs+Rt<insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + 
RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Pv_op = ISA2REG(hi, 'v', false); + RzILOpPure *Pv = READ_REG(pkt, Pv_op, false); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // u = u; + RzILOpEffect *imm_assign_3 = SETL("u", u); + + // EA = ((ut32) Rs + (Rt << u)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(Rt, VARL("u")); + RzILOpPure *op_ADD_6 = ADD(Rs, op_LSHIFT_5); + RzILOpEffect *op_ASSIGN_8 = SETL("EA", CAST(32, IL_FALSE, op_ADD_6)); + + // Rd = ((st32) ((st16) mem_load_16(EA))); + RzILOpPure *ml_EA_15 = LOADW(16, VARL("EA")); + RzILOpEffect *op_ASSIGN_18 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(CAST(16, MSB(ml_EA_15), DUP(ml_EA_15))), CAST(16, MSB(DUP(ml_EA_15)), DUP(ml_EA_15)))); + + // nop; + RzILOpEffect *nop_19 = NOP(); + + // seq(Rd = ((st32) ((st16) mem_load_16(EA)))); + RzILOpEffect *seq_then_21 = op_ASSIGN_18; + + // seq(nop); + RzILOpEffect *seq_else_22 = nop_19; + + // if ((((st32) Pv) & 0x1)) {seq(Rd = ((st32) ((st16) mem_load_16(EA))))} else {seq(nop)}; + RzILOpPure *op_AND_13 = LOGAND(CAST(32, MSB(Pv), DUP(Pv)), SN(32, 1)); + RzILOpEffect *branch_23 = BRANCH(NON_ZERO(op_AND_13), seq_then_21, seq_else_22); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_3, op_ASSIGN_8, branch_23); + return instruction_sequence; +} + +// if (Pt.new) Rd = memh(Ii) +RzILOpEffect *hex_il_op_l4_ploadrhtnew_abs(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Pt_new_op = ISA2REG(hi, 't', true); + RzILOpPure *Pt_new = READ_REG(pkt, Pt_new_op, true); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = u; + RzILOpEffect *op_ASSIGN_3 = SETL("EA", VARL("u")); + + // Rd = ((st32) ((st16) mem_load_16(EA))); + RzILOpPure *ml_EA_10 = LOADW(16, VARL("EA")); + RzILOpEffect *op_ASSIGN_13 = 
WRITE_REG(bundle, Rd_op, CAST(32, MSB(CAST(16, MSB(ml_EA_10), DUP(ml_EA_10))), CAST(16, MSB(DUP(ml_EA_10)), DUP(ml_EA_10)))); + + // nop; + RzILOpEffect *nop_14 = NOP(); + + // seq(Rd = ((st32) ((st16) mem_load_16(EA)))); + RzILOpEffect *seq_then_16 = op_ASSIGN_13; + + // seq(nop); + RzILOpEffect *seq_else_17 = nop_14; + + // if ((((st32) Pt_new) & 0x1)) {seq(Rd = ((st32) ((st16) mem_load_16(EA))))} else {seq(nop)}; + RzILOpPure *op_AND_8 = LOGAND(CAST(32, MSB(Pt_new), DUP(Pt_new)), SN(32, 1)); + RzILOpEffect *branch_18 = BRANCH(NON_ZERO(op_AND_8), seq_then_16, seq_else_17); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_3, branch_18); + return instruction_sequence; +} + +// if (Pv.new) Rd = memh(Rs+Rt<insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Pv_new_op = ISA2REG(hi, 'v', true); + RzILOpPure *Pv_new = READ_REG(pkt, Pv_new_op, true); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // u = u; + RzILOpEffect *imm_assign_3 = SETL("u", u); + + // EA = ((ut32) Rs + (Rt << u)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(Rt, VARL("u")); + RzILOpPure *op_ADD_6 = ADD(Rs, op_LSHIFT_5); + RzILOpEffect *op_ASSIGN_8 = SETL("EA", CAST(32, IL_FALSE, op_ADD_6)); + + // Rd = ((st32) ((st16) mem_load_16(EA))); + RzILOpPure *ml_EA_15 = LOADW(16, VARL("EA")); + RzILOpEffect *op_ASSIGN_18 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(CAST(16, MSB(ml_EA_15), DUP(ml_EA_15))), CAST(16, MSB(DUP(ml_EA_15)), DUP(ml_EA_15)))); + + // nop; + RzILOpEffect *nop_19 = NOP(); + + // seq(Rd = ((st32) ((st16) mem_load_16(EA)))); + RzILOpEffect *seq_then_21 = op_ASSIGN_18; + + // seq(nop); + RzILOpEffect *seq_else_22 = nop_19; + + // if ((((st32) Pv_new) & 0x1)) {seq(Rd = ((st32) ((st16) mem_load_16(EA))))} 
else {seq(nop)}; + RzILOpPure *op_AND_13 = LOGAND(CAST(32, MSB(Pv_new), DUP(Pv_new)), SN(32, 1)); + RzILOpEffect *branch_23 = BRANCH(NON_ZERO(op_AND_13), seq_then_21, seq_else_22); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_3, op_ASSIGN_8, branch_23); + return instruction_sequence; +} + +// if (!Pt) Rd = memw(Ii) +RzILOpEffect *hex_il_op_l4_ploadrif_abs(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Pt_op = ISA2REG(hi, 't', false); + RzILOpPure *Pt = READ_REG(pkt, Pt_op, false); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = u; + RzILOpEffect *op_ASSIGN_3 = SETL("EA", VARL("u")); + + // Rd = ((st32) ((ut32) mem_load_32(EA))); + RzILOpPure *ml_EA_11 = LOADW(32, VARL("EA")); + RzILOpEffect *op_ASSIGN_14 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, CAST(32, IL_FALSE, ml_EA_11))); + + // nop; + RzILOpEffect *nop_15 = NOP(); + + // seq(Rd = ((st32) ((ut32) mem_load_32(EA)))); + RzILOpEffect *seq_then_17 = op_ASSIGN_14; + + // seq(nop); + RzILOpEffect *seq_else_18 = nop_15; + + // if (! 
(((st32) Pt) & 0x1)) {seq(Rd = ((st32) ((ut32) mem_load_32(EA))))} else {seq(nop)}; + RzILOpPure *op_AND_8 = LOGAND(CAST(32, MSB(Pt), DUP(Pt)), SN(32, 1)); + RzILOpPure *op_INV_9 = INV(NON_ZERO(op_AND_8)); + RzILOpEffect *branch_19 = BRANCH(op_INV_9, seq_then_17, seq_else_18); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_3, branch_19); + return instruction_sequence; +} + +// if (!Pv) Rd = memw(Rs+Rt<insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Pv_op = ISA2REG(hi, 'v', false); + RzILOpPure *Pv = READ_REG(pkt, Pv_op, false); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // u = u; + RzILOpEffect *imm_assign_3 = SETL("u", u); + + // EA = ((ut32) Rs + (Rt << u)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(Rt, VARL("u")); + RzILOpPure *op_ADD_6 = ADD(Rs, op_LSHIFT_5); + RzILOpEffect *op_ASSIGN_8 = SETL("EA", CAST(32, IL_FALSE, op_ADD_6)); + + // Rd = ((st32) ((ut32) mem_load_32(EA))); + RzILOpPure *ml_EA_16 = LOADW(32, VARL("EA")); + RzILOpEffect *op_ASSIGN_19 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, CAST(32, IL_FALSE, ml_EA_16))); + + // nop; + RzILOpEffect *nop_20 = NOP(); + + // seq(Rd = ((st32) ((ut32) mem_load_32(EA)))); + RzILOpEffect *seq_then_22 = op_ASSIGN_19; + + // seq(nop); + RzILOpEffect *seq_else_23 = nop_20; + + // if (! 
(((st32) Pv) & 0x1)) {seq(Rd = ((st32) ((ut32) mem_load_32(EA))))} else {seq(nop)}; + RzILOpPure *op_AND_13 = LOGAND(CAST(32, MSB(Pv), DUP(Pv)), SN(32, 1)); + RzILOpPure *op_INV_14 = INV(NON_ZERO(op_AND_13)); + RzILOpEffect *branch_24 = BRANCH(op_INV_14, seq_then_22, seq_else_23); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_3, op_ASSIGN_8, branch_24); + return instruction_sequence; +} + +// if (!Pt.new) Rd = memw(Ii) +RzILOpEffect *hex_il_op_l4_ploadrifnew_abs(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Pt_new_op = ISA2REG(hi, 't', true); + RzILOpPure *Pt_new = READ_REG(pkt, Pt_new_op, true); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = u; + RzILOpEffect *op_ASSIGN_3 = SETL("EA", VARL("u")); + + // Rd = ((st32) ((ut32) mem_load_32(EA))); + RzILOpPure *ml_EA_11 = LOADW(32, VARL("EA")); + RzILOpEffect *op_ASSIGN_14 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, CAST(32, IL_FALSE, ml_EA_11))); + + // nop; + RzILOpEffect *nop_15 = NOP(); + + // seq(Rd = ((st32) ((ut32) mem_load_32(EA)))); + RzILOpEffect *seq_then_17 = op_ASSIGN_14; + + // seq(nop); + RzILOpEffect *seq_else_18 = nop_15; + + // if (! 
(((st32) Pt_new) & 0x1)) {seq(Rd = ((st32) ((ut32) mem_load_32(EA))))} else {seq(nop)}; + RzILOpPure *op_AND_8 = LOGAND(CAST(32, MSB(Pt_new), DUP(Pt_new)), SN(32, 1)); + RzILOpPure *op_INV_9 = INV(NON_ZERO(op_AND_8)); + RzILOpEffect *branch_19 = BRANCH(op_INV_9, seq_then_17, seq_else_18); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_3, branch_19); + return instruction_sequence; +} + +// if (!Pv.new) Rd = memw(Rs+Rt<insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Pv_new_op = ISA2REG(hi, 'v', true); + RzILOpPure *Pv_new = READ_REG(pkt, Pv_new_op, true); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // u = u; + RzILOpEffect *imm_assign_3 = SETL("u", u); + + // EA = ((ut32) Rs + (Rt << u)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(Rt, VARL("u")); + RzILOpPure *op_ADD_6 = ADD(Rs, op_LSHIFT_5); + RzILOpEffect *op_ASSIGN_8 = SETL("EA", CAST(32, IL_FALSE, op_ADD_6)); + + // Rd = ((st32) ((ut32) mem_load_32(EA))); + RzILOpPure *ml_EA_16 = LOADW(32, VARL("EA")); + RzILOpEffect *op_ASSIGN_19 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, CAST(32, IL_FALSE, ml_EA_16))); + + // nop; + RzILOpEffect *nop_20 = NOP(); + + // seq(Rd = ((st32) ((ut32) mem_load_32(EA)))); + RzILOpEffect *seq_then_22 = op_ASSIGN_19; + + // seq(nop); + RzILOpEffect *seq_else_23 = nop_20; + + // if (! 
(((st32) Pv_new) & 0x1)) {seq(Rd = ((st32) ((ut32) mem_load_32(EA))))} else {seq(nop)}; + RzILOpPure *op_AND_13 = LOGAND(CAST(32, MSB(Pv_new), DUP(Pv_new)), SN(32, 1)); + RzILOpPure *op_INV_14 = INV(NON_ZERO(op_AND_13)); + RzILOpEffect *branch_24 = BRANCH(op_INV_14, seq_then_22, seq_else_23); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_3, op_ASSIGN_8, branch_24); + return instruction_sequence; +} + +// if (Pt) Rd = memw(Ii) +RzILOpEffect *hex_il_op_l4_ploadrit_abs(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Pt_op = ISA2REG(hi, 't', false); + RzILOpPure *Pt = READ_REG(pkt, Pt_op, false); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = u; + RzILOpEffect *op_ASSIGN_3 = SETL("EA", VARL("u")); + + // Rd = ((st32) ((ut32) mem_load_32(EA))); + RzILOpPure *ml_EA_10 = LOADW(32, VARL("EA")); + RzILOpEffect *op_ASSIGN_13 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, CAST(32, IL_FALSE, ml_EA_10))); + + // nop; + RzILOpEffect *nop_14 = NOP(); + + // seq(Rd = ((st32) ((ut32) mem_load_32(EA)))); + RzILOpEffect *seq_then_16 = op_ASSIGN_13; + + // seq(nop); + RzILOpEffect *seq_else_17 = nop_14; + + // if ((((st32) Pt) & 0x1)) {seq(Rd = ((st32) ((ut32) mem_load_32(EA))))} else {seq(nop)}; + RzILOpPure *op_AND_8 = LOGAND(CAST(32, MSB(Pt), DUP(Pt)), SN(32, 1)); + RzILOpEffect *branch_18 = BRANCH(NON_ZERO(op_AND_8), seq_then_16, seq_else_17); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_3, branch_18); + return instruction_sequence; +} + +// if (Pv) Rd = memw(Rs+Rt<insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + 
RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Pv_op = ISA2REG(hi, 'v', false); + RzILOpPure *Pv = READ_REG(pkt, Pv_op, false); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // u = u; + RzILOpEffect *imm_assign_3 = SETL("u", u); + + // EA = ((ut32) Rs + (Rt << u)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(Rt, VARL("u")); + RzILOpPure *op_ADD_6 = ADD(Rs, op_LSHIFT_5); + RzILOpEffect *op_ASSIGN_8 = SETL("EA", CAST(32, IL_FALSE, op_ADD_6)); + + // Rd = ((st32) ((ut32) mem_load_32(EA))); + RzILOpPure *ml_EA_15 = LOADW(32, VARL("EA")); + RzILOpEffect *op_ASSIGN_18 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, CAST(32, IL_FALSE, ml_EA_15))); + + // nop; + RzILOpEffect *nop_19 = NOP(); + + // seq(Rd = ((st32) ((ut32) mem_load_32(EA)))); + RzILOpEffect *seq_then_21 = op_ASSIGN_18; + + // seq(nop); + RzILOpEffect *seq_else_22 = nop_19; + + // if ((((st32) Pv) & 0x1)) {seq(Rd = ((st32) ((ut32) mem_load_32(EA))))} else {seq(nop)}; + RzILOpPure *op_AND_13 = LOGAND(CAST(32, MSB(Pv), DUP(Pv)), SN(32, 1)); + RzILOpEffect *branch_23 = BRANCH(NON_ZERO(op_AND_13), seq_then_21, seq_else_22); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_3, op_ASSIGN_8, branch_23); + return instruction_sequence; +} + +// if (Pt.new) Rd = memw(Ii) +RzILOpEffect *hex_il_op_l4_ploadritnew_abs(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Pt_new_op = ISA2REG(hi, 't', true); + RzILOpPure *Pt_new = READ_REG(pkt, Pt_new_op, true); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = u; + RzILOpEffect *op_ASSIGN_3 = SETL("EA", VARL("u")); + + // Rd = ((st32) ((ut32) mem_load_32(EA))); + RzILOpPure *ml_EA_10 = LOADW(32, VARL("EA")); + RzILOpEffect *op_ASSIGN_13 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, CAST(32, IL_FALSE, ml_EA_10))); + + // nop; + RzILOpEffect *nop_14 
= NOP(); + + // seq(Rd = ((st32) ((ut32) mem_load_32(EA)))); + RzILOpEffect *seq_then_16 = op_ASSIGN_13; + + // seq(nop); + RzILOpEffect *seq_else_17 = nop_14; + + // if ((((st32) Pt_new) & 0x1)) {seq(Rd = ((st32) ((ut32) mem_load_32(EA))))} else {seq(nop)}; + RzILOpPure *op_AND_8 = LOGAND(CAST(32, MSB(Pt_new), DUP(Pt_new)), SN(32, 1)); + RzILOpEffect *branch_18 = BRANCH(NON_ZERO(op_AND_8), seq_then_16, seq_else_17); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_3, branch_18); + return instruction_sequence; +} + +// if (Pv.new) Rd = memw(Rs+Rt<insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Pv_new_op = ISA2REG(hi, 'v', true); + RzILOpPure *Pv_new = READ_REG(pkt, Pv_new_op, true); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // u = u; + RzILOpEffect *imm_assign_3 = SETL("u", u); + + // EA = ((ut32) Rs + (Rt << u)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(Rt, VARL("u")); + RzILOpPure *op_ADD_6 = ADD(Rs, op_LSHIFT_5); + RzILOpEffect *op_ASSIGN_8 = SETL("EA", CAST(32, IL_FALSE, op_ADD_6)); + + // Rd = ((st32) ((ut32) mem_load_32(EA))); + RzILOpPure *ml_EA_15 = LOADW(32, VARL("EA")); + RzILOpEffect *op_ASSIGN_18 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, CAST(32, IL_FALSE, ml_EA_15))); + + // nop; + RzILOpEffect *nop_19 = NOP(); + + // seq(Rd = ((st32) ((ut32) mem_load_32(EA)))); + RzILOpEffect *seq_then_21 = op_ASSIGN_18; + + // seq(nop); + RzILOpEffect *seq_else_22 = nop_19; + + // if ((((st32) Pv_new) & 0x1)) {seq(Rd = ((st32) ((ut32) mem_load_32(EA))))} else {seq(nop)}; + RzILOpPure *op_AND_13 = LOGAND(CAST(32, MSB(Pv_new), DUP(Pv_new)), SN(32, 1)); + RzILOpEffect *branch_23 = BRANCH(NON_ZERO(op_AND_13), seq_then_21, seq_else_22); + + RzILOpEffect 
*instruction_sequence = SEQN(3, imm_assign_3, op_ASSIGN_8, branch_23); + return instruction_sequence; +} + +// if (!Pt) Rd = memub(Ii) +RzILOpEffect *hex_il_op_l4_ploadrubf_abs(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Pt_op = ISA2REG(hi, 't', false); + RzILOpPure *Pt = READ_REG(pkt, Pt_op, false); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = u; + RzILOpEffect *op_ASSIGN_3 = SETL("EA", VARL("u")); + + // Rd = ((st32) ((ut8) mem_load_8(EA))); + RzILOpPure *ml_EA_11 = LOADW(8, VARL("EA")); + RzILOpEffect *op_ASSIGN_14 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, CAST(8, IL_FALSE, ml_EA_11))); + + // nop; + RzILOpEffect *nop_15 = NOP(); + + // seq(Rd = ((st32) ((ut8) mem_load_8(EA)))); + RzILOpEffect *seq_then_17 = op_ASSIGN_14; + + // seq(nop); + RzILOpEffect *seq_else_18 = nop_15; + + // if (! 
(((st32) Pt) & 0x1)) {seq(Rd = ((st32) ((ut8) mem_load_8(EA))))} else {seq(nop)}; + RzILOpPure *op_AND_8 = LOGAND(CAST(32, MSB(Pt), DUP(Pt)), SN(32, 1)); + RzILOpPure *op_INV_9 = INV(NON_ZERO(op_AND_8)); + RzILOpEffect *branch_19 = BRANCH(op_INV_9, seq_then_17, seq_else_18); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_3, branch_19); + return instruction_sequence; +} + +// if (!Pv) Rd = memub(Rs+Rt<insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Pv_op = ISA2REG(hi, 'v', false); + RzILOpPure *Pv = READ_REG(pkt, Pv_op, false); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // u = u; + RzILOpEffect *imm_assign_3 = SETL("u", u); + + // EA = ((ut32) Rs + (Rt << u)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(Rt, VARL("u")); + RzILOpPure *op_ADD_6 = ADD(Rs, op_LSHIFT_5); + RzILOpEffect *op_ASSIGN_8 = SETL("EA", CAST(32, IL_FALSE, op_ADD_6)); + + // Rd = ((st32) ((ut8) mem_load_8(EA))); + RzILOpPure *ml_EA_16 = LOADW(8, VARL("EA")); + RzILOpEffect *op_ASSIGN_19 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, CAST(8, IL_FALSE, ml_EA_16))); + + // nop; + RzILOpEffect *nop_20 = NOP(); + + // seq(Rd = ((st32) ((ut8) mem_load_8(EA)))); + RzILOpEffect *seq_then_22 = op_ASSIGN_19; + + // seq(nop); + RzILOpEffect *seq_else_23 = nop_20; + + // if (! 
(((st32) Pv) & 0x1)) {seq(Rd = ((st32) ((ut8) mem_load_8(EA))))} else {seq(nop)}; + RzILOpPure *op_AND_13 = LOGAND(CAST(32, MSB(Pv), DUP(Pv)), SN(32, 1)); + RzILOpPure *op_INV_14 = INV(NON_ZERO(op_AND_13)); + RzILOpEffect *branch_24 = BRANCH(op_INV_14, seq_then_22, seq_else_23); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_3, op_ASSIGN_8, branch_24); + return instruction_sequence; +} + +// if (!Pt.new) Rd = memub(Ii) +RzILOpEffect *hex_il_op_l4_ploadrubfnew_abs(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Pt_new_op = ISA2REG(hi, 't', true); + RzILOpPure *Pt_new = READ_REG(pkt, Pt_new_op, true); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = u; + RzILOpEffect *op_ASSIGN_3 = SETL("EA", VARL("u")); + + // Rd = ((st32) ((ut8) mem_load_8(EA))); + RzILOpPure *ml_EA_11 = LOADW(8, VARL("EA")); + RzILOpEffect *op_ASSIGN_14 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, CAST(8, IL_FALSE, ml_EA_11))); + + // nop; + RzILOpEffect *nop_15 = NOP(); + + // seq(Rd = ((st32) ((ut8) mem_load_8(EA)))); + RzILOpEffect *seq_then_17 = op_ASSIGN_14; + + // seq(nop); + RzILOpEffect *seq_else_18 = nop_15; + + // if (! 
(((st32) Pt_new) & 0x1)) {seq(Rd = ((st32) ((ut8) mem_load_8(EA))))} else {seq(nop)}; + RzILOpPure *op_AND_8 = LOGAND(CAST(32, MSB(Pt_new), DUP(Pt_new)), SN(32, 1)); + RzILOpPure *op_INV_9 = INV(NON_ZERO(op_AND_8)); + RzILOpEffect *branch_19 = BRANCH(op_INV_9, seq_then_17, seq_else_18); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_3, branch_19); + return instruction_sequence; +} + +// if (!Pv.new) Rd = memub(Rs+Rt<insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Pv_new_op = ISA2REG(hi, 'v', true); + RzILOpPure *Pv_new = READ_REG(pkt, Pv_new_op, true); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // u = u; + RzILOpEffect *imm_assign_3 = SETL("u", u); + + // EA = ((ut32) Rs + (Rt << u)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(Rt, VARL("u")); + RzILOpPure *op_ADD_6 = ADD(Rs, op_LSHIFT_5); + RzILOpEffect *op_ASSIGN_8 = SETL("EA", CAST(32, IL_FALSE, op_ADD_6)); + + // Rd = ((st32) ((ut8) mem_load_8(EA))); + RzILOpPure *ml_EA_16 = LOADW(8, VARL("EA")); + RzILOpEffect *op_ASSIGN_19 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, CAST(8, IL_FALSE, ml_EA_16))); + + // nop; + RzILOpEffect *nop_20 = NOP(); + + // seq(Rd = ((st32) ((ut8) mem_load_8(EA)))); + RzILOpEffect *seq_then_22 = op_ASSIGN_19; + + // seq(nop); + RzILOpEffect *seq_else_23 = nop_20; + + // if (! 
(((st32) Pv_new) & 0x1)) {seq(Rd = ((st32) ((ut8) mem_load_8(EA))))} else {seq(nop)}; + RzILOpPure *op_AND_13 = LOGAND(CAST(32, MSB(Pv_new), DUP(Pv_new)), SN(32, 1)); + RzILOpPure *op_INV_14 = INV(NON_ZERO(op_AND_13)); + RzILOpEffect *branch_24 = BRANCH(op_INV_14, seq_then_22, seq_else_23); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_3, op_ASSIGN_8, branch_24); + return instruction_sequence; +} + +// if (Pt) Rd = memub(Ii) +RzILOpEffect *hex_il_op_l4_ploadrubt_abs(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Pt_op = ISA2REG(hi, 't', false); + RzILOpPure *Pt = READ_REG(pkt, Pt_op, false); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = u; + RzILOpEffect *op_ASSIGN_3 = SETL("EA", VARL("u")); + + // Rd = ((st32) ((ut8) mem_load_8(EA))); + RzILOpPure *ml_EA_10 = LOADW(8, VARL("EA")); + RzILOpEffect *op_ASSIGN_13 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, CAST(8, IL_FALSE, ml_EA_10))); + + // nop; + RzILOpEffect *nop_14 = NOP(); + + // seq(Rd = ((st32) ((ut8) mem_load_8(EA)))); + RzILOpEffect *seq_then_16 = op_ASSIGN_13; + + // seq(nop); + RzILOpEffect *seq_else_17 = nop_14; + + // if ((((st32) Pt) & 0x1)) {seq(Rd = ((st32) ((ut8) mem_load_8(EA))))} else {seq(nop)}; + RzILOpPure *op_AND_8 = LOGAND(CAST(32, MSB(Pt), DUP(Pt)), SN(32, 1)); + RzILOpEffect *branch_18 = BRANCH(NON_ZERO(op_AND_8), seq_then_16, seq_else_17); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_3, branch_18); + return instruction_sequence; +} + +// if (Pv) Rd = memub(Rs+Rt<insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + RzILOpPure *u 
= UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Pv_op = ISA2REG(hi, 'v', false); + RzILOpPure *Pv = READ_REG(pkt, Pv_op, false); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // u = u; + RzILOpEffect *imm_assign_3 = SETL("u", u); + + // EA = ((ut32) Rs + (Rt << u)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(Rt, VARL("u")); + RzILOpPure *op_ADD_6 = ADD(Rs, op_LSHIFT_5); + RzILOpEffect *op_ASSIGN_8 = SETL("EA", CAST(32, IL_FALSE, op_ADD_6)); + + // Rd = ((st32) ((ut8) mem_load_8(EA))); + RzILOpPure *ml_EA_15 = LOADW(8, VARL("EA")); + RzILOpEffect *op_ASSIGN_18 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, CAST(8, IL_FALSE, ml_EA_15))); + + // nop; + RzILOpEffect *nop_19 = NOP(); + + // seq(Rd = ((st32) ((ut8) mem_load_8(EA)))); + RzILOpEffect *seq_then_21 = op_ASSIGN_18; + + // seq(nop); + RzILOpEffect *seq_else_22 = nop_19; + + // if ((((st32) Pv) & 0x1)) {seq(Rd = ((st32) ((ut8) mem_load_8(EA))))} else {seq(nop)}; + RzILOpPure *op_AND_13 = LOGAND(CAST(32, MSB(Pv), DUP(Pv)), SN(32, 1)); + RzILOpEffect *branch_23 = BRANCH(NON_ZERO(op_AND_13), seq_then_21, seq_else_22); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_3, op_ASSIGN_8, branch_23); + return instruction_sequence; +} + +// if (Pt.new) Rd = memub(Ii) +RzILOpEffect *hex_il_op_l4_ploadrubtnew_abs(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Pt_new_op = ISA2REG(hi, 't', true); + RzILOpPure *Pt_new = READ_REG(pkt, Pt_new_op, true); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = u; + RzILOpEffect *op_ASSIGN_3 = SETL("EA", VARL("u")); + + // Rd = ((st32) ((ut8) mem_load_8(EA))); + RzILOpPure *ml_EA_10 = LOADW(8, VARL("EA")); + RzILOpEffect *op_ASSIGN_13 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, CAST(8, IL_FALSE, ml_EA_10))); + + // nop; + RzILOpEffect *nop_14 = NOP(); + + // seq(Rd = 
((st32) ((ut8) mem_load_8(EA)))); + RzILOpEffect *seq_then_16 = op_ASSIGN_13; + + // seq(nop); + RzILOpEffect *seq_else_17 = nop_14; + + // if ((((st32) Pt_new) & 0x1)) {seq(Rd = ((st32) ((ut8) mem_load_8(EA))))} else {seq(nop)}; + RzILOpPure *op_AND_8 = LOGAND(CAST(32, MSB(Pt_new), DUP(Pt_new)), SN(32, 1)); + RzILOpEffect *branch_18 = BRANCH(NON_ZERO(op_AND_8), seq_then_16, seq_else_17); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_3, branch_18); + return instruction_sequence; +} + +// if (Pv.new) Rd = memub(Rs+Rt<insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Pv_new_op = ISA2REG(hi, 'v', true); + RzILOpPure *Pv_new = READ_REG(pkt, Pv_new_op, true); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // u = u; + RzILOpEffect *imm_assign_3 = SETL("u", u); + + // EA = ((ut32) Rs + (Rt << u)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(Rt, VARL("u")); + RzILOpPure *op_ADD_6 = ADD(Rs, op_LSHIFT_5); + RzILOpEffect *op_ASSIGN_8 = SETL("EA", CAST(32, IL_FALSE, op_ADD_6)); + + // Rd = ((st32) ((ut8) mem_load_8(EA))); + RzILOpPure *ml_EA_15 = LOADW(8, VARL("EA")); + RzILOpEffect *op_ASSIGN_18 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, CAST(8, IL_FALSE, ml_EA_15))); + + // nop; + RzILOpEffect *nop_19 = NOP(); + + // seq(Rd = ((st32) ((ut8) mem_load_8(EA)))); + RzILOpEffect *seq_then_21 = op_ASSIGN_18; + + // seq(nop); + RzILOpEffect *seq_else_22 = nop_19; + + // if ((((st32) Pv_new) & 0x1)) {seq(Rd = ((st32) ((ut8) mem_load_8(EA))))} else {seq(nop)}; + RzILOpPure *op_AND_13 = LOGAND(CAST(32, MSB(Pv_new), DUP(Pv_new)), SN(32, 1)); + RzILOpEffect *branch_23 = BRANCH(NON_ZERO(op_AND_13), seq_then_21, seq_else_22); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_3, 
op_ASSIGN_8, branch_23); + return instruction_sequence; +} + +// if (!Pt) Rd = memuh(Ii) +RzILOpEffect *hex_il_op_l4_ploadruhf_abs(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Pt_op = ISA2REG(hi, 't', false); + RzILOpPure *Pt = READ_REG(pkt, Pt_op, false); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = u; + RzILOpEffect *op_ASSIGN_3 = SETL("EA", VARL("u")); + + // Rd = ((st32) ((ut16) mem_load_16(EA))); + RzILOpPure *ml_EA_11 = LOADW(16, VARL("EA")); + RzILOpEffect *op_ASSIGN_14 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, CAST(16, IL_FALSE, ml_EA_11))); + + // nop; + RzILOpEffect *nop_15 = NOP(); + + // seq(Rd = ((st32) ((ut16) mem_load_16(EA)))); + RzILOpEffect *seq_then_17 = op_ASSIGN_14; + + // seq(nop); + RzILOpEffect *seq_else_18 = nop_15; + + // if (! (((st32) Pt) & 0x1)) {seq(Rd = ((st32) ((ut16) mem_load_16(EA))))} else {seq(nop)}; + RzILOpPure *op_AND_8 = LOGAND(CAST(32, MSB(Pt), DUP(Pt)), SN(32, 1)); + RzILOpPure *op_INV_9 = INV(NON_ZERO(op_AND_8)); + RzILOpEffect *branch_19 = BRANCH(op_INV_9, seq_then_17, seq_else_18); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_3, branch_19); + return instruction_sequence; +} + +// if (!Pv) Rd = memuh(Rs+Rt<insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Pv_op = ISA2REG(hi, 'v', false); + RzILOpPure *Pv = READ_REG(pkt, Pv_op, false); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // u = u; + RzILOpEffect *imm_assign_3 = SETL("u", u); + + // EA = ((ut32) Rs + (Rt << u)); + RzILOpPure *op_LSHIFT_5 = 
SHIFTL0(Rt, VARL("u")); + RzILOpPure *op_ADD_6 = ADD(Rs, op_LSHIFT_5); + RzILOpEffect *op_ASSIGN_8 = SETL("EA", CAST(32, IL_FALSE, op_ADD_6)); + + // Rd = ((st32) ((ut16) mem_load_16(EA))); + RzILOpPure *ml_EA_16 = LOADW(16, VARL("EA")); + RzILOpEffect *op_ASSIGN_19 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, CAST(16, IL_FALSE, ml_EA_16))); + + // nop; + RzILOpEffect *nop_20 = NOP(); + + // seq(Rd = ((st32) ((ut16) mem_load_16(EA)))); + RzILOpEffect *seq_then_22 = op_ASSIGN_19; + + // seq(nop); + RzILOpEffect *seq_else_23 = nop_20; + + // if (! (((st32) Pv) & 0x1)) {seq(Rd = ((st32) ((ut16) mem_load_16(EA))))} else {seq(nop)}; + RzILOpPure *op_AND_13 = LOGAND(CAST(32, MSB(Pv), DUP(Pv)), SN(32, 1)); + RzILOpPure *op_INV_14 = INV(NON_ZERO(op_AND_13)); + RzILOpEffect *branch_24 = BRANCH(op_INV_14, seq_then_22, seq_else_23); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_3, op_ASSIGN_8, branch_24); + return instruction_sequence; +} + +// if (!Pt.new) Rd = memuh(Ii) +RzILOpEffect *hex_il_op_l4_ploadruhfnew_abs(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Pt_new_op = ISA2REG(hi, 't', true); + RzILOpPure *Pt_new = READ_REG(pkt, Pt_new_op, true); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = u; + RzILOpEffect *op_ASSIGN_3 = SETL("EA", VARL("u")); + + // Rd = ((st32) ((ut16) mem_load_16(EA))); + RzILOpPure *ml_EA_11 = LOADW(16, VARL("EA")); + RzILOpEffect *op_ASSIGN_14 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, CAST(16, IL_FALSE, ml_EA_11))); + + // nop; + RzILOpEffect *nop_15 = NOP(); + + // seq(Rd = ((st32) ((ut16) mem_load_16(EA)))); + RzILOpEffect *seq_then_17 = op_ASSIGN_14; + + // seq(nop); + RzILOpEffect *seq_else_18 = nop_15; + + // if (! 
(((st32) Pt_new) & 0x1)) {seq(Rd = ((st32) ((ut16) mem_load_16(EA))))} else {seq(nop)}; + RzILOpPure *op_AND_8 = LOGAND(CAST(32, MSB(Pt_new), DUP(Pt_new)), SN(32, 1)); + RzILOpPure *op_INV_9 = INV(NON_ZERO(op_AND_8)); + RzILOpEffect *branch_19 = BRANCH(op_INV_9, seq_then_17, seq_else_18); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_3, branch_19); + return instruction_sequence; +} + +// if (!Pv.new) Rd = memuh(Rs+Rt<insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Pv_new_op = ISA2REG(hi, 'v', true); + RzILOpPure *Pv_new = READ_REG(pkt, Pv_new_op, true); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // u = u; + RzILOpEffect *imm_assign_3 = SETL("u", u); + + // EA = ((ut32) Rs + (Rt << u)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(Rt, VARL("u")); + RzILOpPure *op_ADD_6 = ADD(Rs, op_LSHIFT_5); + RzILOpEffect *op_ASSIGN_8 = SETL("EA", CAST(32, IL_FALSE, op_ADD_6)); + + // Rd = ((st32) ((ut16) mem_load_16(EA))); + RzILOpPure *ml_EA_16 = LOADW(16, VARL("EA")); + RzILOpEffect *op_ASSIGN_19 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, CAST(16, IL_FALSE, ml_EA_16))); + + // nop; + RzILOpEffect *nop_20 = NOP(); + + // seq(Rd = ((st32) ((ut16) mem_load_16(EA)))); + RzILOpEffect *seq_then_22 = op_ASSIGN_19; + + // seq(nop); + RzILOpEffect *seq_else_23 = nop_20; + + // if (! 
(((st32) Pv_new) & 0x1)) {seq(Rd = ((st32) ((ut16) mem_load_16(EA))))} else {seq(nop)}; + RzILOpPure *op_AND_13 = LOGAND(CAST(32, MSB(Pv_new), DUP(Pv_new)), SN(32, 1)); + RzILOpPure *op_INV_14 = INV(NON_ZERO(op_AND_13)); + RzILOpEffect *branch_24 = BRANCH(op_INV_14, seq_then_22, seq_else_23); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_3, op_ASSIGN_8, branch_24); + return instruction_sequence; +} + +// if (Pt) Rd = memuh(Ii) +RzILOpEffect *hex_il_op_l4_ploadruht_abs(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Pt_op = ISA2REG(hi, 't', false); + RzILOpPure *Pt = READ_REG(pkt, Pt_op, false); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = u; + RzILOpEffect *op_ASSIGN_3 = SETL("EA", VARL("u")); + + // Rd = ((st32) ((ut16) mem_load_16(EA))); + RzILOpPure *ml_EA_10 = LOADW(16, VARL("EA")); + RzILOpEffect *op_ASSIGN_13 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, CAST(16, IL_FALSE, ml_EA_10))); + + // nop; + RzILOpEffect *nop_14 = NOP(); + + // seq(Rd = ((st32) ((ut16) mem_load_16(EA)))); + RzILOpEffect *seq_then_16 = op_ASSIGN_13; + + // seq(nop); + RzILOpEffect *seq_else_17 = nop_14; + + // if ((((st32) Pt) & 0x1)) {seq(Rd = ((st32) ((ut16) mem_load_16(EA))))} else {seq(nop)}; + RzILOpPure *op_AND_8 = LOGAND(CAST(32, MSB(Pt), DUP(Pt)), SN(32, 1)); + RzILOpEffect *branch_18 = BRANCH(NON_ZERO(op_AND_8), seq_then_16, seq_else_17); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_3, branch_18); + return instruction_sequence; +} + +// if (Pv) Rd = memuh(Rs+Rt<insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + 
RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Pv_op = ISA2REG(hi, 'v', false); + RzILOpPure *Pv = READ_REG(pkt, Pv_op, false); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // u = u; + RzILOpEffect *imm_assign_3 = SETL("u", u); + + // EA = ((ut32) Rs + (Rt << u)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(Rt, VARL("u")); + RzILOpPure *op_ADD_6 = ADD(Rs, op_LSHIFT_5); + RzILOpEffect *op_ASSIGN_8 = SETL("EA", CAST(32, IL_FALSE, op_ADD_6)); + + // Rd = ((st32) ((ut16) mem_load_16(EA))); + RzILOpPure *ml_EA_15 = LOADW(16, VARL("EA")); + RzILOpEffect *op_ASSIGN_18 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, CAST(16, IL_FALSE, ml_EA_15))); + + // nop; + RzILOpEffect *nop_19 = NOP(); + + // seq(Rd = ((st32) ((ut16) mem_load_16(EA)))); + RzILOpEffect *seq_then_21 = op_ASSIGN_18; + + // seq(nop); + RzILOpEffect *seq_else_22 = nop_19; + + // if ((((st32) Pv) & 0x1)) {seq(Rd = ((st32) ((ut16) mem_load_16(EA))))} else {seq(nop)}; + RzILOpPure *op_AND_13 = LOGAND(CAST(32, MSB(Pv), DUP(Pv)), SN(32, 1)); + RzILOpEffect *branch_23 = BRANCH(NON_ZERO(op_AND_13), seq_then_21, seq_else_22); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_3, op_ASSIGN_8, branch_23); + return instruction_sequence; +} + +// if (Pt.new) Rd = memuh(Ii) +RzILOpEffect *hex_il_op_l4_ploadruhtnew_abs(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Pt_new_op = ISA2REG(hi, 't', true); + RzILOpPure *Pt_new = READ_REG(pkt, Pt_new_op, true); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = u; + RzILOpEffect *op_ASSIGN_3 = SETL("EA", VARL("u")); + + // Rd = ((st32) ((ut16) mem_load_16(EA))); + RzILOpPure *ml_EA_10 = LOADW(16, VARL("EA")); + RzILOpEffect *op_ASSIGN_13 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, CAST(16, IL_FALSE, ml_EA_10))); + + // nop; + RzILOpEffect 
*nop_14 = NOP(); + + // seq(Rd = ((st32) ((ut16) mem_load_16(EA)))); + RzILOpEffect *seq_then_16 = op_ASSIGN_13; + + // seq(nop); + RzILOpEffect *seq_else_17 = nop_14; + + // if ((((st32) Pt_new) & 0x1)) {seq(Rd = ((st32) ((ut16) mem_load_16(EA))))} else {seq(nop)}; + RzILOpPure *op_AND_8 = LOGAND(CAST(32, MSB(Pt_new), DUP(Pt_new)), SN(32, 1)); + RzILOpEffect *branch_18 = BRANCH(NON_ZERO(op_AND_8), seq_then_16, seq_else_17); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_3, branch_18); + return instruction_sequence; +} + +// if (Pv.new) Rd = memuh(Rs+Rt<insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Pv_new_op = ISA2REG(hi, 'v', true); + RzILOpPure *Pv_new = READ_REG(pkt, Pv_new_op, true); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // u = u; + RzILOpEffect *imm_assign_3 = SETL("u", u); + + // EA = ((ut32) Rs + (Rt << u)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(Rt, VARL("u")); + RzILOpPure *op_ADD_6 = ADD(Rs, op_LSHIFT_5); + RzILOpEffect *op_ASSIGN_8 = SETL("EA", CAST(32, IL_FALSE, op_ADD_6)); + + // Rd = ((st32) ((ut16) mem_load_16(EA))); + RzILOpPure *ml_EA_15 = LOADW(16, VARL("EA")); + RzILOpEffect *op_ASSIGN_18 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, CAST(16, IL_FALSE, ml_EA_15))); + + // nop; + RzILOpEffect *nop_19 = NOP(); + + // seq(Rd = ((st32) ((ut16) mem_load_16(EA)))); + RzILOpEffect *seq_then_21 = op_ASSIGN_18; + + // seq(nop); + RzILOpEffect *seq_else_22 = nop_19; + + // if ((((st32) Pv_new) & 0x1)) {seq(Rd = ((st32) ((ut16) mem_load_16(EA))))} else {seq(nop)}; + RzILOpPure *op_AND_13 = LOGAND(CAST(32, MSB(Pv_new), DUP(Pv_new)), SN(32, 1)); + RzILOpEffect *branch_23 = BRANCH(NON_ZERO(op_AND_13), seq_then_21, seq_else_22); + + RzILOpEffect 
*instruction_sequence = SEQN(3, imm_assign_3, op_ASSIGN_8, branch_23); + return instruction_sequence; +} + +// Rdd = dealloc_return(Rs):raw +RzILOpEffect *hex_il_op_l4_return(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut64 tmp; + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp framekey_op = ALIAS2OP(HEX_REG_ALIAS_FRAMEKEY, false); + RzILOpPure *framekey = READ_REG(pkt, &framekey_op, false); + const HexOp sp_op = ALIAS2OP(HEX_REG_ALIAS_SP, false); + + // EA = ((ut32) Rs); + RzILOpEffect *op_ASSIGN_4 = SETL("EA", CAST(32, IL_FALSE, Rs)); + + // tmp = ((ut64) mem_load_64(EA)); + RzILOpPure *ml_EA_6 = LOADW(64, VARL("EA")); + RzILOpEffect *op_ASSIGN_8 = SETL("tmp", CAST(64, IL_FALSE, ml_EA_6)); + + // Rdd = ((st64) (tmp ^ (((ut64) framekey) << 0x20))); + RzILOpPure *op_LSHIFT_13 = SHIFTL0(CAST(64, IL_FALSE, framekey), SN(32, 0x20)); + RzILOpPure *op_XOR_14 = LOGXOR(VARL("tmp"), op_LSHIFT_13); + RzILOpEffect *op_ASSIGN_16 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_XOR_14)); + + // sp = EA + ((ut32) 0x8); + RzILOpPure *op_ADD_20 = ADD(VARL("EA"), CAST(32, IL_FALSE, SN(32, 8))); + RzILOpEffect *op_ASSIGN_21 = WRITE_REG(bundle, &sp_op, op_ADD_20); + + // jump(((ut32) ((st64) ((st32) ((Rdd >> 0x20) & 0xffffffff))))); + RzILOpPure *op_RSHIFT_25 = SHIFTRA(READ_REG(pkt, Rdd_op, true), SN(32, 0x20)); + RzILOpPure *op_AND_27 = LOGAND(op_RSHIFT_25, SN(64, 0xffffffff)); + RzILOpEffect *jump_cast_ut32_30_31 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", CAST(32, IL_FALSE, CAST(64, MSB(CAST(32, MSB(op_AND_27), DUP(op_AND_27))), CAST(32, MSB(DUP(op_AND_27)), DUP(op_AND_27)))))); + + RzILOpEffect *instruction_sequence = SEQN(5, op_ASSIGN_4, op_ASSIGN_8, op_ASSIGN_16, op_ASSIGN_21, jump_cast_ut32_30_31); + return instruction_sequence; +} + +// if (!Pv) Rdd = 
dealloc_return(Rs):raw +RzILOpEffect *hex_il_op_l4_return_f(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut64 tmp; + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Pv_op = ISA2REG(hi, 'v', false); + RzILOpPure *Pv = READ_REG(pkt, Pv_op, false); + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp framekey_op = ALIAS2OP(HEX_REG_ALIAS_FRAMEKEY, false); + RzILOpPure *framekey = READ_REG(pkt, &framekey_op, false); + const HexOp sp_op = ALIAS2OP(HEX_REG_ALIAS_SP, false); + + // EA = ((ut32) Rs); + RzILOpEffect *op_ASSIGN_5 = SETL("EA", CAST(32, IL_FALSE, Rs)); + + // tmp = ((ut64) mem_load_64(EA)); + RzILOpPure *ml_EA_12 = LOADW(64, VARL("EA")); + RzILOpEffect *op_ASSIGN_14 = SETL("tmp", CAST(64, IL_FALSE, ml_EA_12)); + + // Rdd = ((st64) (tmp ^ (((ut64) framekey) << 0x20))); + RzILOpPure *op_LSHIFT_19 = SHIFTL0(CAST(64, IL_FALSE, framekey), SN(32, 0x20)); + RzILOpPure *op_XOR_20 = LOGXOR(VARL("tmp"), op_LSHIFT_19); + RzILOpEffect *op_ASSIGN_22 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_XOR_20)); + + // sp = EA + ((ut32) 0x8); + RzILOpPure *op_ADD_26 = ADD(VARL("EA"), CAST(32, IL_FALSE, SN(32, 8))); + RzILOpEffect *op_ASSIGN_27 = WRITE_REG(bundle, &sp_op, op_ADD_26); + + // jump(((ut32) ((st64) ((st32) ((Rdd >> 0x20) & 0xffffffff))))); + RzILOpPure *op_RSHIFT_31 = SHIFTRA(READ_REG(pkt, Rdd_op, true), SN(32, 0x20)); + RzILOpPure *op_AND_33 = LOGAND(op_RSHIFT_31, SN(64, 0xffffffff)); + RzILOpEffect *jump_cast_ut32_36_37 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", CAST(32, IL_FALSE, CAST(64, MSB(CAST(32, MSB(op_AND_33), DUP(op_AND_33))), CAST(32, MSB(DUP(op_AND_33)), DUP(op_AND_33)))))); + + // nop; + RzILOpEffect *nop_39 = NOP(); + + // seq(tmp = ((ut64) mem_load_64(EA)); Rdd = ((st64) (tmp ^ (((ut64 ...; + RzILOpEffect *seq_then_41 = SEQN(4, op_ASSIGN_14, op_ASSIGN_22, op_ASSIGN_27, 
jump_cast_ut32_36_37); + + // seq(nop); + RzILOpEffect *seq_else_42 = nop_39; + + // if (! (((st32) Pv) & 0x1)) {seq(tmp = ((ut64) mem_load_64(EA)); Rdd = ((st64) (tmp ^ (((ut64 ...} else {seq(nop)}; + RzILOpPure *op_AND_10 = LOGAND(CAST(32, MSB(Pv), DUP(Pv)), SN(32, 1)); + RzILOpPure *op_INV_11 = INV(NON_ZERO(op_AND_10)); + RzILOpEffect *branch_43 = BRANCH(op_INV_11, seq_then_41, seq_else_42); + + RzILOpEffect *instruction_sequence = SEQN(2, op_ASSIGN_5, branch_43); + return instruction_sequence; +} + +// if (!Pv.new) Rdd = dealloc_return(Rs):nt:raw +RzILOpEffect *hex_il_op_l4_return_fnew_pnt(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut64 tmp; + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Pv_new_op = ISA2REG(hi, 'v', true); + RzILOpPure *Pv_new = READ_REG(pkt, Pv_new_op, true); + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp framekey_op = ALIAS2OP(HEX_REG_ALIAS_FRAMEKEY, false); + RzILOpPure *framekey = READ_REG(pkt, &framekey_op, false); + const HexOp sp_op = ALIAS2OP(HEX_REG_ALIAS_SP, false); + + // EA = ((ut32) Rs); + RzILOpEffect *op_ASSIGN_5 = SETL("EA", CAST(32, IL_FALSE, Rs)); + + // tmp = ((ut64) mem_load_64(EA)); + RzILOpPure *ml_EA_12 = LOADW(64, VARL("EA")); + RzILOpEffect *op_ASSIGN_14 = SETL("tmp", CAST(64, IL_FALSE, ml_EA_12)); + + // Rdd = ((st64) (tmp ^ (((ut64) framekey) << 0x20))); + RzILOpPure *op_LSHIFT_19 = SHIFTL0(CAST(64, IL_FALSE, framekey), SN(32, 0x20)); + RzILOpPure *op_XOR_20 = LOGXOR(VARL("tmp"), op_LSHIFT_19); + RzILOpEffect *op_ASSIGN_22 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_XOR_20)); + + // sp = EA + ((ut32) 0x8); + RzILOpPure *op_ADD_26 = ADD(VARL("EA"), CAST(32, IL_FALSE, SN(32, 8))); + RzILOpEffect *op_ASSIGN_27 = WRITE_REG(bundle, &sp_op, op_ADD_26); + + // jump(((ut32) ((st64) ((st32) ((Rdd >> 0x20) & 0xffffffff))))); + RzILOpPure 
*op_RSHIFT_31 = SHIFTRA(READ_REG(pkt, Rdd_op, true), SN(32, 0x20)); + RzILOpPure *op_AND_33 = LOGAND(op_RSHIFT_31, SN(64, 0xffffffff)); + RzILOpEffect *jump_cast_ut32_36_37 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", CAST(32, IL_FALSE, CAST(64, MSB(CAST(32, MSB(op_AND_33), DUP(op_AND_33))), CAST(32, MSB(DUP(op_AND_33)), DUP(op_AND_33)))))); + + // nop; + RzILOpEffect *nop_39 = NOP(); + + // seq(tmp = ((ut64) mem_load_64(EA)); Rdd = ((st64) (tmp ^ (((ut64 ...; + RzILOpEffect *seq_then_41 = SEQN(4, op_ASSIGN_14, op_ASSIGN_22, op_ASSIGN_27, jump_cast_ut32_36_37); + + // seq(nop); + RzILOpEffect *seq_else_42 = nop_39; + + // if (! (((st32) Pv_new) & 0x1)) {seq(tmp = ((ut64) mem_load_64(EA)); Rdd = ((st64) (tmp ^ (((ut64 ...} else {seq(nop)}; + RzILOpPure *op_AND_10 = LOGAND(CAST(32, MSB(Pv_new), DUP(Pv_new)), SN(32, 1)); + RzILOpPure *op_INV_11 = INV(NON_ZERO(op_AND_10)); + RzILOpEffect *branch_43 = BRANCH(op_INV_11, seq_then_41, seq_else_42); + + RzILOpEffect *instruction_sequence = SEQN(2, op_ASSIGN_5, branch_43); + return instruction_sequence; +} + +// if (!Pv.new) Rdd = dealloc_return(Rs):t:raw +RzILOpEffect *hex_il_op_l4_return_fnew_pt(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut64 tmp; + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Pv_new_op = ISA2REG(hi, 'v', true); + RzILOpPure *Pv_new = READ_REG(pkt, Pv_new_op, true); + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp framekey_op = ALIAS2OP(HEX_REG_ALIAS_FRAMEKEY, false); + RzILOpPure *framekey = READ_REG(pkt, &framekey_op, false); + const HexOp sp_op = ALIAS2OP(HEX_REG_ALIAS_SP, false); + + // EA = ((ut32) Rs); + RzILOpEffect *op_ASSIGN_5 = SETL("EA", CAST(32, IL_FALSE, Rs)); + + // tmp = ((ut64) mem_load_64(EA)); + RzILOpPure *ml_EA_12 = LOADW(64, VARL("EA")); + RzILOpEffect *op_ASSIGN_14 = SETL("tmp", CAST(64, IL_FALSE, 
ml_EA_12)); + + // Rdd = ((st64) (tmp ^ (((ut64) framekey) << 0x20))); + RzILOpPure *op_LSHIFT_19 = SHIFTL0(CAST(64, IL_FALSE, framekey), SN(32, 0x20)); + RzILOpPure *op_XOR_20 = LOGXOR(VARL("tmp"), op_LSHIFT_19); + RzILOpEffect *op_ASSIGN_22 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_XOR_20)); + + // sp = EA + ((ut32) 0x8); + RzILOpPure *op_ADD_26 = ADD(VARL("EA"), CAST(32, IL_FALSE, SN(32, 8))); + RzILOpEffect *op_ASSIGN_27 = WRITE_REG(bundle, &sp_op, op_ADD_26); + + // jump(((ut32) ((st64) ((st32) ((Rdd >> 0x20) & 0xffffffff))))); + RzILOpPure *op_RSHIFT_31 = SHIFTRA(READ_REG(pkt, Rdd_op, true), SN(32, 0x20)); + RzILOpPure *op_AND_33 = LOGAND(op_RSHIFT_31, SN(64, 0xffffffff)); + RzILOpEffect *jump_cast_ut32_36_37 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", CAST(32, IL_FALSE, CAST(64, MSB(CAST(32, MSB(op_AND_33), DUP(op_AND_33))), CAST(32, MSB(DUP(op_AND_33)), DUP(op_AND_33)))))); + + // nop; + RzILOpEffect *nop_39 = NOP(); + + // seq(tmp = ((ut64) mem_load_64(EA)); Rdd = ((st64) (tmp ^ (((ut64 ...; + RzILOpEffect *seq_then_41 = SEQN(4, op_ASSIGN_14, op_ASSIGN_22, op_ASSIGN_27, jump_cast_ut32_36_37); + + // seq(nop); + RzILOpEffect *seq_else_42 = nop_39; + + // if (! 
(((st32) Pv_new) & 0x1)) {seq(tmp = ((ut64) mem_load_64(EA)); Rdd = ((st64) (tmp ^ (((ut64 ...} else {seq(nop)}; + RzILOpPure *op_AND_10 = LOGAND(CAST(32, MSB(Pv_new), DUP(Pv_new)), SN(32, 1)); + RzILOpPure *op_INV_11 = INV(NON_ZERO(op_AND_10)); + RzILOpEffect *branch_43 = BRANCH(op_INV_11, seq_then_41, seq_else_42); + + RzILOpEffect *instruction_sequence = SEQN(2, op_ASSIGN_5, branch_43); + return instruction_sequence; +} + +// if (Pv) Rdd = dealloc_return(Rs):raw +RzILOpEffect *hex_il_op_l4_return_t(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut64 tmp; + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Pv_op = ISA2REG(hi, 'v', false); + RzILOpPure *Pv = READ_REG(pkt, Pv_op, false); + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp framekey_op = ALIAS2OP(HEX_REG_ALIAS_FRAMEKEY, false); + RzILOpPure *framekey = READ_REG(pkt, &framekey_op, false); + const HexOp sp_op = ALIAS2OP(HEX_REG_ALIAS_SP, false); + + // EA = ((ut32) Rs); + RzILOpEffect *op_ASSIGN_5 = SETL("EA", CAST(32, IL_FALSE, Rs)); + + // tmp = ((ut64) mem_load_64(EA)); + RzILOpPure *ml_EA_11 = LOADW(64, VARL("EA")); + RzILOpEffect *op_ASSIGN_13 = SETL("tmp", CAST(64, IL_FALSE, ml_EA_11)); + + // Rdd = ((st64) (tmp ^ (((ut64) framekey) << 0x20))); + RzILOpPure *op_LSHIFT_18 = SHIFTL0(CAST(64, IL_FALSE, framekey), SN(32, 0x20)); + RzILOpPure *op_XOR_19 = LOGXOR(VARL("tmp"), op_LSHIFT_18); + RzILOpEffect *op_ASSIGN_21 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_XOR_19)); + + // sp = EA + ((ut32) 0x8); + RzILOpPure *op_ADD_25 = ADD(VARL("EA"), CAST(32, IL_FALSE, SN(32, 8))); + RzILOpEffect *op_ASSIGN_26 = WRITE_REG(bundle, &sp_op, op_ADD_25); + + // jump(((ut32) ((st64) ((st32) ((Rdd >> 0x20) & 0xffffffff))))); + RzILOpPure *op_RSHIFT_30 = SHIFTRA(READ_REG(pkt, Rdd_op, true), SN(32, 0x20)); + RzILOpPure *op_AND_32 = LOGAND(op_RSHIFT_30, 
SN(64, 0xffffffff)); + RzILOpEffect *jump_cast_ut32_35_36 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", CAST(32, IL_FALSE, CAST(64, MSB(CAST(32, MSB(op_AND_32), DUP(op_AND_32))), CAST(32, MSB(DUP(op_AND_32)), DUP(op_AND_32)))))); + + // nop; + RzILOpEffect *nop_38 = NOP(); + + // seq(tmp = ((ut64) mem_load_64(EA)); Rdd = ((st64) (tmp ^ (((ut64 ...; + RzILOpEffect *seq_then_40 = SEQN(4, op_ASSIGN_13, op_ASSIGN_21, op_ASSIGN_26, jump_cast_ut32_35_36); + + // seq(nop); + RzILOpEffect *seq_else_41 = nop_38; + + // if ((((st32) Pv) & 0x1)) {seq(tmp = ((ut64) mem_load_64(EA)); Rdd = ((st64) (tmp ^ (((ut64 ...} else {seq(nop)}; + RzILOpPure *op_AND_10 = LOGAND(CAST(32, MSB(Pv), DUP(Pv)), SN(32, 1)); + RzILOpEffect *branch_42 = BRANCH(NON_ZERO(op_AND_10), seq_then_40, seq_else_41); + + RzILOpEffect *instruction_sequence = SEQN(2, op_ASSIGN_5, branch_42); + return instruction_sequence; +} + +// if (Pv.new) Rdd = dealloc_return(Rs):nt:raw +RzILOpEffect *hex_il_op_l4_return_tnew_pnt(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut64 tmp; + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Pv_new_op = ISA2REG(hi, 'v', true); + RzILOpPure *Pv_new = READ_REG(pkt, Pv_new_op, true); + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp framekey_op = ALIAS2OP(HEX_REG_ALIAS_FRAMEKEY, false); + RzILOpPure *framekey = READ_REG(pkt, &framekey_op, false); + const HexOp sp_op = ALIAS2OP(HEX_REG_ALIAS_SP, false); + + // EA = ((ut32) Rs); + RzILOpEffect *op_ASSIGN_5 = SETL("EA", CAST(32, IL_FALSE, Rs)); + + // tmp = ((ut64) mem_load_64(EA)); + RzILOpPure *ml_EA_11 = LOADW(64, VARL("EA")); + RzILOpEffect *op_ASSIGN_13 = SETL("tmp", CAST(64, IL_FALSE, ml_EA_11)); + + // Rdd = ((st64) (tmp ^ (((ut64) framekey) << 0x20))); + RzILOpPure *op_LSHIFT_18 = SHIFTL0(CAST(64, IL_FALSE, framekey), SN(32, 0x20)); + RzILOpPure 
*op_XOR_19 = LOGXOR(VARL("tmp"), op_LSHIFT_18); + RzILOpEffect *op_ASSIGN_21 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_XOR_19)); + + // sp = EA + ((ut32) 0x8); + RzILOpPure *op_ADD_25 = ADD(VARL("EA"), CAST(32, IL_FALSE, SN(32, 8))); + RzILOpEffect *op_ASSIGN_26 = WRITE_REG(bundle, &sp_op, op_ADD_25); + + // jump(((ut32) ((st64) ((st32) ((Rdd >> 0x20) & 0xffffffff))))); + RzILOpPure *op_RSHIFT_30 = SHIFTRA(READ_REG(pkt, Rdd_op, true), SN(32, 0x20)); + RzILOpPure *op_AND_32 = LOGAND(op_RSHIFT_30, SN(64, 0xffffffff)); + RzILOpEffect *jump_cast_ut32_35_36 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", CAST(32, IL_FALSE, CAST(64, MSB(CAST(32, MSB(op_AND_32), DUP(op_AND_32))), CAST(32, MSB(DUP(op_AND_32)), DUP(op_AND_32)))))); + + // nop; + RzILOpEffect *nop_38 = NOP(); + + // seq(tmp = ((ut64) mem_load_64(EA)); Rdd = ((st64) (tmp ^ (((ut64 ...; + RzILOpEffect *seq_then_40 = SEQN(4, op_ASSIGN_13, op_ASSIGN_21, op_ASSIGN_26, jump_cast_ut32_35_36); + + // seq(nop); + RzILOpEffect *seq_else_41 = nop_38; + + // if ((((st32) Pv_new) & 0x1)) {seq(tmp = ((ut64) mem_load_64(EA)); Rdd = ((st64) (tmp ^ (((ut64 ...} else {seq(nop)}; + RzILOpPure *op_AND_10 = LOGAND(CAST(32, MSB(Pv_new), DUP(Pv_new)), SN(32, 1)); + RzILOpEffect *branch_42 = BRANCH(NON_ZERO(op_AND_10), seq_then_40, seq_else_41); + + RzILOpEffect *instruction_sequence = SEQN(2, op_ASSIGN_5, branch_42); + return instruction_sequence; +} + +// if (Pv.new) Rdd = dealloc_return(Rs):t:raw +RzILOpEffect *hex_il_op_l4_return_tnew_pt(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut64 tmp; + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Pv_new_op = ISA2REG(hi, 'v', true); + RzILOpPure *Pv_new = READ_REG(pkt, Pv_new_op, true); + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp framekey_op = ALIAS2OP(HEX_REG_ALIAS_FRAMEKEY, false); + RzILOpPure 
*framekey = READ_REG(pkt, &framekey_op, false); + const HexOp sp_op = ALIAS2OP(HEX_REG_ALIAS_SP, false); + + // EA = ((ut32) Rs); + RzILOpEffect *op_ASSIGN_5 = SETL("EA", CAST(32, IL_FALSE, Rs)); + + // tmp = ((ut64) mem_load_64(EA)); + RzILOpPure *ml_EA_11 = LOADW(64, VARL("EA")); + RzILOpEffect *op_ASSIGN_13 = SETL("tmp", CAST(64, IL_FALSE, ml_EA_11)); + + // Rdd = ((st64) (tmp ^ (((ut64) framekey) << 0x20))); + RzILOpPure *op_LSHIFT_18 = SHIFTL0(CAST(64, IL_FALSE, framekey), SN(32, 0x20)); + RzILOpPure *op_XOR_19 = LOGXOR(VARL("tmp"), op_LSHIFT_18); + RzILOpEffect *op_ASSIGN_21 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_XOR_19)); + + // sp = EA + ((ut32) 0x8); + RzILOpPure *op_ADD_25 = ADD(VARL("EA"), CAST(32, IL_FALSE, SN(32, 8))); + RzILOpEffect *op_ASSIGN_26 = WRITE_REG(bundle, &sp_op, op_ADD_25); + + // jump(((ut32) ((st64) ((st32) ((Rdd >> 0x20) & 0xffffffff))))); + RzILOpPure *op_RSHIFT_30 = SHIFTRA(READ_REG(pkt, Rdd_op, true), SN(32, 0x20)); + RzILOpPure *op_AND_32 = LOGAND(op_RSHIFT_30, SN(64, 0xffffffff)); + RzILOpEffect *jump_cast_ut32_35_36 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", CAST(32, IL_FALSE, CAST(64, MSB(CAST(32, MSB(op_AND_32), DUP(op_AND_32))), CAST(32, MSB(DUP(op_AND_32)), DUP(op_AND_32)))))); + + // nop; + RzILOpEffect *nop_38 = NOP(); + + // seq(tmp = ((ut64) mem_load_64(EA)); Rdd = ((st64) (tmp ^ (((ut64 ...; + RzILOpEffect *seq_then_40 = SEQN(4, op_ASSIGN_13, op_ASSIGN_21, op_ASSIGN_26, jump_cast_ut32_35_36); + + // seq(nop); + RzILOpEffect *seq_else_41 = nop_38; + + // if ((((st32) Pv_new) & 0x1)) {seq(tmp = ((ut64) mem_load_64(EA)); Rdd = ((st64) (tmp ^ (((ut64 ...} else {seq(nop)}; + RzILOpPure *op_AND_10 = LOGAND(CAST(32, MSB(Pv_new), DUP(Pv_new)), SN(32, 1)); + RzILOpEffect *branch_42 = BRANCH(NON_ZERO(op_AND_10), seq_then_40, seq_else_41); + + RzILOpEffect *instruction_sequence = SEQN(2, op_ASSIGN_5, branch_42); + return instruction_sequence; +} + +// memb(Rs+Ii) -= Rt +RzILOpEffect 
*hex_il_op_l4_sub_memopb_io(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + // Declare: st32 tmp; + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = ((ut32) Rs) + u; + RzILOpPure *op_ADD_5 = ADD(CAST(32, IL_FALSE, Rs), VARL("u")); + RzILOpEffect *op_ASSIGN_6 = SETL("EA", op_ADD_5); + + // tmp = ((st32) ((st8) mem_load_8(EA))); + RzILOpPure *ml_EA_9 = LOADW(8, VARL("EA")); + RzILOpEffect *op_ASSIGN_12 = SETL("tmp", CAST(32, MSB(CAST(8, MSB(ml_EA_9), DUP(ml_EA_9))), CAST(8, MSB(DUP(ml_EA_9)), DUP(ml_EA_9)))); + + // tmp = tmp - Rt; + RzILOpPure *op_SUB_14 = SUB(VARL("tmp"), Rt); + RzILOpEffect *op_ASSIGN_SUB_15 = SETL("tmp", op_SUB_14); + + // mem_store_ut8(EA, ((ut8) tmp)); + RzILOpEffect *ms_cast_ut8_16_17 = STOREW(VARL("EA"), CAST(8, IL_FALSE, VARL("tmp"))); + + RzILOpEffect *instruction_sequence = SEQN(5, imm_assign_0, op_ASSIGN_6, op_ASSIGN_12, op_ASSIGN_SUB_15, ms_cast_ut8_16_17); + return instruction_sequence; +} + +// memh(Rs+Ii) -= Rt +RzILOpEffect *hex_il_op_l4_sub_memoph_io(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + // Declare: st32 tmp; + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = ((ut32) Rs) + u; + RzILOpPure *op_ADD_5 = ADD(CAST(32, IL_FALSE, Rs), VARL("u")); + RzILOpEffect *op_ASSIGN_6 = SETL("EA", op_ADD_5); + + // tmp = ((st32) ((st16) mem_load_16(EA))); + RzILOpPure 
*ml_EA_9 = LOADW(16, VARL("EA")); + RzILOpEffect *op_ASSIGN_12 = SETL("tmp", CAST(32, MSB(CAST(16, MSB(ml_EA_9), DUP(ml_EA_9))), CAST(16, MSB(DUP(ml_EA_9)), DUP(ml_EA_9)))); + + // tmp = tmp - Rt; + RzILOpPure *op_SUB_14 = SUB(VARL("tmp"), Rt); + RzILOpEffect *op_ASSIGN_SUB_15 = SETL("tmp", op_SUB_14); + + // mem_store_ut16(EA, ((ut16) tmp)); + RzILOpEffect *ms_cast_ut16_16_17 = STOREW(VARL("EA"), CAST(16, IL_FALSE, VARL("tmp"))); + + RzILOpEffect *instruction_sequence = SEQN(5, imm_assign_0, op_ASSIGN_6, op_ASSIGN_12, op_ASSIGN_SUB_15, ms_cast_ut16_16_17); + return instruction_sequence; +} + +// memw(Rs+Ii) -= Rt +RzILOpEffect *hex_il_op_l4_sub_memopw_io(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + // Declare: st32 tmp; + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = ((ut32) Rs) + u; + RzILOpPure *op_ADD_5 = ADD(CAST(32, IL_FALSE, Rs), VARL("u")); + RzILOpEffect *op_ASSIGN_6 = SETL("EA", op_ADD_5); + + // tmp = ((st32) mem_load_32(EA)); + RzILOpPure *ml_EA_9 = LOADW(32, VARL("EA")); + RzILOpEffect *op_ASSIGN_11 = SETL("tmp", CAST(32, MSB(ml_EA_9), DUP(ml_EA_9))); + + // tmp = tmp - Rt; + RzILOpPure *op_SUB_13 = SUB(VARL("tmp"), Rt); + RzILOpEffect *op_ASSIGN_SUB_14 = SETL("tmp", op_SUB_13); + + // mem_store_ut32(EA, ((ut32) tmp)); + RzILOpEffect *ms_cast_ut32_15_16 = STOREW(VARL("EA"), CAST(32, IL_FALSE, VARL("tmp"))); + + RzILOpEffect *instruction_sequence = SEQN(5, imm_assign_0, op_ASSIGN_6, op_ASSIGN_11, op_ASSIGN_SUB_14, ms_cast_ut32_15_16); + return instruction_sequence; +} + +#include \ No newline at end of file diff --git a/librz/arch/isa/hexagon/il_ops/hexagon_il_L6_ops.c 
b/librz/arch/isa/hexagon/il_ops/hexagon_il_L6_ops.c new file mode 100644 index 00000000000..c4e4aea7284 --- /dev/null +++ b/librz/arch/isa/hexagon/il_ops/hexagon_il_L6_ops.c @@ -0,0 +1,22 @@ +// SPDX-FileCopyrightText: 2021 Rot127 +// SPDX-License-Identifier: LGPL-3.0-only + +// LLVM commit: b6f51787f6c8e77143f0aef6b58ddc7c55741d5c +// LLVM commit date: 2023-11-15 07:10:59 -0800 (ISO 8601 format) +// Date of code generation: 2024-03-16 06:22:39-05:00 +//======================================== +// The following code is generated. +// Do not edit. Repository of code generator: +// https://github.com/rizinorg/rz-hexagon + +#include +#include "../hexagon_il.h" +#include +#include + +// memcpy(Rs,Rt,Mu) +RzILOpEffect *hex_il_op_l6_memcpy(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +#include \ No newline at end of file diff --git a/librz/arch/isa/hexagon/il_ops/hexagon_il_M2_ops.c b/librz/arch/isa/hexagon/il_ops/hexagon_il_M2_ops.c new file mode 100644 index 00000000000..190ab7ef046 --- /dev/null +++ b/librz/arch/isa/hexagon/il_ops/hexagon_il_M2_ops.c @@ -0,0 +1,20239 @@ +// SPDX-FileCopyrightText: 2021 Rot127 +// SPDX-License-Identifier: LGPL-3.0-only + +// LLVM commit: b6f51787f6c8e77143f0aef6b58ddc7c55741d5c +// LLVM commit date: 2023-11-15 07:10:59 -0800 (ISO 8601 format) +// Date of code generation: 2024-03-16 06:22:39-05:00 +//======================================== +// The following code is generated. +// Do not edit. 
Repository of code generator: +// https://github.com/rizinorg/rz-hexagon + +#include +#include "../hexagon_il.h" +#include +#include + +// Rx += add(Rs,Rt) +RzILOpEffect *hex_il_op_m2_acci(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rx = Rx + Rs + Rt; + RzILOpPure *op_ADD_2 = ADD(READ_REG(pkt, Rx_op, false), Rs); + RzILOpPure *op_ADD_4 = ADD(op_ADD_2, Rt); + RzILOpEffect *op_ASSIGN_5 = WRITE_REG(bundle, Rx_op, op_ADD_4); + + RzILOpEffect *instruction_sequence = op_ASSIGN_5; + return instruction_sequence; +} + +// Rx += add(Rs,Ii) +RzILOpEffect *hex_il_op_m2_accii(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // s = s; + RzILOpEffect *imm_assign_0 = SETL("s", s); + + // Rx = Rx + Rs + s; + RzILOpPure *op_ADD_4 = ADD(READ_REG(pkt, Rx_op, false), Rs); + RzILOpPure *op_ADD_5 = ADD(op_ADD_4, VARL("s")); + RzILOpEffect *op_ASSIGN_6 = WRITE_REG(bundle, Rx_op, op_ADD_5); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_0, op_ASSIGN_6); + return instruction_sequence; +} + +// Rxx += cmpyi(Rs,Rt) +RzILOpEffect *hex_il_op_m2_cmaci_s0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rxx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rxx = Rxx + ((st64) ((st32) 
((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) + ((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rs, SN(32, 16)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0xffff)); + RzILOpPure *op_MUL_19 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_7), DUP(op_AND_7))), CAST(16, MSB(DUP(op_AND_7)), DUP(op_AND_7))), CAST(32, MSB(CAST(16, MSB(op_AND_16), DUP(op_AND_16))), CAST(16, MSB(DUP(op_AND_16)), DUP(op_AND_16)))); + RzILOpPure *op_ADD_21 = ADD(READ_REG(pkt, Rxx_op, false), CAST(64, MSB(op_MUL_19), DUP(op_MUL_19))); + RzILOpPure *op_RSHIFT_25 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_27 = LOGAND(op_RSHIFT_25, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_33 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_35 = LOGAND(op_RSHIFT_33, SN(32, 0xffff)); + RzILOpPure *op_MUL_38 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_27), DUP(op_AND_27))), CAST(16, MSB(DUP(op_AND_27)), DUP(op_AND_27))), CAST(32, MSB(CAST(16, MSB(op_AND_35), DUP(op_AND_35))), CAST(16, MSB(DUP(op_AND_35)), DUP(op_AND_35)))); + RzILOpPure *op_ADD_40 = ADD(op_ADD_21, CAST(64, MSB(op_MUL_38), DUP(op_MUL_38))); + RzILOpEffect *op_ASSIGN_41 = WRITE_REG(bundle, Rxx_op, op_ADD_40); + + RzILOpEffect *instruction_sequence = op_ASSIGN_41; + return instruction_sequence; +} + +// Rxx += cmpyr(Rs,Rt) +RzILOpEffect *hex_il_op_m2_cmacr_s0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rxx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rxx = Rxx + ((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) - 
((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rs, SN(32, 0)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0xffff)); + RzILOpPure *op_MUL_19 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_7), DUP(op_AND_7))), CAST(16, MSB(DUP(op_AND_7)), DUP(op_AND_7))), CAST(32, MSB(CAST(16, MSB(op_AND_16), DUP(op_AND_16))), CAST(16, MSB(DUP(op_AND_16)), DUP(op_AND_16)))); + RzILOpPure *op_ADD_21 = ADD(READ_REG(pkt, Rxx_op, false), CAST(64, MSB(op_MUL_19), DUP(op_MUL_19))); + RzILOpPure *op_RSHIFT_25 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_27 = LOGAND(op_RSHIFT_25, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_33 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_35 = LOGAND(op_RSHIFT_33, SN(32, 0xffff)); + RzILOpPure *op_MUL_38 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_27), DUP(op_AND_27))), CAST(16, MSB(DUP(op_AND_27)), DUP(op_AND_27))), CAST(32, MSB(CAST(16, MSB(op_AND_35), DUP(op_AND_35))), CAST(16, MSB(DUP(op_AND_35)), DUP(op_AND_35)))); + RzILOpPure *op_SUB_40 = SUB(op_ADD_21, CAST(64, MSB(op_MUL_38), DUP(op_MUL_38))); + RzILOpEffect *op_ASSIGN_41 = WRITE_REG(bundle, Rxx_op, op_SUB_40); + + RzILOpEffect *instruction_sequence = op_ASSIGN_41; + return instruction_sequence; +} + +// Rxx += cmpy(Rs,Rt):sat -- generated RzIL lifter for the accumulating complex multiply: the 16-bit real/imag lanes of Rs and Rt are multiplied, the products are added into the two 32-bit halves of the 64-bit accumulator Rxx, each half saturates to 32 bits, and USR.OVF is set when saturation occurs +RzILOpEffect *hex_il_op_m2_cmacs_s0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ operand registers resolved from the packet: Rxx (read/write accumulator pair), Rs and Rt (source registers) + const HexOp *Rxx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); -- pre-built effect raising the sticky overflow flag; it only runs on the BRANCH path taken when the high half saturates (see gcc_expr_240 below) + RzILOpEffect *set_usr_field_call_171 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // 
HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_23 = SHIFTRA(Rs, SN(32, 16)); + RzILOpPure *op_AND_25 = LOGAND(op_RSHIFT_23, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_32 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_34 = LOGAND(op_RSHIFT_32, SN(32, 0xffff)); + RzILOpPure *op_MUL_37 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_25), DUP(op_AND_25))), CAST(16, MSB(DUP(op_AND_25)), DUP(op_AND_25))), CAST(32, MSB(CAST(16, MSB(op_AND_34), DUP(op_AND_34))), CAST(16, MSB(DUP(op_AND_34)), DUP(op_AND_34)))); + RzILOpPure *op_LSHIFT_40 = SHIFTL0(CAST(64, MSB(op_MUL_37), DUP(op_MUL_37)), SN(32, 0)); + RzILOpPure *op_ADD_41 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_16), DUP(op_AND_16))), CAST(32, MSB(DUP(op_AND_16)), DUP(op_AND_16))), op_LSHIFT_40); + RzILOpPure *op_RSHIFT_45 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_47 = LOGAND(op_RSHIFT_45, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_53 = 
SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_55 = LOGAND(op_RSHIFT_53, SN(32, 0xffff)); + RzILOpPure *op_MUL_58 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_47), DUP(op_AND_47))), CAST(16, MSB(DUP(op_AND_47)), DUP(op_AND_47))), CAST(32, MSB(CAST(16, MSB(op_AND_55), DUP(op_AND_55))), CAST(16, MSB(DUP(op_AND_55)), DUP(op_AND_55)))); + RzILOpPure *op_LSHIFT_61 = SHIFTL0(CAST(64, MSB(op_MUL_58), DUP(op_MUL_58)), SN(32, 0)); + RzILOpPure *op_ADD_62 = ADD(op_ADD_41, op_LSHIFT_61); + RzILOpPure *op_RSHIFT_71 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_73 = LOGAND(op_RSHIFT_71, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_79 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_81 = LOGAND(op_RSHIFT_79, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_87 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_89 = LOGAND(op_RSHIFT_87, SN(32, 0xffff)); + RzILOpPure *op_MUL_92 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_81), DUP(op_AND_81))), CAST(16, MSB(DUP(op_AND_81)), DUP(op_AND_81))), CAST(32, MSB(CAST(16, MSB(op_AND_89), DUP(op_AND_89))), CAST(16, MSB(DUP(op_AND_89)), DUP(op_AND_89)))); + RzILOpPure *op_LSHIFT_95 = SHIFTL0(CAST(64, MSB(op_MUL_92), DUP(op_MUL_92)), SN(32, 0)); + RzILOpPure *op_ADD_96 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_73), DUP(op_AND_73))), CAST(32, MSB(DUP(op_AND_73)), DUP(op_AND_73))), op_LSHIFT_95); + RzILOpPure *op_RSHIFT_100 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_102 = LOGAND(op_RSHIFT_100, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_108 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_110 = LOGAND(op_RSHIFT_108, SN(32, 0xffff)); + RzILOpPure *op_MUL_113 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_102), DUP(op_AND_102))), CAST(16, MSB(DUP(op_AND_102)), DUP(op_AND_102))), CAST(32, MSB(CAST(16, MSB(op_AND_110), DUP(op_AND_110))), CAST(16, MSB(DUP(op_AND_110)), DUP(op_AND_110)))); + RzILOpPure *op_LSHIFT_116 = SHIFTL0(CAST(64, MSB(op_MUL_113), DUP(op_MUL_113)), SN(32, 0)); + RzILOpPure *op_ADD_117 = ADD(op_ADD_96, 
op_LSHIFT_116); + RzILOpPure *op_EQ_118 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_ADD_62), SN(32, 0), SN(32, 0x20)), op_ADD_117); + RzILOpPure *op_RSHIFT_175 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_177 = LOGAND(op_RSHIFT_175, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_183 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_185 = LOGAND(op_RSHIFT_183, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_191 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_193 = LOGAND(op_RSHIFT_191, SN(32, 0xffff)); + RzILOpPure *op_MUL_196 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_185), DUP(op_AND_185))), CAST(16, MSB(DUP(op_AND_185)), DUP(op_AND_185))), CAST(32, MSB(CAST(16, MSB(op_AND_193), DUP(op_AND_193))), CAST(16, MSB(DUP(op_AND_193)), DUP(op_AND_193)))); + RzILOpPure *op_LSHIFT_199 = SHIFTL0(CAST(64, MSB(op_MUL_196), DUP(op_MUL_196)), SN(32, 0)); + RzILOpPure *op_ADD_200 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_177), DUP(op_AND_177))), CAST(32, MSB(DUP(op_AND_177)), DUP(op_AND_177))), op_LSHIFT_199); + RzILOpPure *op_RSHIFT_204 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_206 = LOGAND(op_RSHIFT_204, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_212 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_214 = LOGAND(op_RSHIFT_212, SN(32, 0xffff)); + RzILOpPure *op_MUL_217 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_206), DUP(op_AND_206))), CAST(16, MSB(DUP(op_AND_206)), DUP(op_AND_206))), CAST(32, MSB(CAST(16, MSB(op_AND_214), DUP(op_AND_214))), CAST(16, MSB(DUP(op_AND_214)), DUP(op_AND_214)))); + RzILOpPure *op_LSHIFT_220 = SHIFTL0(CAST(64, MSB(op_MUL_217), DUP(op_MUL_217)), SN(32, 0)); + RzILOpPure *op_ADD_221 = ADD(op_ADD_200, op_LSHIFT_220); + RzILOpPure *op_LT_224 = SLT(op_ADD_221, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_229 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_230 = NEG(op_LSHIFT_229); + RzILOpPure *op_LSHIFT_235 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_238 = SUB(op_LSHIFT_235, CAST(64, MSB(SN(32, 
1)), SN(32, 1))); + RzILOpPure *cond_239 = ITE(op_LT_224, op_NEG_230, op_SUB_238); + RzILOpEffect *gcc_expr_240 = BRANCH(op_EQ_118, EMPTY(), set_usr_field_call_171); + + // h_tmp232 = HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_242 = SETL("h_tmp232", cond_239); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx > ...; + RzILOpEffect *seq_243 = SEQN(2, gcc_expr_240, op_ASSIGN_hybrid_tmp_242); + + // Rxx = ((Rxx & (~(0xffffffff << 0x20))) | ((((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0)) ? 
((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0) : h_tmp232) & 0xffffffff) << 0x20)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0x20)); + RzILOpPure *op_NOT_6 = LOGNOT(op_LSHIFT_5); + RzILOpPure *op_AND_7 = LOGAND(READ_REG(pkt, Rxx_op, false), op_NOT_6); + RzILOpPure *op_RSHIFT_122 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_124 = LOGAND(op_RSHIFT_122, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_130 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_132 = LOGAND(op_RSHIFT_130, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_138 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_140 = LOGAND(op_RSHIFT_138, SN(32, 0xffff)); + RzILOpPure *op_MUL_143 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_132), DUP(op_AND_132))), CAST(16, MSB(DUP(op_AND_132)), DUP(op_AND_132))), CAST(32, MSB(CAST(16, MSB(op_AND_140), DUP(op_AND_140))), CAST(16, MSB(DUP(op_AND_140)), DUP(op_AND_140)))); + RzILOpPure *op_LSHIFT_146 = SHIFTL0(CAST(64, MSB(op_MUL_143), DUP(op_MUL_143)), SN(32, 0)); + RzILOpPure *op_ADD_147 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_124), DUP(op_AND_124))), CAST(32, MSB(DUP(op_AND_124)), DUP(op_AND_124))), op_LSHIFT_146); + RzILOpPure *op_RSHIFT_151 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_153 = LOGAND(op_RSHIFT_151, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_159 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_161 = LOGAND(op_RSHIFT_159, SN(32, 0xffff)); + RzILOpPure *op_MUL_164 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_153), DUP(op_AND_153))), CAST(16, MSB(DUP(op_AND_153)), DUP(op_AND_153))), CAST(32, MSB(CAST(16, MSB(op_AND_161), DUP(op_AND_161))), CAST(16, MSB(DUP(op_AND_161)), DUP(op_AND_161)))); + RzILOpPure *op_LSHIFT_167 = SHIFTL0(CAST(64, MSB(op_MUL_164), DUP(op_MUL_164)), SN(32, 0)); + RzILOpPure 
*op_ADD_168 = ADD(op_ADD_147, op_LSHIFT_167); + RzILOpPure *cond_244 = ITE(DUP(op_EQ_118), op_ADD_168, VARL("h_tmp232")); + RzILOpPure *op_AND_246 = LOGAND(cond_244, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_250 = SHIFTL0(op_AND_246, SN(32, 0x20)); + RzILOpPure *op_OR_251 = LOGOR(op_AND_7, op_LSHIFT_250); + RzILOpEffect *op_ASSIGN_252 = WRITE_REG(bundle, Rxx_op, op_OR_251); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((R ...; + RzILOpEffect *seq_253 = SEQN(2, seq_243, op_ASSIGN_252); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); -- second overflow-flag effect, used by the BRANCH guarding the low 32-bit half of the accumulator + RzILOpEffect *set_usr_field_call_423 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) - (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) - (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) - (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_268 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_270 = LOGAND(op_RSHIFT_268, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_276 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_278 = LOGAND(op_RSHIFT_276, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_284 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_286 = LOGAND(op_RSHIFT_284, SN(32, 0xffff)); + RzILOpPure *op_MUL_289 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_278), DUP(op_AND_278))), CAST(16, MSB(DUP(op_AND_278)), DUP(op_AND_278))), CAST(32, MSB(CAST(16, MSB(op_AND_286), DUP(op_AND_286))), CAST(16, MSB(DUP(op_AND_286)), DUP(op_AND_286)))); + RzILOpPure *op_LSHIFT_292 = SHIFTL0(CAST(64, MSB(op_MUL_289), DUP(op_MUL_289)), SN(32, 0)); + RzILOpPure *op_ADD_293 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_270), DUP(op_AND_270))), CAST(32, MSB(DUP(op_AND_270)), DUP(op_AND_270))), op_LSHIFT_292); + RzILOpPure *op_RSHIFT_297 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_299 = LOGAND(op_RSHIFT_297, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_305 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_307 = LOGAND(op_RSHIFT_305, SN(32, 0xffff)); + RzILOpPure *op_MUL_310 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_299), DUP(op_AND_299))), CAST(16, MSB(DUP(op_AND_299)), DUP(op_AND_299))), CAST(32, MSB(CAST(16, MSB(op_AND_307), DUP(op_AND_307))), CAST(16, MSB(DUP(op_AND_307)), DUP(op_AND_307)))); + RzILOpPure *op_LSHIFT_313 = SHIFTL0(CAST(64, MSB(op_MUL_310), DUP(op_MUL_310)), SN(32, 0)); + RzILOpPure *op_SUB_314 = SUB(op_ADD_293, op_LSHIFT_313); + RzILOpPure *op_RSHIFT_323 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_325 = LOGAND(op_RSHIFT_323, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_331 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_333 = LOGAND(op_RSHIFT_331, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_339 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_341 = LOGAND(op_RSHIFT_339, SN(32, 
0xffff)); + RzILOpPure *op_MUL_344 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_333), DUP(op_AND_333))), CAST(16, MSB(DUP(op_AND_333)), DUP(op_AND_333))), CAST(32, MSB(CAST(16, MSB(op_AND_341), DUP(op_AND_341))), CAST(16, MSB(DUP(op_AND_341)), DUP(op_AND_341)))); + RzILOpPure *op_LSHIFT_347 = SHIFTL0(CAST(64, MSB(op_MUL_344), DUP(op_MUL_344)), SN(32, 0)); + RzILOpPure *op_ADD_348 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_325), DUP(op_AND_325))), CAST(32, MSB(DUP(op_AND_325)), DUP(op_AND_325))), op_LSHIFT_347); + RzILOpPure *op_RSHIFT_352 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_354 = LOGAND(op_RSHIFT_352, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_360 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_362 = LOGAND(op_RSHIFT_360, SN(32, 0xffff)); + RzILOpPure *op_MUL_365 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_354), DUP(op_AND_354))), CAST(16, MSB(DUP(op_AND_354)), DUP(op_AND_354))), CAST(32, MSB(CAST(16, MSB(op_AND_362), DUP(op_AND_362))), CAST(16, MSB(DUP(op_AND_362)), DUP(op_AND_362)))); + RzILOpPure *op_LSHIFT_368 = SHIFTL0(CAST(64, MSB(op_MUL_365), DUP(op_MUL_365)), SN(32, 0)); + RzILOpPure *op_SUB_369 = SUB(op_ADD_348, op_LSHIFT_368); + RzILOpPure *op_EQ_370 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_SUB_314), SN(32, 0), SN(32, 0x20)), op_SUB_369); + RzILOpPure *op_RSHIFT_427 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_429 = LOGAND(op_RSHIFT_427, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_435 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_437 = LOGAND(op_RSHIFT_435, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_443 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_445 = LOGAND(op_RSHIFT_443, SN(32, 0xffff)); + RzILOpPure *op_MUL_448 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_437), DUP(op_AND_437))), CAST(16, MSB(DUP(op_AND_437)), DUP(op_AND_437))), CAST(32, MSB(CAST(16, MSB(op_AND_445), DUP(op_AND_445))), CAST(16, MSB(DUP(op_AND_445)), DUP(op_AND_445)))); + RzILOpPure *op_LSHIFT_451 = SHIFTL0(CAST(64, MSB(op_MUL_448), 
DUP(op_MUL_448)), SN(32, 0)); + RzILOpPure *op_ADD_452 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_429), DUP(op_AND_429))), CAST(32, MSB(DUP(op_AND_429)), DUP(op_AND_429))), op_LSHIFT_451); + RzILOpPure *op_RSHIFT_456 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_458 = LOGAND(op_RSHIFT_456, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_464 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_466 = LOGAND(op_RSHIFT_464, SN(32, 0xffff)); + RzILOpPure *op_MUL_469 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_458), DUP(op_AND_458))), CAST(16, MSB(DUP(op_AND_458)), DUP(op_AND_458))), CAST(32, MSB(CAST(16, MSB(op_AND_466), DUP(op_AND_466))), CAST(16, MSB(DUP(op_AND_466)), DUP(op_AND_466)))); + RzILOpPure *op_LSHIFT_472 = SHIFTL0(CAST(64, MSB(op_MUL_469), DUP(op_MUL_469)), SN(32, 0)); + RzILOpPure *op_SUB_473 = SUB(op_ADD_452, op_LSHIFT_472); + RzILOpPure *op_LT_476 = SLT(op_SUB_473, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_481 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_482 = NEG(op_LSHIFT_481); + RzILOpPure *op_LSHIFT_487 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_490 = SUB(op_LSHIFT_487, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_491 = ITE(op_LT_476, op_NEG_482, op_SUB_490); + RzILOpEffect *gcc_expr_492 = BRANCH(op_EQ_370, EMPTY(), set_usr_field_call_423); + + // h_tmp233 = HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) - (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) - (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) 
((Rxx >> 0x0) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) - (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_494 = SETL("h_tmp233", cond_491); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx > ...; + RzILOpEffect *seq_495 = SEQN(2, gcc_expr_492, op_ASSIGN_hybrid_tmp_494); + + // Rxx = ((Rxx & (~(0xffffffff << 0x0))) | ((((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) - (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) - (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0)) ? 
((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) - (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0) : h_tmp233) & 0xffffffff) << 0x0)); + RzILOpPure *op_LSHIFT_259 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0)); + RzILOpPure *op_NOT_260 = LOGNOT(op_LSHIFT_259); + RzILOpPure *op_AND_261 = LOGAND(READ_REG(pkt, Rxx_op, false), op_NOT_260); + RzILOpPure *op_RSHIFT_374 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_376 = LOGAND(op_RSHIFT_374, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_382 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_384 = LOGAND(op_RSHIFT_382, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_390 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_392 = LOGAND(op_RSHIFT_390, SN(32, 0xffff)); + RzILOpPure *op_MUL_395 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_384), DUP(op_AND_384))), CAST(16, MSB(DUP(op_AND_384)), DUP(op_AND_384))), CAST(32, MSB(CAST(16, MSB(op_AND_392), DUP(op_AND_392))), CAST(16, MSB(DUP(op_AND_392)), DUP(op_AND_392)))); + RzILOpPure *op_LSHIFT_398 = SHIFTL0(CAST(64, MSB(op_MUL_395), DUP(op_MUL_395)), SN(32, 0)); + RzILOpPure *op_ADD_399 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_376), DUP(op_AND_376))), CAST(32, MSB(DUP(op_AND_376)), DUP(op_AND_376))), op_LSHIFT_398); + RzILOpPure *op_RSHIFT_403 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_405 = LOGAND(op_RSHIFT_403, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_411 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_413 = LOGAND(op_RSHIFT_411, SN(32, 0xffff)); + RzILOpPure *op_MUL_416 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_405), DUP(op_AND_405))), CAST(16, MSB(DUP(op_AND_405)), DUP(op_AND_405))), CAST(32, MSB(CAST(16, MSB(op_AND_413), DUP(op_AND_413))), CAST(16, MSB(DUP(op_AND_413)), DUP(op_AND_413)))); + RzILOpPure *op_LSHIFT_419 = SHIFTL0(CAST(64, MSB(op_MUL_416), DUP(op_MUL_416)), SN(32, 0)); + RzILOpPure 
*op_SUB_420 = SUB(op_ADD_399, op_LSHIFT_419); + RzILOpPure *cond_496 = ITE(DUP(op_EQ_370), op_SUB_420, VARL("h_tmp233")); + RzILOpPure *op_AND_498 = LOGAND(cond_496, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_502 = SHIFTL0(op_AND_498, SN(32, 0)); + RzILOpPure *op_OR_503 = LOGOR(op_AND_261, op_LSHIFT_502); + RzILOpEffect *op_ASSIGN_504 = WRITE_REG(bundle, Rxx_op, op_OR_503); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((R ...; + RzILOpEffect *seq_505 = SEQN(2, seq_495, op_ASSIGN_504); + + RzILOpEffect *instruction_sequence = SEQN(2, seq_253, seq_505); + return instruction_sequence; +} + +// Rxx += cmpy(Rs,Rt):<<1:sat +RzILOpEffect *hex_il_op_m2_cmacs_s1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rxx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_171 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 
0x0) & 0xffff)))) << 0x1) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_23 = SHIFTRA(Rs, SN(32, 16)); + RzILOpPure *op_AND_25 = LOGAND(op_RSHIFT_23, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_32 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_34 = LOGAND(op_RSHIFT_32, SN(32, 0xffff)); + RzILOpPure *op_MUL_37 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_25), DUP(op_AND_25))), CAST(16, MSB(DUP(op_AND_25)), DUP(op_AND_25))), CAST(32, MSB(CAST(16, MSB(op_AND_34), DUP(op_AND_34))), CAST(16, MSB(DUP(op_AND_34)), DUP(op_AND_34)))); + RzILOpPure *op_LSHIFT_40 = SHIFTL0(CAST(64, MSB(op_MUL_37), DUP(op_MUL_37)), SN(32, 1)); + RzILOpPure *op_ADD_41 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_16), DUP(op_AND_16))), CAST(32, MSB(DUP(op_AND_16)), DUP(op_AND_16))), op_LSHIFT_40); + RzILOpPure *op_RSHIFT_45 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_47 = LOGAND(op_RSHIFT_45, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_53 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_55 = LOGAND(op_RSHIFT_53, SN(32, 0xffff)); + RzILOpPure *op_MUL_58 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_47), DUP(op_AND_47))), CAST(16, MSB(DUP(op_AND_47)), DUP(op_AND_47))), CAST(32, MSB(CAST(16, MSB(op_AND_55), DUP(op_AND_55))), CAST(16, MSB(DUP(op_AND_55)), DUP(op_AND_55)))); + RzILOpPure *op_LSHIFT_61 = SHIFTL0(CAST(64, MSB(op_MUL_58), DUP(op_MUL_58)), SN(32, 1)); + RzILOpPure *op_ADD_62 = ADD(op_ADD_41, op_LSHIFT_61); + RzILOpPure *op_RSHIFT_71 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_73 = LOGAND(op_RSHIFT_71, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_79 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_81 = LOGAND(op_RSHIFT_79, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_87 = 
SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_89 = LOGAND(op_RSHIFT_87, SN(32, 0xffff)); + RzILOpPure *op_MUL_92 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_81), DUP(op_AND_81))), CAST(16, MSB(DUP(op_AND_81)), DUP(op_AND_81))), CAST(32, MSB(CAST(16, MSB(op_AND_89), DUP(op_AND_89))), CAST(16, MSB(DUP(op_AND_89)), DUP(op_AND_89)))); + RzILOpPure *op_LSHIFT_95 = SHIFTL0(CAST(64, MSB(op_MUL_92), DUP(op_MUL_92)), SN(32, 1)); + RzILOpPure *op_ADD_96 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_73), DUP(op_AND_73))), CAST(32, MSB(DUP(op_AND_73)), DUP(op_AND_73))), op_LSHIFT_95); + RzILOpPure *op_RSHIFT_100 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_102 = LOGAND(op_RSHIFT_100, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_108 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_110 = LOGAND(op_RSHIFT_108, SN(32, 0xffff)); + RzILOpPure *op_MUL_113 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_102), DUP(op_AND_102))), CAST(16, MSB(DUP(op_AND_102)), DUP(op_AND_102))), CAST(32, MSB(CAST(16, MSB(op_AND_110), DUP(op_AND_110))), CAST(16, MSB(DUP(op_AND_110)), DUP(op_AND_110)))); + RzILOpPure *op_LSHIFT_116 = SHIFTL0(CAST(64, MSB(op_MUL_113), DUP(op_MUL_113)), SN(32, 1)); + RzILOpPure *op_ADD_117 = ADD(op_ADD_96, op_LSHIFT_116); + RzILOpPure *op_EQ_118 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_ADD_62), SN(32, 0), SN(32, 0x20)), op_ADD_117); + RzILOpPure *op_RSHIFT_175 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_177 = LOGAND(op_RSHIFT_175, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_183 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_185 = LOGAND(op_RSHIFT_183, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_191 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_193 = LOGAND(op_RSHIFT_191, SN(32, 0xffff)); + RzILOpPure *op_MUL_196 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_185), DUP(op_AND_185))), CAST(16, MSB(DUP(op_AND_185)), DUP(op_AND_185))), CAST(32, MSB(CAST(16, MSB(op_AND_193), DUP(op_AND_193))), CAST(16, MSB(DUP(op_AND_193)), DUP(op_AND_193)))); + RzILOpPure 
*op_LSHIFT_199 = SHIFTL0(CAST(64, MSB(op_MUL_196), DUP(op_MUL_196)), SN(32, 1)); + RzILOpPure *op_ADD_200 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_177), DUP(op_AND_177))), CAST(32, MSB(DUP(op_AND_177)), DUP(op_AND_177))), op_LSHIFT_199); + RzILOpPure *op_RSHIFT_204 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_206 = LOGAND(op_RSHIFT_204, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_212 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_214 = LOGAND(op_RSHIFT_212, SN(32, 0xffff)); + RzILOpPure *op_MUL_217 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_206), DUP(op_AND_206))), CAST(16, MSB(DUP(op_AND_206)), DUP(op_AND_206))), CAST(32, MSB(CAST(16, MSB(op_AND_214), DUP(op_AND_214))), CAST(16, MSB(DUP(op_AND_214)), DUP(op_AND_214)))); + RzILOpPure *op_LSHIFT_220 = SHIFTL0(CAST(64, MSB(op_MUL_217), DUP(op_MUL_217)), SN(32, 1)); + RzILOpPure *op_ADD_221 = ADD(op_ADD_200, op_LSHIFT_220); + RzILOpPure *op_LT_224 = SLT(op_ADD_221, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_229 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_230 = NEG(op_LSHIFT_229); + RzILOpPure *op_LSHIFT_235 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_238 = SUB(op_LSHIFT_235, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_239 = ITE(op_LT_224, op_NEG_230, op_SUB_238); + RzILOpEffect *gcc_expr_240 = BRANCH(op_EQ_118, EMPTY(), set_usr_field_call_171); + + // h_tmp234 = HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1))) {{}} else {set_usr_field(bundle, 
HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_242 = SETL("h_tmp234", cond_239); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx > ...; + RzILOpEffect *seq_243 = SEQN(2, gcc_expr_240, op_ASSIGN_hybrid_tmp_242); + + // Rxx = ((Rxx & (~(0xffffffff << 0x20))) | ((((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1)) ? 
((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1) : h_tmp234) & 0xffffffff) << 0x20)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0x20)); + RzILOpPure *op_NOT_6 = LOGNOT(op_LSHIFT_5); + RzILOpPure *op_AND_7 = LOGAND(READ_REG(pkt, Rxx_op, false), op_NOT_6); + RzILOpPure *op_RSHIFT_122 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_124 = LOGAND(op_RSHIFT_122, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_130 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_132 = LOGAND(op_RSHIFT_130, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_138 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_140 = LOGAND(op_RSHIFT_138, SN(32, 0xffff)); + RzILOpPure *op_MUL_143 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_132), DUP(op_AND_132))), CAST(16, MSB(DUP(op_AND_132)), DUP(op_AND_132))), CAST(32, MSB(CAST(16, MSB(op_AND_140), DUP(op_AND_140))), CAST(16, MSB(DUP(op_AND_140)), DUP(op_AND_140)))); + RzILOpPure *op_LSHIFT_146 = SHIFTL0(CAST(64, MSB(op_MUL_143), DUP(op_MUL_143)), SN(32, 1)); + RzILOpPure *op_ADD_147 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_124), DUP(op_AND_124))), CAST(32, MSB(DUP(op_AND_124)), DUP(op_AND_124))), op_LSHIFT_146); + RzILOpPure *op_RSHIFT_151 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_153 = LOGAND(op_RSHIFT_151, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_159 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_161 = LOGAND(op_RSHIFT_159, SN(32, 0xffff)); + RzILOpPure *op_MUL_164 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_153), DUP(op_AND_153))), CAST(16, MSB(DUP(op_AND_153)), DUP(op_AND_153))), CAST(32, MSB(CAST(16, MSB(op_AND_161), DUP(op_AND_161))), CAST(16, MSB(DUP(op_AND_161)), DUP(op_AND_161)))); + RzILOpPure *op_LSHIFT_167 = SHIFTL0(CAST(64, MSB(op_MUL_164), DUP(op_MUL_164)), SN(32, 1)); + RzILOpPure 
*op_ADD_168 = ADD(op_ADD_147, op_LSHIFT_167); + RzILOpPure *cond_244 = ITE(DUP(op_EQ_118), op_ADD_168, VARL("h_tmp234")); + RzILOpPure *op_AND_246 = LOGAND(cond_244, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_250 = SHIFTL0(op_AND_246, SN(32, 0x20)); + RzILOpPure *op_OR_251 = LOGOR(op_AND_7, op_LSHIFT_250); + RzILOpEffect *op_ASSIGN_252 = WRITE_REG(bundle, Rxx_op, op_OR_251); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((R ...; + RzILOpEffect *seq_253 = SEQN(2, seq_243, op_ASSIGN_252); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_423 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) - (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) - (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) - (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_268 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_270 = LOGAND(op_RSHIFT_268, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_276 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_278 = LOGAND(op_RSHIFT_276, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_284 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_286 = LOGAND(op_RSHIFT_284, SN(32, 0xffff)); + RzILOpPure *op_MUL_289 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_278), DUP(op_AND_278))), CAST(16, MSB(DUP(op_AND_278)), DUP(op_AND_278))), CAST(32, MSB(CAST(16, MSB(op_AND_286), DUP(op_AND_286))), CAST(16, MSB(DUP(op_AND_286)), DUP(op_AND_286)))); + RzILOpPure *op_LSHIFT_292 = SHIFTL0(CAST(64, MSB(op_MUL_289), DUP(op_MUL_289)), SN(32, 1)); + RzILOpPure *op_ADD_293 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_270), DUP(op_AND_270))), CAST(32, MSB(DUP(op_AND_270)), DUP(op_AND_270))), op_LSHIFT_292); + RzILOpPure *op_RSHIFT_297 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_299 = LOGAND(op_RSHIFT_297, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_305 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_307 = LOGAND(op_RSHIFT_305, SN(32, 0xffff)); + RzILOpPure *op_MUL_310 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_299), DUP(op_AND_299))), CAST(16, MSB(DUP(op_AND_299)), DUP(op_AND_299))), CAST(32, MSB(CAST(16, MSB(op_AND_307), DUP(op_AND_307))), CAST(16, MSB(DUP(op_AND_307)), DUP(op_AND_307)))); + RzILOpPure *op_LSHIFT_313 = SHIFTL0(CAST(64, MSB(op_MUL_310), DUP(op_MUL_310)), SN(32, 1)); + RzILOpPure *op_SUB_314 = SUB(op_ADD_293, op_LSHIFT_313); + RzILOpPure *op_RSHIFT_323 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_325 = LOGAND(op_RSHIFT_323, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_331 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_333 = LOGAND(op_RSHIFT_331, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_339 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_341 = LOGAND(op_RSHIFT_339, SN(32, 
0xffff)); + RzILOpPure *op_MUL_344 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_333), DUP(op_AND_333))), CAST(16, MSB(DUP(op_AND_333)), DUP(op_AND_333))), CAST(32, MSB(CAST(16, MSB(op_AND_341), DUP(op_AND_341))), CAST(16, MSB(DUP(op_AND_341)), DUP(op_AND_341)))); + RzILOpPure *op_LSHIFT_347 = SHIFTL0(CAST(64, MSB(op_MUL_344), DUP(op_MUL_344)), SN(32, 1)); + RzILOpPure *op_ADD_348 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_325), DUP(op_AND_325))), CAST(32, MSB(DUP(op_AND_325)), DUP(op_AND_325))), op_LSHIFT_347); + RzILOpPure *op_RSHIFT_352 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_354 = LOGAND(op_RSHIFT_352, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_360 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_362 = LOGAND(op_RSHIFT_360, SN(32, 0xffff)); + RzILOpPure *op_MUL_365 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_354), DUP(op_AND_354))), CAST(16, MSB(DUP(op_AND_354)), DUP(op_AND_354))), CAST(32, MSB(CAST(16, MSB(op_AND_362), DUP(op_AND_362))), CAST(16, MSB(DUP(op_AND_362)), DUP(op_AND_362)))); + RzILOpPure *op_LSHIFT_368 = SHIFTL0(CAST(64, MSB(op_MUL_365), DUP(op_MUL_365)), SN(32, 1)); + RzILOpPure *op_SUB_369 = SUB(op_ADD_348, op_LSHIFT_368); + RzILOpPure *op_EQ_370 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_SUB_314), SN(32, 0), SN(32, 0x20)), op_SUB_369); + RzILOpPure *op_RSHIFT_427 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_429 = LOGAND(op_RSHIFT_427, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_435 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_437 = LOGAND(op_RSHIFT_435, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_443 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_445 = LOGAND(op_RSHIFT_443, SN(32, 0xffff)); + RzILOpPure *op_MUL_448 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_437), DUP(op_AND_437))), CAST(16, MSB(DUP(op_AND_437)), DUP(op_AND_437))), CAST(32, MSB(CAST(16, MSB(op_AND_445), DUP(op_AND_445))), CAST(16, MSB(DUP(op_AND_445)), DUP(op_AND_445)))); + RzILOpPure *op_LSHIFT_451 = SHIFTL0(CAST(64, MSB(op_MUL_448), 
DUP(op_MUL_448)), SN(32, 1)); + RzILOpPure *op_ADD_452 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_429), DUP(op_AND_429))), CAST(32, MSB(DUP(op_AND_429)), DUP(op_AND_429))), op_LSHIFT_451); + RzILOpPure *op_RSHIFT_456 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_458 = LOGAND(op_RSHIFT_456, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_464 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_466 = LOGAND(op_RSHIFT_464, SN(32, 0xffff)); + RzILOpPure *op_MUL_469 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_458), DUP(op_AND_458))), CAST(16, MSB(DUP(op_AND_458)), DUP(op_AND_458))), CAST(32, MSB(CAST(16, MSB(op_AND_466), DUP(op_AND_466))), CAST(16, MSB(DUP(op_AND_466)), DUP(op_AND_466)))); + RzILOpPure *op_LSHIFT_472 = SHIFTL0(CAST(64, MSB(op_MUL_469), DUP(op_MUL_469)), SN(32, 1)); + RzILOpPure *op_SUB_473 = SUB(op_ADD_452, op_LSHIFT_472); + RzILOpPure *op_LT_476 = SLT(op_SUB_473, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_481 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_482 = NEG(op_LSHIFT_481); + RzILOpPure *op_LSHIFT_487 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_490 = SUB(op_LSHIFT_487, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_491 = ITE(op_LT_476, op_NEG_482, op_SUB_490); + RzILOpEffect *gcc_expr_492 = BRANCH(op_EQ_370, EMPTY(), set_usr_field_call_423); + + // h_tmp235 = HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) - (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) - (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) 
((Rxx >> 0x0) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) - (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_494 = SETL("h_tmp235", cond_491); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx > ...; + RzILOpEffect *seq_495 = SEQN(2, gcc_expr_492, op_ASSIGN_hybrid_tmp_494); + + // Rxx = ((Rxx & (~(0xffffffff << 0x0))) | ((((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) - (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) - (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1)) ? 
((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) - (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1) : h_tmp235) & 0xffffffff) << 0x0)); + RzILOpPure *op_LSHIFT_259 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0)); + RzILOpPure *op_NOT_260 = LOGNOT(op_LSHIFT_259); + RzILOpPure *op_AND_261 = LOGAND(READ_REG(pkt, Rxx_op, false), op_NOT_260); + RzILOpPure *op_RSHIFT_374 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_376 = LOGAND(op_RSHIFT_374, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_382 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_384 = LOGAND(op_RSHIFT_382, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_390 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_392 = LOGAND(op_RSHIFT_390, SN(32, 0xffff)); + RzILOpPure *op_MUL_395 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_384), DUP(op_AND_384))), CAST(16, MSB(DUP(op_AND_384)), DUP(op_AND_384))), CAST(32, MSB(CAST(16, MSB(op_AND_392), DUP(op_AND_392))), CAST(16, MSB(DUP(op_AND_392)), DUP(op_AND_392)))); + RzILOpPure *op_LSHIFT_398 = SHIFTL0(CAST(64, MSB(op_MUL_395), DUP(op_MUL_395)), SN(32, 1)); + RzILOpPure *op_ADD_399 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_376), DUP(op_AND_376))), CAST(32, MSB(DUP(op_AND_376)), DUP(op_AND_376))), op_LSHIFT_398); + RzILOpPure *op_RSHIFT_403 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_405 = LOGAND(op_RSHIFT_403, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_411 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_413 = LOGAND(op_RSHIFT_411, SN(32, 0xffff)); + RzILOpPure *op_MUL_416 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_405), DUP(op_AND_405))), CAST(16, MSB(DUP(op_AND_405)), DUP(op_AND_405))), CAST(32, MSB(CAST(16, MSB(op_AND_413), DUP(op_AND_413))), CAST(16, MSB(DUP(op_AND_413)), DUP(op_AND_413)))); + RzILOpPure *op_LSHIFT_419 = SHIFTL0(CAST(64, MSB(op_MUL_416), DUP(op_MUL_416)), SN(32, 1)); + RzILOpPure 
*op_SUB_420 = SUB(op_ADD_399, op_LSHIFT_419); + RzILOpPure *cond_496 = ITE(DUP(op_EQ_370), op_SUB_420, VARL("h_tmp235")); + RzILOpPure *op_AND_498 = LOGAND(cond_496, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_502 = SHIFTL0(op_AND_498, SN(32, 0)); + RzILOpPure *op_OR_503 = LOGOR(op_AND_261, op_LSHIFT_502); + RzILOpEffect *op_ASSIGN_504 = WRITE_REG(bundle, Rxx_op, op_OR_503); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((R ...; + RzILOpEffect *seq_505 = SEQN(2, seq_495, op_ASSIGN_504); + + RzILOpEffect *instruction_sequence = SEQN(2, seq_253, seq_505); + return instruction_sequence; +} + +// Rxx += cmpy(Rs,Rt*):sat +RzILOpEffect *hex_il_op_m2_cmacsc_s0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rxx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_171 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) - (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) - (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 
0x0) & 0xffff)))) << 0x0) - (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_23 = SHIFTRA(Rs, SN(32, 16)); + RzILOpPure *op_AND_25 = LOGAND(op_RSHIFT_23, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_32 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_34 = LOGAND(op_RSHIFT_32, SN(32, 0xffff)); + RzILOpPure *op_MUL_37 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_25), DUP(op_AND_25))), CAST(16, MSB(DUP(op_AND_25)), DUP(op_AND_25))), CAST(32, MSB(CAST(16, MSB(op_AND_34), DUP(op_AND_34))), CAST(16, MSB(DUP(op_AND_34)), DUP(op_AND_34)))); + RzILOpPure *op_LSHIFT_40 = SHIFTL0(CAST(64, MSB(op_MUL_37), DUP(op_MUL_37)), SN(32, 0)); + RzILOpPure *op_ADD_41 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_16), DUP(op_AND_16))), CAST(32, MSB(DUP(op_AND_16)), DUP(op_AND_16))), op_LSHIFT_40); + RzILOpPure *op_RSHIFT_45 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_47 = LOGAND(op_RSHIFT_45, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_53 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_55 = LOGAND(op_RSHIFT_53, SN(32, 0xffff)); + RzILOpPure *op_MUL_58 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_47), DUP(op_AND_47))), CAST(16, MSB(DUP(op_AND_47)), DUP(op_AND_47))), CAST(32, MSB(CAST(16, MSB(op_AND_55), DUP(op_AND_55))), CAST(16, MSB(DUP(op_AND_55)), DUP(op_AND_55)))); + RzILOpPure *op_LSHIFT_61 = SHIFTL0(CAST(64, MSB(op_MUL_58), DUP(op_MUL_58)), SN(32, 0)); + RzILOpPure *op_SUB_62 = SUB(op_ADD_41, op_LSHIFT_61); + RzILOpPure *op_RSHIFT_71 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_73 = LOGAND(op_RSHIFT_71, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_79 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_81 = LOGAND(op_RSHIFT_79, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_87 = 
SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_89 = LOGAND(op_RSHIFT_87, SN(32, 0xffff)); + RzILOpPure *op_MUL_92 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_81), DUP(op_AND_81))), CAST(16, MSB(DUP(op_AND_81)), DUP(op_AND_81))), CAST(32, MSB(CAST(16, MSB(op_AND_89), DUP(op_AND_89))), CAST(16, MSB(DUP(op_AND_89)), DUP(op_AND_89)))); + RzILOpPure *op_LSHIFT_95 = SHIFTL0(CAST(64, MSB(op_MUL_92), DUP(op_MUL_92)), SN(32, 0)); + RzILOpPure *op_ADD_96 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_73), DUP(op_AND_73))), CAST(32, MSB(DUP(op_AND_73)), DUP(op_AND_73))), op_LSHIFT_95); + RzILOpPure *op_RSHIFT_100 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_102 = LOGAND(op_RSHIFT_100, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_108 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_110 = LOGAND(op_RSHIFT_108, SN(32, 0xffff)); + RzILOpPure *op_MUL_113 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_102), DUP(op_AND_102))), CAST(16, MSB(DUP(op_AND_102)), DUP(op_AND_102))), CAST(32, MSB(CAST(16, MSB(op_AND_110), DUP(op_AND_110))), CAST(16, MSB(DUP(op_AND_110)), DUP(op_AND_110)))); + RzILOpPure *op_LSHIFT_116 = SHIFTL0(CAST(64, MSB(op_MUL_113), DUP(op_MUL_113)), SN(32, 0)); + RzILOpPure *op_SUB_117 = SUB(op_ADD_96, op_LSHIFT_116); + RzILOpPure *op_EQ_118 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_SUB_62), SN(32, 0), SN(32, 0x20)), op_SUB_117); + RzILOpPure *op_RSHIFT_175 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_177 = LOGAND(op_RSHIFT_175, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_183 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_185 = LOGAND(op_RSHIFT_183, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_191 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_193 = LOGAND(op_RSHIFT_191, SN(32, 0xffff)); + RzILOpPure *op_MUL_196 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_185), DUP(op_AND_185))), CAST(16, MSB(DUP(op_AND_185)), DUP(op_AND_185))), CAST(32, MSB(CAST(16, MSB(op_AND_193), DUP(op_AND_193))), CAST(16, MSB(DUP(op_AND_193)), DUP(op_AND_193)))); + RzILOpPure 
*op_LSHIFT_199 = SHIFTL0(CAST(64, MSB(op_MUL_196), DUP(op_MUL_196)), SN(32, 0)); + RzILOpPure *op_ADD_200 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_177), DUP(op_AND_177))), CAST(32, MSB(DUP(op_AND_177)), DUP(op_AND_177))), op_LSHIFT_199); + RzILOpPure *op_RSHIFT_204 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_206 = LOGAND(op_RSHIFT_204, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_212 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_214 = LOGAND(op_RSHIFT_212, SN(32, 0xffff)); + RzILOpPure *op_MUL_217 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_206), DUP(op_AND_206))), CAST(16, MSB(DUP(op_AND_206)), DUP(op_AND_206))), CAST(32, MSB(CAST(16, MSB(op_AND_214), DUP(op_AND_214))), CAST(16, MSB(DUP(op_AND_214)), DUP(op_AND_214)))); + RzILOpPure *op_LSHIFT_220 = SHIFTL0(CAST(64, MSB(op_MUL_217), DUP(op_MUL_217)), SN(32, 0)); + RzILOpPure *op_SUB_221 = SUB(op_ADD_200, op_LSHIFT_220); + RzILOpPure *op_LT_224 = SLT(op_SUB_221, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_229 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_230 = NEG(op_LSHIFT_229); + RzILOpPure *op_LSHIFT_235 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_238 = SUB(op_LSHIFT_235, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_239 = ITE(op_LT_224, op_NEG_230, op_SUB_238); + RzILOpEffect *gcc_expr_240 = BRANCH(op_EQ_118, EMPTY(), set_usr_field_call_171); + + // h_tmp236 = HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) - (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) - (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0))) {{}} else {set_usr_field(bundle, 
HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) - (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_242 = SETL("h_tmp236", cond_239); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx > ...; + RzILOpEffect *seq_243 = SEQN(2, gcc_expr_240, op_ASSIGN_hybrid_tmp_242); + + // Rxx = ((Rxx & (~(0xffffffff << 0x20))) | ((((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) - (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) - (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0)) ? 
((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) - (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0) : h_tmp236) & 0xffffffff) << 0x20)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0x20)); + RzILOpPure *op_NOT_6 = LOGNOT(op_LSHIFT_5); + RzILOpPure *op_AND_7 = LOGAND(READ_REG(pkt, Rxx_op, false), op_NOT_6); + RzILOpPure *op_RSHIFT_122 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_124 = LOGAND(op_RSHIFT_122, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_130 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_132 = LOGAND(op_RSHIFT_130, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_138 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_140 = LOGAND(op_RSHIFT_138, SN(32, 0xffff)); + RzILOpPure *op_MUL_143 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_132), DUP(op_AND_132))), CAST(16, MSB(DUP(op_AND_132)), DUP(op_AND_132))), CAST(32, MSB(CAST(16, MSB(op_AND_140), DUP(op_AND_140))), CAST(16, MSB(DUP(op_AND_140)), DUP(op_AND_140)))); + RzILOpPure *op_LSHIFT_146 = SHIFTL0(CAST(64, MSB(op_MUL_143), DUP(op_MUL_143)), SN(32, 0)); + RzILOpPure *op_ADD_147 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_124), DUP(op_AND_124))), CAST(32, MSB(DUP(op_AND_124)), DUP(op_AND_124))), op_LSHIFT_146); + RzILOpPure *op_RSHIFT_151 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_153 = LOGAND(op_RSHIFT_151, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_159 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_161 = LOGAND(op_RSHIFT_159, SN(32, 0xffff)); + RzILOpPure *op_MUL_164 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_153), DUP(op_AND_153))), CAST(16, MSB(DUP(op_AND_153)), DUP(op_AND_153))), CAST(32, MSB(CAST(16, MSB(op_AND_161), DUP(op_AND_161))), CAST(16, MSB(DUP(op_AND_161)), DUP(op_AND_161)))); + RzILOpPure *op_LSHIFT_167 = SHIFTL0(CAST(64, MSB(op_MUL_164), DUP(op_MUL_164)), SN(32, 0)); + RzILOpPure 
*op_SUB_168 = SUB(op_ADD_147, op_LSHIFT_167); + RzILOpPure *cond_244 = ITE(DUP(op_EQ_118), op_SUB_168, VARL("h_tmp236")); + RzILOpPure *op_AND_246 = LOGAND(cond_244, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_250 = SHIFTL0(op_AND_246, SN(32, 0x20)); + RzILOpPure *op_OR_251 = LOGOR(op_AND_7, op_LSHIFT_250); + RzILOpEffect *op_ASSIGN_252 = WRITE_REG(bundle, Rxx_op, op_OR_251); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((R ...; + RzILOpEffect *seq_253 = SEQN(2, seq_243, op_ASSIGN_252); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_423 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_268 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_270 = LOGAND(op_RSHIFT_268, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_276 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_278 = LOGAND(op_RSHIFT_276, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_284 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_286 = LOGAND(op_RSHIFT_284, SN(32, 0xffff)); + RzILOpPure *op_MUL_289 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_278), DUP(op_AND_278))), CAST(16, MSB(DUP(op_AND_278)), DUP(op_AND_278))), CAST(32, MSB(CAST(16, MSB(op_AND_286), DUP(op_AND_286))), CAST(16, MSB(DUP(op_AND_286)), DUP(op_AND_286)))); + RzILOpPure *op_LSHIFT_292 = SHIFTL0(CAST(64, MSB(op_MUL_289), DUP(op_MUL_289)), SN(32, 0)); + RzILOpPure *op_ADD_293 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_270), DUP(op_AND_270))), CAST(32, MSB(DUP(op_AND_270)), DUP(op_AND_270))), op_LSHIFT_292); + RzILOpPure *op_RSHIFT_297 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_299 = LOGAND(op_RSHIFT_297, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_305 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_307 = LOGAND(op_RSHIFT_305, SN(32, 0xffff)); + RzILOpPure *op_MUL_310 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_299), DUP(op_AND_299))), CAST(16, MSB(DUP(op_AND_299)), DUP(op_AND_299))), CAST(32, MSB(CAST(16, MSB(op_AND_307), DUP(op_AND_307))), CAST(16, MSB(DUP(op_AND_307)), DUP(op_AND_307)))); + RzILOpPure *op_LSHIFT_313 = SHIFTL0(CAST(64, MSB(op_MUL_310), DUP(op_MUL_310)), SN(32, 0)); + RzILOpPure *op_ADD_314 = ADD(op_ADD_293, op_LSHIFT_313); + RzILOpPure *op_RSHIFT_323 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_325 = LOGAND(op_RSHIFT_323, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_331 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_333 = LOGAND(op_RSHIFT_331, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_339 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_341 = LOGAND(op_RSHIFT_339, SN(32, 
0xffff)); + RzILOpPure *op_MUL_344 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_333), DUP(op_AND_333))), CAST(16, MSB(DUP(op_AND_333)), DUP(op_AND_333))), CAST(32, MSB(CAST(16, MSB(op_AND_341), DUP(op_AND_341))), CAST(16, MSB(DUP(op_AND_341)), DUP(op_AND_341)))); + RzILOpPure *op_LSHIFT_347 = SHIFTL0(CAST(64, MSB(op_MUL_344), DUP(op_MUL_344)), SN(32, 0)); + RzILOpPure *op_ADD_348 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_325), DUP(op_AND_325))), CAST(32, MSB(DUP(op_AND_325)), DUP(op_AND_325))), op_LSHIFT_347); + RzILOpPure *op_RSHIFT_352 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_354 = LOGAND(op_RSHIFT_352, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_360 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_362 = LOGAND(op_RSHIFT_360, SN(32, 0xffff)); + RzILOpPure *op_MUL_365 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_354), DUP(op_AND_354))), CAST(16, MSB(DUP(op_AND_354)), DUP(op_AND_354))), CAST(32, MSB(CAST(16, MSB(op_AND_362), DUP(op_AND_362))), CAST(16, MSB(DUP(op_AND_362)), DUP(op_AND_362)))); + RzILOpPure *op_LSHIFT_368 = SHIFTL0(CAST(64, MSB(op_MUL_365), DUP(op_MUL_365)), SN(32, 0)); + RzILOpPure *op_ADD_369 = ADD(op_ADD_348, op_LSHIFT_368); + RzILOpPure *op_EQ_370 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_ADD_314), SN(32, 0), SN(32, 0x20)), op_ADD_369); + RzILOpPure *op_RSHIFT_427 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_429 = LOGAND(op_RSHIFT_427, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_435 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_437 = LOGAND(op_RSHIFT_435, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_443 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_445 = LOGAND(op_RSHIFT_443, SN(32, 0xffff)); + RzILOpPure *op_MUL_448 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_437), DUP(op_AND_437))), CAST(16, MSB(DUP(op_AND_437)), DUP(op_AND_437))), CAST(32, MSB(CAST(16, MSB(op_AND_445), DUP(op_AND_445))), CAST(16, MSB(DUP(op_AND_445)), DUP(op_AND_445)))); + RzILOpPure *op_LSHIFT_451 = SHIFTL0(CAST(64, MSB(op_MUL_448), 
DUP(op_MUL_448)), SN(32, 0)); + RzILOpPure *op_ADD_452 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_429), DUP(op_AND_429))), CAST(32, MSB(DUP(op_AND_429)), DUP(op_AND_429))), op_LSHIFT_451); + RzILOpPure *op_RSHIFT_456 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_458 = LOGAND(op_RSHIFT_456, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_464 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_466 = LOGAND(op_RSHIFT_464, SN(32, 0xffff)); + RzILOpPure *op_MUL_469 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_458), DUP(op_AND_458))), CAST(16, MSB(DUP(op_AND_458)), DUP(op_AND_458))), CAST(32, MSB(CAST(16, MSB(op_AND_466), DUP(op_AND_466))), CAST(16, MSB(DUP(op_AND_466)), DUP(op_AND_466)))); + RzILOpPure *op_LSHIFT_472 = SHIFTL0(CAST(64, MSB(op_MUL_469), DUP(op_MUL_469)), SN(32, 0)); + RzILOpPure *op_ADD_473 = ADD(op_ADD_452, op_LSHIFT_472); + RzILOpPure *op_LT_476 = SLT(op_ADD_473, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_481 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_482 = NEG(op_LSHIFT_481); + RzILOpPure *op_LSHIFT_487 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_490 = SUB(op_LSHIFT_487, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_491 = ITE(op_LT_476, op_NEG_482, op_SUB_490); + RzILOpEffect *gcc_expr_492 = BRANCH(op_EQ_370, EMPTY(), set_usr_field_call_423); + + // h_tmp237 = HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) 
((Rxx >> 0x0) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_494 = SETL("h_tmp237", cond_491); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx > ...; + RzILOpEffect *seq_495 = SEQN(2, gcc_expr_492, op_ASSIGN_hybrid_tmp_494); + + // Rxx = ((Rxx & (~(0xffffffff << 0x0))) | ((((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0)) ? 
((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0) : h_tmp237) & 0xffffffff) << 0x0)); + RzILOpPure *op_LSHIFT_259 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0)); + RzILOpPure *op_NOT_260 = LOGNOT(op_LSHIFT_259); + RzILOpPure *op_AND_261 = LOGAND(READ_REG(pkt, Rxx_op, false), op_NOT_260); + RzILOpPure *op_RSHIFT_374 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_376 = LOGAND(op_RSHIFT_374, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_382 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_384 = LOGAND(op_RSHIFT_382, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_390 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_392 = LOGAND(op_RSHIFT_390, SN(32, 0xffff)); + RzILOpPure *op_MUL_395 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_384), DUP(op_AND_384))), CAST(16, MSB(DUP(op_AND_384)), DUP(op_AND_384))), CAST(32, MSB(CAST(16, MSB(op_AND_392), DUP(op_AND_392))), CAST(16, MSB(DUP(op_AND_392)), DUP(op_AND_392)))); + RzILOpPure *op_LSHIFT_398 = SHIFTL0(CAST(64, MSB(op_MUL_395), DUP(op_MUL_395)), SN(32, 0)); + RzILOpPure *op_ADD_399 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_376), DUP(op_AND_376))), CAST(32, MSB(DUP(op_AND_376)), DUP(op_AND_376))), op_LSHIFT_398); + RzILOpPure *op_RSHIFT_403 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_405 = LOGAND(op_RSHIFT_403, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_411 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_413 = LOGAND(op_RSHIFT_411, SN(32, 0xffff)); + RzILOpPure *op_MUL_416 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_405), DUP(op_AND_405))), CAST(16, MSB(DUP(op_AND_405)), DUP(op_AND_405))), CAST(32, MSB(CAST(16, MSB(op_AND_413), DUP(op_AND_413))), CAST(16, MSB(DUP(op_AND_413)), DUP(op_AND_413)))); + RzILOpPure *op_LSHIFT_419 = SHIFTL0(CAST(64, MSB(op_MUL_416), DUP(op_MUL_416)), SN(32, 0)); + RzILOpPure 
*op_ADD_420 = ADD(op_ADD_399, op_LSHIFT_419); + RzILOpPure *cond_496 = ITE(DUP(op_EQ_370), op_ADD_420, VARL("h_tmp237")); + RzILOpPure *op_AND_498 = LOGAND(cond_496, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_502 = SHIFTL0(op_AND_498, SN(32, 0)); + RzILOpPure *op_OR_503 = LOGOR(op_AND_261, op_LSHIFT_502); + RzILOpEffect *op_ASSIGN_504 = WRITE_REG(bundle, Rxx_op, op_OR_503); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((R ...; + RzILOpEffect *seq_505 = SEQN(2, seq_495, op_ASSIGN_504); + + RzILOpEffect *instruction_sequence = SEQN(2, seq_253, seq_505); + return instruction_sequence; +} + +// Rxx += cmpy(Rs,Rt*):<<1:sat +RzILOpEffect *hex_il_op_m2_cmacsc_s1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rxx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_171 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) - (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) - (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 
0x0) & 0xffff)))) << 0x1) - (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_23 = SHIFTRA(Rs, SN(32, 16)); + RzILOpPure *op_AND_25 = LOGAND(op_RSHIFT_23, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_32 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_34 = LOGAND(op_RSHIFT_32, SN(32, 0xffff)); + RzILOpPure *op_MUL_37 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_25), DUP(op_AND_25))), CAST(16, MSB(DUP(op_AND_25)), DUP(op_AND_25))), CAST(32, MSB(CAST(16, MSB(op_AND_34), DUP(op_AND_34))), CAST(16, MSB(DUP(op_AND_34)), DUP(op_AND_34)))); + RzILOpPure *op_LSHIFT_40 = SHIFTL0(CAST(64, MSB(op_MUL_37), DUP(op_MUL_37)), SN(32, 1)); + RzILOpPure *op_ADD_41 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_16), DUP(op_AND_16))), CAST(32, MSB(DUP(op_AND_16)), DUP(op_AND_16))), op_LSHIFT_40); + RzILOpPure *op_RSHIFT_45 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_47 = LOGAND(op_RSHIFT_45, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_53 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_55 = LOGAND(op_RSHIFT_53, SN(32, 0xffff)); + RzILOpPure *op_MUL_58 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_47), DUP(op_AND_47))), CAST(16, MSB(DUP(op_AND_47)), DUP(op_AND_47))), CAST(32, MSB(CAST(16, MSB(op_AND_55), DUP(op_AND_55))), CAST(16, MSB(DUP(op_AND_55)), DUP(op_AND_55)))); + RzILOpPure *op_LSHIFT_61 = SHIFTL0(CAST(64, MSB(op_MUL_58), DUP(op_MUL_58)), SN(32, 1)); + RzILOpPure *op_SUB_62 = SUB(op_ADD_41, op_LSHIFT_61); + RzILOpPure *op_RSHIFT_71 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_73 = LOGAND(op_RSHIFT_71, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_79 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_81 = LOGAND(op_RSHIFT_79, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_87 = 
SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_89 = LOGAND(op_RSHIFT_87, SN(32, 0xffff)); + RzILOpPure *op_MUL_92 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_81), DUP(op_AND_81))), CAST(16, MSB(DUP(op_AND_81)), DUP(op_AND_81))), CAST(32, MSB(CAST(16, MSB(op_AND_89), DUP(op_AND_89))), CAST(16, MSB(DUP(op_AND_89)), DUP(op_AND_89)))); + RzILOpPure *op_LSHIFT_95 = SHIFTL0(CAST(64, MSB(op_MUL_92), DUP(op_MUL_92)), SN(32, 1)); + RzILOpPure *op_ADD_96 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_73), DUP(op_AND_73))), CAST(32, MSB(DUP(op_AND_73)), DUP(op_AND_73))), op_LSHIFT_95); + RzILOpPure *op_RSHIFT_100 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_102 = LOGAND(op_RSHIFT_100, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_108 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_110 = LOGAND(op_RSHIFT_108, SN(32, 0xffff)); + RzILOpPure *op_MUL_113 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_102), DUP(op_AND_102))), CAST(16, MSB(DUP(op_AND_102)), DUP(op_AND_102))), CAST(32, MSB(CAST(16, MSB(op_AND_110), DUP(op_AND_110))), CAST(16, MSB(DUP(op_AND_110)), DUP(op_AND_110)))); + RzILOpPure *op_LSHIFT_116 = SHIFTL0(CAST(64, MSB(op_MUL_113), DUP(op_MUL_113)), SN(32, 1)); + RzILOpPure *op_SUB_117 = SUB(op_ADD_96, op_LSHIFT_116); + RzILOpPure *op_EQ_118 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_SUB_62), SN(32, 0), SN(32, 0x20)), op_SUB_117); + RzILOpPure *op_RSHIFT_175 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_177 = LOGAND(op_RSHIFT_175, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_183 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_185 = LOGAND(op_RSHIFT_183, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_191 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_193 = LOGAND(op_RSHIFT_191, SN(32, 0xffff)); + RzILOpPure *op_MUL_196 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_185), DUP(op_AND_185))), CAST(16, MSB(DUP(op_AND_185)), DUP(op_AND_185))), CAST(32, MSB(CAST(16, MSB(op_AND_193), DUP(op_AND_193))), CAST(16, MSB(DUP(op_AND_193)), DUP(op_AND_193)))); + RzILOpPure 
*op_LSHIFT_199 = SHIFTL0(CAST(64, MSB(op_MUL_196), DUP(op_MUL_196)), SN(32, 1)); + RzILOpPure *op_ADD_200 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_177), DUP(op_AND_177))), CAST(32, MSB(DUP(op_AND_177)), DUP(op_AND_177))), op_LSHIFT_199); + RzILOpPure *op_RSHIFT_204 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_206 = LOGAND(op_RSHIFT_204, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_212 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_214 = LOGAND(op_RSHIFT_212, SN(32, 0xffff)); + RzILOpPure *op_MUL_217 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_206), DUP(op_AND_206))), CAST(16, MSB(DUP(op_AND_206)), DUP(op_AND_206))), CAST(32, MSB(CAST(16, MSB(op_AND_214), DUP(op_AND_214))), CAST(16, MSB(DUP(op_AND_214)), DUP(op_AND_214)))); + RzILOpPure *op_LSHIFT_220 = SHIFTL0(CAST(64, MSB(op_MUL_217), DUP(op_MUL_217)), SN(32, 1)); + RzILOpPure *op_SUB_221 = SUB(op_ADD_200, op_LSHIFT_220); + RzILOpPure *op_LT_224 = SLT(op_SUB_221, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_229 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_230 = NEG(op_LSHIFT_229); + RzILOpPure *op_LSHIFT_235 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_238 = SUB(op_LSHIFT_235, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_239 = ITE(op_LT_224, op_NEG_230, op_SUB_238); + RzILOpEffect *gcc_expr_240 = BRANCH(op_EQ_118, EMPTY(), set_usr_field_call_171); + + // h_tmp238 = HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) - (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) - (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1))) {{}} else {set_usr_field(bundle, 
HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) - (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_242 = SETL("h_tmp238", cond_239); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx > ...; + RzILOpEffect *seq_243 = SEQN(2, gcc_expr_240, op_ASSIGN_hybrid_tmp_242); + + // Rxx = ((Rxx & (~(0xffffffff << 0x20))) | ((((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) - (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) - (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1)) ? 
((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) - (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1) : h_tmp238) & 0xffffffff) << 0x20)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0x20)); + RzILOpPure *op_NOT_6 = LOGNOT(op_LSHIFT_5); + RzILOpPure *op_AND_7 = LOGAND(READ_REG(pkt, Rxx_op, false), op_NOT_6); + RzILOpPure *op_RSHIFT_122 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_124 = LOGAND(op_RSHIFT_122, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_130 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_132 = LOGAND(op_RSHIFT_130, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_138 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_140 = LOGAND(op_RSHIFT_138, SN(32, 0xffff)); + RzILOpPure *op_MUL_143 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_132), DUP(op_AND_132))), CAST(16, MSB(DUP(op_AND_132)), DUP(op_AND_132))), CAST(32, MSB(CAST(16, MSB(op_AND_140), DUP(op_AND_140))), CAST(16, MSB(DUP(op_AND_140)), DUP(op_AND_140)))); + RzILOpPure *op_LSHIFT_146 = SHIFTL0(CAST(64, MSB(op_MUL_143), DUP(op_MUL_143)), SN(32, 1)); + RzILOpPure *op_ADD_147 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_124), DUP(op_AND_124))), CAST(32, MSB(DUP(op_AND_124)), DUP(op_AND_124))), op_LSHIFT_146); + RzILOpPure *op_RSHIFT_151 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_153 = LOGAND(op_RSHIFT_151, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_159 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_161 = LOGAND(op_RSHIFT_159, SN(32, 0xffff)); + RzILOpPure *op_MUL_164 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_153), DUP(op_AND_153))), CAST(16, MSB(DUP(op_AND_153)), DUP(op_AND_153))), CAST(32, MSB(CAST(16, MSB(op_AND_161), DUP(op_AND_161))), CAST(16, MSB(DUP(op_AND_161)), DUP(op_AND_161)))); + RzILOpPure *op_LSHIFT_167 = SHIFTL0(CAST(64, MSB(op_MUL_164), DUP(op_MUL_164)), SN(32, 1)); + RzILOpPure 
*op_SUB_168 = SUB(op_ADD_147, op_LSHIFT_167); + RzILOpPure *cond_244 = ITE(DUP(op_EQ_118), op_SUB_168, VARL("h_tmp238")); + RzILOpPure *op_AND_246 = LOGAND(cond_244, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_250 = SHIFTL0(op_AND_246, SN(32, 0x20)); + RzILOpPure *op_OR_251 = LOGOR(op_AND_7, op_LSHIFT_250); + RzILOpEffect *op_ASSIGN_252 = WRITE_REG(bundle, Rxx_op, op_OR_251); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((R ...; + RzILOpEffect *seq_253 = SEQN(2, seq_243, op_ASSIGN_252); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_423 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_268 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_270 = LOGAND(op_RSHIFT_268, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_276 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_278 = LOGAND(op_RSHIFT_276, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_284 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_286 = LOGAND(op_RSHIFT_284, SN(32, 0xffff)); + RzILOpPure *op_MUL_289 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_278), DUP(op_AND_278))), CAST(16, MSB(DUP(op_AND_278)), DUP(op_AND_278))), CAST(32, MSB(CAST(16, MSB(op_AND_286), DUP(op_AND_286))), CAST(16, MSB(DUP(op_AND_286)), DUP(op_AND_286)))); + RzILOpPure *op_LSHIFT_292 = SHIFTL0(CAST(64, MSB(op_MUL_289), DUP(op_MUL_289)), SN(32, 1)); + RzILOpPure *op_ADD_293 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_270), DUP(op_AND_270))), CAST(32, MSB(DUP(op_AND_270)), DUP(op_AND_270))), op_LSHIFT_292); + RzILOpPure *op_RSHIFT_297 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_299 = LOGAND(op_RSHIFT_297, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_305 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_307 = LOGAND(op_RSHIFT_305, SN(32, 0xffff)); + RzILOpPure *op_MUL_310 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_299), DUP(op_AND_299))), CAST(16, MSB(DUP(op_AND_299)), DUP(op_AND_299))), CAST(32, MSB(CAST(16, MSB(op_AND_307), DUP(op_AND_307))), CAST(16, MSB(DUP(op_AND_307)), DUP(op_AND_307)))); + RzILOpPure *op_LSHIFT_313 = SHIFTL0(CAST(64, MSB(op_MUL_310), DUP(op_MUL_310)), SN(32, 1)); + RzILOpPure *op_ADD_314 = ADD(op_ADD_293, op_LSHIFT_313); + RzILOpPure *op_RSHIFT_323 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_325 = LOGAND(op_RSHIFT_323, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_331 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_333 = LOGAND(op_RSHIFT_331, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_339 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_341 = LOGAND(op_RSHIFT_339, SN(32, 
0xffff)); + RzILOpPure *op_MUL_344 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_333), DUP(op_AND_333))), CAST(16, MSB(DUP(op_AND_333)), DUP(op_AND_333))), CAST(32, MSB(CAST(16, MSB(op_AND_341), DUP(op_AND_341))), CAST(16, MSB(DUP(op_AND_341)), DUP(op_AND_341)))); + RzILOpPure *op_LSHIFT_347 = SHIFTL0(CAST(64, MSB(op_MUL_344), DUP(op_MUL_344)), SN(32, 1)); + RzILOpPure *op_ADD_348 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_325), DUP(op_AND_325))), CAST(32, MSB(DUP(op_AND_325)), DUP(op_AND_325))), op_LSHIFT_347); + RzILOpPure *op_RSHIFT_352 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_354 = LOGAND(op_RSHIFT_352, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_360 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_362 = LOGAND(op_RSHIFT_360, SN(32, 0xffff)); + RzILOpPure *op_MUL_365 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_354), DUP(op_AND_354))), CAST(16, MSB(DUP(op_AND_354)), DUP(op_AND_354))), CAST(32, MSB(CAST(16, MSB(op_AND_362), DUP(op_AND_362))), CAST(16, MSB(DUP(op_AND_362)), DUP(op_AND_362)))); + RzILOpPure *op_LSHIFT_368 = SHIFTL0(CAST(64, MSB(op_MUL_365), DUP(op_MUL_365)), SN(32, 1)); + RzILOpPure *op_ADD_369 = ADD(op_ADD_348, op_LSHIFT_368); + RzILOpPure *op_EQ_370 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_ADD_314), SN(32, 0), SN(32, 0x20)), op_ADD_369); + RzILOpPure *op_RSHIFT_427 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_429 = LOGAND(op_RSHIFT_427, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_435 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_437 = LOGAND(op_RSHIFT_435, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_443 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_445 = LOGAND(op_RSHIFT_443, SN(32, 0xffff)); + RzILOpPure *op_MUL_448 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_437), DUP(op_AND_437))), CAST(16, MSB(DUP(op_AND_437)), DUP(op_AND_437))), CAST(32, MSB(CAST(16, MSB(op_AND_445), DUP(op_AND_445))), CAST(16, MSB(DUP(op_AND_445)), DUP(op_AND_445)))); + RzILOpPure *op_LSHIFT_451 = SHIFTL0(CAST(64, MSB(op_MUL_448), 
DUP(op_MUL_448)), SN(32, 1)); + RzILOpPure *op_ADD_452 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_429), DUP(op_AND_429))), CAST(32, MSB(DUP(op_AND_429)), DUP(op_AND_429))), op_LSHIFT_451); + RzILOpPure *op_RSHIFT_456 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_458 = LOGAND(op_RSHIFT_456, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_464 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_466 = LOGAND(op_RSHIFT_464, SN(32, 0xffff)); + RzILOpPure *op_MUL_469 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_458), DUP(op_AND_458))), CAST(16, MSB(DUP(op_AND_458)), DUP(op_AND_458))), CAST(32, MSB(CAST(16, MSB(op_AND_466), DUP(op_AND_466))), CAST(16, MSB(DUP(op_AND_466)), DUP(op_AND_466)))); + RzILOpPure *op_LSHIFT_472 = SHIFTL0(CAST(64, MSB(op_MUL_469), DUP(op_MUL_469)), SN(32, 1)); + RzILOpPure *op_ADD_473 = ADD(op_ADD_452, op_LSHIFT_472); + RzILOpPure *op_LT_476 = SLT(op_ADD_473, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_481 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_482 = NEG(op_LSHIFT_481); + RzILOpPure *op_LSHIFT_487 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_490 = SUB(op_LSHIFT_487, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_491 = ITE(op_LT_476, op_NEG_482, op_SUB_490); + RzILOpEffect *gcc_expr_492 = BRANCH(op_EQ_370, EMPTY(), set_usr_field_call_423); + + // h_tmp239 = HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) 
((Rxx >> 0x0) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_494 = SETL("h_tmp239", cond_491); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx > ...; + RzILOpEffect *seq_495 = SEQN(2, gcc_expr_492, op_ASSIGN_hybrid_tmp_494); + + // Rxx = ((Rxx & (~(0xffffffff << 0x0))) | ((((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1)) ? 
((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1) : h_tmp239) & 0xffffffff) << 0x0)); + RzILOpPure *op_LSHIFT_259 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0)); + RzILOpPure *op_NOT_260 = LOGNOT(op_LSHIFT_259); + RzILOpPure *op_AND_261 = LOGAND(READ_REG(pkt, Rxx_op, false), op_NOT_260); + RzILOpPure *op_RSHIFT_374 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_376 = LOGAND(op_RSHIFT_374, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_382 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_384 = LOGAND(op_RSHIFT_382, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_390 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_392 = LOGAND(op_RSHIFT_390, SN(32, 0xffff)); + RzILOpPure *op_MUL_395 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_384), DUP(op_AND_384))), CAST(16, MSB(DUP(op_AND_384)), DUP(op_AND_384))), CAST(32, MSB(CAST(16, MSB(op_AND_392), DUP(op_AND_392))), CAST(16, MSB(DUP(op_AND_392)), DUP(op_AND_392)))); + RzILOpPure *op_LSHIFT_398 = SHIFTL0(CAST(64, MSB(op_MUL_395), DUP(op_MUL_395)), SN(32, 1)); + RzILOpPure *op_ADD_399 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_376), DUP(op_AND_376))), CAST(32, MSB(DUP(op_AND_376)), DUP(op_AND_376))), op_LSHIFT_398); + RzILOpPure *op_RSHIFT_403 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_405 = LOGAND(op_RSHIFT_403, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_411 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_413 = LOGAND(op_RSHIFT_411, SN(32, 0xffff)); + RzILOpPure *op_MUL_416 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_405), DUP(op_AND_405))), CAST(16, MSB(DUP(op_AND_405)), DUP(op_AND_405))), CAST(32, MSB(CAST(16, MSB(op_AND_413), DUP(op_AND_413))), CAST(16, MSB(DUP(op_AND_413)), DUP(op_AND_413)))); + RzILOpPure *op_LSHIFT_419 = SHIFTL0(CAST(64, MSB(op_MUL_416), DUP(op_MUL_416)), SN(32, 1)); + RzILOpPure 
*op_ADD_420 = ADD(op_ADD_399, op_LSHIFT_419); + RzILOpPure *cond_496 = ITE(DUP(op_EQ_370), op_ADD_420, VARL("h_tmp239")); + RzILOpPure *op_AND_498 = LOGAND(cond_496, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_502 = SHIFTL0(op_AND_498, SN(32, 0)); + RzILOpPure *op_OR_503 = LOGOR(op_AND_261, op_LSHIFT_502); + RzILOpEffect *op_ASSIGN_504 = WRITE_REG(bundle, Rxx_op, op_OR_503); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((R ...; + RzILOpEffect *seq_505 = SEQN(2, seq_495, op_ASSIGN_504); + + RzILOpEffect *instruction_sequence = SEQN(2, seq_253, seq_505); + return instruction_sequence; +} + +// Rdd = cmpyi(Rs,Rt) +RzILOpEffect *hex_il_op_m2_cmpyi_s0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rdd = ((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) + ((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rs, SN(32, 16)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0xffff)); + RzILOpPure *op_MUL_19 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_7), DUP(op_AND_7))), CAST(16, MSB(DUP(op_AND_7)), DUP(op_AND_7))), CAST(32, MSB(CAST(16, MSB(op_AND_16), DUP(op_AND_16))), CAST(16, MSB(DUP(op_AND_16)), DUP(op_AND_16)))); + RzILOpPure *op_RSHIFT_24 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_26 = LOGAND(op_RSHIFT_24, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_32 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_34 = LOGAND(op_RSHIFT_32, SN(32, 0xffff)); + RzILOpPure *op_MUL_37 = MUL(CAST(32, MSB(CAST(16, 
MSB(op_AND_26), DUP(op_AND_26))), CAST(16, MSB(DUP(op_AND_26)), DUP(op_AND_26))), CAST(32, MSB(CAST(16, MSB(op_AND_34), DUP(op_AND_34))), CAST(16, MSB(DUP(op_AND_34)), DUP(op_AND_34)))); + RzILOpPure *op_ADD_39 = ADD(CAST(64, MSB(op_MUL_19), DUP(op_MUL_19)), CAST(64, MSB(op_MUL_37), DUP(op_MUL_37))); + RzILOpEffect *op_ASSIGN_40 = WRITE_REG(bundle, Rdd_op, op_ADD_39); + + RzILOpEffect *instruction_sequence = op_ASSIGN_40; + return instruction_sequence; +} + +// Rdd = cmpyr(Rs,Rt) +RzILOpEffect *hex_il_op_m2_cmpyr_s0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rdd = ((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) - ((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rs, SN(32, 0)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0xffff)); + RzILOpPure *op_MUL_19 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_7), DUP(op_AND_7))), CAST(16, MSB(DUP(op_AND_7)), DUP(op_AND_7))), CAST(32, MSB(CAST(16, MSB(op_AND_16), DUP(op_AND_16))), CAST(16, MSB(DUP(op_AND_16)), DUP(op_AND_16)))); + RzILOpPure *op_RSHIFT_24 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_26 = LOGAND(op_RSHIFT_24, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_32 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_34 = LOGAND(op_RSHIFT_32, SN(32, 0xffff)); + RzILOpPure *op_MUL_37 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_26), DUP(op_AND_26))), CAST(16, MSB(DUP(op_AND_26)), DUP(op_AND_26))), CAST(32, MSB(CAST(16, MSB(op_AND_34), DUP(op_AND_34))), CAST(16, 
MSB(DUP(op_AND_34)), DUP(op_AND_34)))); + RzILOpPure *op_SUB_39 = SUB(CAST(64, MSB(op_MUL_19), DUP(op_MUL_19)), CAST(64, MSB(op_MUL_37), DUP(op_MUL_37))); + RzILOpEffect *op_ASSIGN_40 = WRITE_REG(bundle, Rdd_op, op_SUB_39); + + RzILOpEffect *instruction_sequence = op_ASSIGN_40; + return instruction_sequence; +} + +// Rd = cmpy(Rs,Rt):rnd:sat +RzILOpEffect *hex_il_op_m2_cmpyrs_s0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_154 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0) + ((st64) 0x8000)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0) + ((st64) 0x8000))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0) + ((st64) 0x8000) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_16 = SHIFTRA(Rs, SN(32, 16)); + RzILOpPure *op_AND_18 = LOGAND(op_RSHIFT_16, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_25 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_27 = LOGAND(op_RSHIFT_25, SN(32, 0xffff)); + RzILOpPure *op_MUL_30 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_18), DUP(op_AND_18))), CAST(16, MSB(DUP(op_AND_18)), DUP(op_AND_18))), CAST(32, MSB(CAST(16, MSB(op_AND_27), DUP(op_AND_27))), CAST(16, MSB(DUP(op_AND_27)), DUP(op_AND_27)))); + RzILOpPure *op_LSHIFT_33 = SHIFTL0(CAST(64, MSB(op_MUL_30), DUP(op_MUL_30)), SN(32, 0)); + RzILOpPure *op_RSHIFT_37 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_39 = LOGAND(op_RSHIFT_37, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_45 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_47 = LOGAND(op_RSHIFT_45, SN(32, 0xffff)); + RzILOpPure *op_MUL_50 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_39), DUP(op_AND_39))), CAST(16, MSB(DUP(op_AND_39)), DUP(op_AND_39))), CAST(32, MSB(CAST(16, MSB(op_AND_47), DUP(op_AND_47))), CAST(16, MSB(DUP(op_AND_47)), DUP(op_AND_47)))); + RzILOpPure *op_LSHIFT_53 = SHIFTL0(CAST(64, MSB(op_MUL_50), DUP(op_MUL_50)), SN(32, 0)); + RzILOpPure *op_ADD_54 = ADD(op_LSHIFT_33, op_LSHIFT_53); + RzILOpPure *op_ADD_57 = ADD(op_ADD_54, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_66 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_68 = LOGAND(op_RSHIFT_66, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_74 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_76 = LOGAND(op_RSHIFT_74, SN(32, 0xffff)); + RzILOpPure *op_MUL_79 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_68), DUP(op_AND_68))), CAST(16, MSB(DUP(op_AND_68)), DUP(op_AND_68))), CAST(32, MSB(CAST(16, MSB(op_AND_76), DUP(op_AND_76))), CAST(16, MSB(DUP(op_AND_76)), DUP(op_AND_76)))); + RzILOpPure *op_LSHIFT_82 = SHIFTL0(CAST(64, MSB(op_MUL_79), DUP(op_MUL_79)), SN(32, 0)); + RzILOpPure *op_RSHIFT_86 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_88 = 
LOGAND(op_RSHIFT_86, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_94 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_96 = LOGAND(op_RSHIFT_94, SN(32, 0xffff)); + RzILOpPure *op_MUL_99 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_88), DUP(op_AND_88))), CAST(16, MSB(DUP(op_AND_88)), DUP(op_AND_88))), CAST(32, MSB(CAST(16, MSB(op_AND_96), DUP(op_AND_96))), CAST(16, MSB(DUP(op_AND_96)), DUP(op_AND_96)))); + RzILOpPure *op_LSHIFT_102 = SHIFTL0(CAST(64, MSB(op_MUL_99), DUP(op_MUL_99)), SN(32, 0)); + RzILOpPure *op_ADD_103 = ADD(op_LSHIFT_82, op_LSHIFT_102); + RzILOpPure *op_ADD_106 = ADD(op_ADD_103, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_EQ_107 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_ADD_57), SN(32, 0), SN(32, 0x20)), op_ADD_106); + RzILOpPure *op_RSHIFT_158 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_160 = LOGAND(op_RSHIFT_158, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_166 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_168 = LOGAND(op_RSHIFT_166, SN(32, 0xffff)); + RzILOpPure *op_MUL_171 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_160), DUP(op_AND_160))), CAST(16, MSB(DUP(op_AND_160)), DUP(op_AND_160))), CAST(32, MSB(CAST(16, MSB(op_AND_168), DUP(op_AND_168))), CAST(16, MSB(DUP(op_AND_168)), DUP(op_AND_168)))); + RzILOpPure *op_LSHIFT_174 = SHIFTL0(CAST(64, MSB(op_MUL_171), DUP(op_MUL_171)), SN(32, 0)); + RzILOpPure *op_RSHIFT_178 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_180 = LOGAND(op_RSHIFT_178, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_186 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_188 = LOGAND(op_RSHIFT_186, SN(32, 0xffff)); + RzILOpPure *op_MUL_191 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_180), DUP(op_AND_180))), CAST(16, MSB(DUP(op_AND_180)), DUP(op_AND_180))), CAST(32, MSB(CAST(16, MSB(op_AND_188), DUP(op_AND_188))), CAST(16, MSB(DUP(op_AND_188)), DUP(op_AND_188)))); + RzILOpPure *op_LSHIFT_194 = SHIFTL0(CAST(64, MSB(op_MUL_191), DUP(op_MUL_191)), SN(32, 0)); + RzILOpPure *op_ADD_195 = ADD(op_LSHIFT_174, 
op_LSHIFT_194); + RzILOpPure *op_ADD_198 = ADD(op_ADD_195, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_LT_201 = SLT(op_ADD_198, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_206 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_207 = NEG(op_LSHIFT_206); + RzILOpPure *op_LSHIFT_212 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_215 = SUB(op_LSHIFT_212, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_216 = ITE(op_LT_201, op_NEG_207, op_SUB_215); + RzILOpEffect *gcc_expr_217 = BRANCH(op_EQ_107, EMPTY(), set_usr_field_call_154); + + // h_tmp240 = HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0) + ((st64) 0x8000)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0) + ((st64) 0x8000))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0) + ((st64) 0x8000) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_219 = SETL("h_tmp240", cond_216); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16 ...; + RzILOpEffect *seq_220 = SEQN(2, gcc_expr_217, op_ASSIGN_hybrid_tmp_219); + + // Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xffff << 0x10)))) | (((ut64) (((st32) ((st16) ((((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0) + ((st64) 0x8000)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0) + ((st64) 0x8000)) ? (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0) + ((st64) 0x8000) : h_tmp240) >> 0x10) & ((st64) 0xffff)))) & 0xffff)) << 0x10))); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(SN(64, 0xffff), SN(32, 16)); + RzILOpPure *op_NOT_6 = LOGNOT(op_LSHIFT_5); + RzILOpPure *op_AND_8 = LOGAND(CAST(64, MSB(READ_REG(pkt, Rd_op, true)), READ_REG(pkt, Rd_op, true)), op_NOT_6); + RzILOpPure *op_RSHIFT_111 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_113 = LOGAND(op_RSHIFT_111, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_119 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_121 = LOGAND(op_RSHIFT_119, SN(32, 0xffff)); + RzILOpPure *op_MUL_124 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_113), DUP(op_AND_113))), CAST(16, MSB(DUP(op_AND_113)), DUP(op_AND_113))), CAST(32, MSB(CAST(16, MSB(op_AND_121), DUP(op_AND_121))), CAST(16, MSB(DUP(op_AND_121)), DUP(op_AND_121)))); + RzILOpPure *op_LSHIFT_127 = SHIFTL0(CAST(64, MSB(op_MUL_124), DUP(op_MUL_124)), SN(32, 0)); + RzILOpPure *op_RSHIFT_131 
= SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_133 = LOGAND(op_RSHIFT_131, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_139 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_141 = LOGAND(op_RSHIFT_139, SN(32, 0xffff)); + RzILOpPure *op_MUL_144 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_133), DUP(op_AND_133))), CAST(16, MSB(DUP(op_AND_133)), DUP(op_AND_133))), CAST(32, MSB(CAST(16, MSB(op_AND_141), DUP(op_AND_141))), CAST(16, MSB(DUP(op_AND_141)), DUP(op_AND_141)))); + RzILOpPure *op_LSHIFT_147 = SHIFTL0(CAST(64, MSB(op_MUL_144), DUP(op_MUL_144)), SN(32, 0)); + RzILOpPure *op_ADD_148 = ADD(op_LSHIFT_127, op_LSHIFT_147); + RzILOpPure *op_ADD_151 = ADD(op_ADD_148, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *cond_221 = ITE(DUP(op_EQ_107), op_ADD_151, VARL("h_tmp240")); + RzILOpPure *op_RSHIFT_225 = SHIFTRA(cond_221, SN(32, 16)); + RzILOpPure *op_AND_228 = LOGAND(op_RSHIFT_225, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_AND_232 = LOGAND(CAST(32, MSB(CAST(16, MSB(op_AND_228), DUP(op_AND_228))), CAST(16, MSB(DUP(op_AND_228)), DUP(op_AND_228))), SN(32, 0xffff)); + RzILOpPure *op_LSHIFT_237 = SHIFTL0(CAST(64, IL_FALSE, op_AND_232), SN(32, 16)); + RzILOpPure *op_OR_239 = LOGOR(CAST(64, IL_FALSE, op_AND_8), op_LSHIFT_237); + RzILOpEffect *op_ASSIGN_241 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, op_OR_239)); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) (( ...; + RzILOpEffect *seq_242 = SEQN(2, seq_220, op_ASSIGN_241); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_395 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) - (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0) + ((st64) 0x8000)), 0x0, 0x20) == (((st64) ((st32) ((st16) 
((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) - (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0) + ((st64) 0x8000))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) - (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0) + ((st64) 0x8000) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_258 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_260 = LOGAND(op_RSHIFT_258, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_266 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_268 = LOGAND(op_RSHIFT_266, SN(32, 0xffff)); + RzILOpPure *op_MUL_271 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_260), DUP(op_AND_260))), CAST(16, MSB(DUP(op_AND_260)), DUP(op_AND_260))), CAST(32, MSB(CAST(16, MSB(op_AND_268), DUP(op_AND_268))), CAST(16, MSB(DUP(op_AND_268)), DUP(op_AND_268)))); + RzILOpPure *op_LSHIFT_274 = SHIFTL0(CAST(64, MSB(op_MUL_271), DUP(op_MUL_271)), SN(32, 0)); + RzILOpPure *op_RSHIFT_278 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_280 = LOGAND(op_RSHIFT_278, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_286 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_288 = LOGAND(op_RSHIFT_286, SN(32, 0xffff)); + RzILOpPure *op_MUL_291 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_280), DUP(op_AND_280))), CAST(16, MSB(DUP(op_AND_280)), DUP(op_AND_280))), CAST(32, MSB(CAST(16, MSB(op_AND_288), DUP(op_AND_288))), CAST(16, MSB(DUP(op_AND_288)), DUP(op_AND_288)))); + RzILOpPure *op_LSHIFT_294 = SHIFTL0(CAST(64, MSB(op_MUL_291), DUP(op_MUL_291)), SN(32, 0)); + RzILOpPure *op_SUB_295 = SUB(op_LSHIFT_274, op_LSHIFT_294); + RzILOpPure *op_ADD_298 = ADD(op_SUB_295, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_307 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_309 = 
LOGAND(op_RSHIFT_307, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_315 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_317 = LOGAND(op_RSHIFT_315, SN(32, 0xffff)); + RzILOpPure *op_MUL_320 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_309), DUP(op_AND_309))), CAST(16, MSB(DUP(op_AND_309)), DUP(op_AND_309))), CAST(32, MSB(CAST(16, MSB(op_AND_317), DUP(op_AND_317))), CAST(16, MSB(DUP(op_AND_317)), DUP(op_AND_317)))); + RzILOpPure *op_LSHIFT_323 = SHIFTL0(CAST(64, MSB(op_MUL_320), DUP(op_MUL_320)), SN(32, 0)); + RzILOpPure *op_RSHIFT_327 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_329 = LOGAND(op_RSHIFT_327, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_335 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_337 = LOGAND(op_RSHIFT_335, SN(32, 0xffff)); + RzILOpPure *op_MUL_340 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_329), DUP(op_AND_329))), CAST(16, MSB(DUP(op_AND_329)), DUP(op_AND_329))), CAST(32, MSB(CAST(16, MSB(op_AND_337), DUP(op_AND_337))), CAST(16, MSB(DUP(op_AND_337)), DUP(op_AND_337)))); + RzILOpPure *op_LSHIFT_343 = SHIFTL0(CAST(64, MSB(op_MUL_340), DUP(op_MUL_340)), SN(32, 0)); + RzILOpPure *op_SUB_344 = SUB(op_LSHIFT_323, op_LSHIFT_343); + RzILOpPure *op_ADD_347 = ADD(op_SUB_344, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_EQ_348 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_ADD_298), SN(32, 0), SN(32, 0x20)), op_ADD_347); + RzILOpPure *op_RSHIFT_399 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_401 = LOGAND(op_RSHIFT_399, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_407 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_409 = LOGAND(op_RSHIFT_407, SN(32, 0xffff)); + RzILOpPure *op_MUL_412 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_401), DUP(op_AND_401))), CAST(16, MSB(DUP(op_AND_401)), DUP(op_AND_401))), CAST(32, MSB(CAST(16, MSB(op_AND_409), DUP(op_AND_409))), CAST(16, MSB(DUP(op_AND_409)), DUP(op_AND_409)))); + RzILOpPure *op_LSHIFT_415 = SHIFTL0(CAST(64, MSB(op_MUL_412), DUP(op_MUL_412)), SN(32, 0)); + RzILOpPure *op_RSHIFT_419 = 
SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_421 = LOGAND(op_RSHIFT_419, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_427 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_429 = LOGAND(op_RSHIFT_427, SN(32, 0xffff)); + RzILOpPure *op_MUL_432 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_421), DUP(op_AND_421))), CAST(16, MSB(DUP(op_AND_421)), DUP(op_AND_421))), CAST(32, MSB(CAST(16, MSB(op_AND_429), DUP(op_AND_429))), CAST(16, MSB(DUP(op_AND_429)), DUP(op_AND_429)))); + RzILOpPure *op_LSHIFT_435 = SHIFTL0(CAST(64, MSB(op_MUL_432), DUP(op_MUL_432)), SN(32, 0)); + RzILOpPure *op_SUB_436 = SUB(op_LSHIFT_415, op_LSHIFT_435); + RzILOpPure *op_ADD_439 = ADD(op_SUB_436, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_LT_442 = SLT(op_ADD_439, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_447 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_448 = NEG(op_LSHIFT_447); + RzILOpPure *op_LSHIFT_453 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_456 = SUB(op_LSHIFT_453, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_457 = ITE(op_LT_442, op_NEG_448, op_SUB_456); + RzILOpEffect *gcc_expr_458 = BRANCH(op_EQ_348, EMPTY(), set_usr_field_call_395); + + // h_tmp241 = HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) - (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0) + ((st64) 0x8000)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) - (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0) + ((st64) 0x8000))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) - (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) 
<< 0x0) + ((st64) 0x8000) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_460 = SETL("h_tmp241", cond_457); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16 ...; + RzILOpEffect *seq_461 = SEQN(2, gcc_expr_458, op_ASSIGN_hybrid_tmp_460); + + // Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xffff << 0x0)))) | (((ut64) (((st32) ((st16) ((((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) - (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0) + ((st64) 0x8000)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) - (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0) + ((st64) 0x8000)) ? (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) - (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0) + ((st64) 0x8000) : h_tmp241) >> 0x10) & ((st64) 0xffff)))) & 0xffff)) << 0x0))); + RzILOpPure *op_LSHIFT_248 = SHIFTL0(SN(64, 0xffff), SN(32, 0)); + RzILOpPure *op_NOT_249 = LOGNOT(op_LSHIFT_248); + RzILOpPure *op_AND_251 = LOGAND(CAST(64, MSB(READ_REG(pkt, Rd_op, true)), READ_REG(pkt, Rd_op, true)), op_NOT_249); + RzILOpPure *op_RSHIFT_352 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_354 = LOGAND(op_RSHIFT_352, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_360 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_362 = LOGAND(op_RSHIFT_360, SN(32, 0xffff)); + RzILOpPure *op_MUL_365 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_354), DUP(op_AND_354))), CAST(16, MSB(DUP(op_AND_354)), DUP(op_AND_354))), CAST(32, MSB(CAST(16, MSB(op_AND_362), DUP(op_AND_362))), CAST(16, MSB(DUP(op_AND_362)), DUP(op_AND_362)))); + RzILOpPure *op_LSHIFT_368 = SHIFTL0(CAST(64, MSB(op_MUL_365), 
DUP(op_MUL_365)), SN(32, 0)); + RzILOpPure *op_RSHIFT_372 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_374 = LOGAND(op_RSHIFT_372, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_380 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_382 = LOGAND(op_RSHIFT_380, SN(32, 0xffff)); + RzILOpPure *op_MUL_385 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_374), DUP(op_AND_374))), CAST(16, MSB(DUP(op_AND_374)), DUP(op_AND_374))), CAST(32, MSB(CAST(16, MSB(op_AND_382), DUP(op_AND_382))), CAST(16, MSB(DUP(op_AND_382)), DUP(op_AND_382)))); + RzILOpPure *op_LSHIFT_388 = SHIFTL0(CAST(64, MSB(op_MUL_385), DUP(op_MUL_385)), SN(32, 0)); + RzILOpPure *op_SUB_389 = SUB(op_LSHIFT_368, op_LSHIFT_388); + RzILOpPure *op_ADD_392 = ADD(op_SUB_389, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *cond_462 = ITE(DUP(op_EQ_348), op_ADD_392, VARL("h_tmp241")); + RzILOpPure *op_RSHIFT_466 = SHIFTRA(cond_462, SN(32, 16)); + RzILOpPure *op_AND_469 = LOGAND(op_RSHIFT_466, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_AND_473 = LOGAND(CAST(32, MSB(CAST(16, MSB(op_AND_469), DUP(op_AND_469))), CAST(16, MSB(DUP(op_AND_469)), DUP(op_AND_469))), SN(32, 0xffff)); + RzILOpPure *op_LSHIFT_478 = SHIFTL0(CAST(64, IL_FALSE, op_AND_473), SN(32, 0)); + RzILOpPure *op_OR_480 = LOGOR(CAST(64, IL_FALSE, op_AND_251), op_LSHIFT_478); + RzILOpEffect *op_ASSIGN_482 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, op_OR_480)); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) (( ...; + RzILOpEffect *seq_483 = SEQN(2, seq_461, op_ASSIGN_482); + + RzILOpEffect *instruction_sequence = SEQN(2, seq_242, seq_483); + return instruction_sequence; +} + +// Rd = cmpy(Rs,Rt):<<1:rnd:sat +RzILOpEffect *hex_il_op_m2_cmpyrs_s1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op 
= ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_154 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1) + ((st64) 0x8000)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1) + ((st64) 0x8000))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1) + ((st64) 0x8000) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_16 = SHIFTRA(Rs, SN(32, 16)); + RzILOpPure *op_AND_18 = LOGAND(op_RSHIFT_16, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_25 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_27 = LOGAND(op_RSHIFT_25, SN(32, 0xffff)); + RzILOpPure *op_MUL_30 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_18), DUP(op_AND_18))), CAST(16, MSB(DUP(op_AND_18)), DUP(op_AND_18))), CAST(32, MSB(CAST(16, MSB(op_AND_27), DUP(op_AND_27))), CAST(16, MSB(DUP(op_AND_27)), DUP(op_AND_27)))); + RzILOpPure *op_LSHIFT_33 = SHIFTL0(CAST(64, MSB(op_MUL_30), DUP(op_MUL_30)), SN(32, 1)); + RzILOpPure *op_RSHIFT_37 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_39 = LOGAND(op_RSHIFT_37, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_45 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_47 = LOGAND(op_RSHIFT_45, SN(32, 0xffff)); + RzILOpPure *op_MUL_50 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_39), DUP(op_AND_39))), CAST(16, MSB(DUP(op_AND_39)), DUP(op_AND_39))), CAST(32, MSB(CAST(16, MSB(op_AND_47), DUP(op_AND_47))), CAST(16, MSB(DUP(op_AND_47)), DUP(op_AND_47)))); + RzILOpPure *op_LSHIFT_53 = SHIFTL0(CAST(64, MSB(op_MUL_50), DUP(op_MUL_50)), SN(32, 1)); + RzILOpPure *op_ADD_54 = ADD(op_LSHIFT_33, op_LSHIFT_53); + RzILOpPure *op_ADD_57 = ADD(op_ADD_54, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_66 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_68 = LOGAND(op_RSHIFT_66, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_74 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_76 = LOGAND(op_RSHIFT_74, SN(32, 0xffff)); + RzILOpPure *op_MUL_79 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_68), DUP(op_AND_68))), CAST(16, MSB(DUP(op_AND_68)), DUP(op_AND_68))), CAST(32, MSB(CAST(16, MSB(op_AND_76), DUP(op_AND_76))), CAST(16, MSB(DUP(op_AND_76)), DUP(op_AND_76)))); + RzILOpPure *op_LSHIFT_82 = SHIFTL0(CAST(64, MSB(op_MUL_79), DUP(op_MUL_79)), SN(32, 1)); + RzILOpPure *op_RSHIFT_86 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_88 = 
LOGAND(op_RSHIFT_86, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_94 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_96 = LOGAND(op_RSHIFT_94, SN(32, 0xffff)); + RzILOpPure *op_MUL_99 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_88), DUP(op_AND_88))), CAST(16, MSB(DUP(op_AND_88)), DUP(op_AND_88))), CAST(32, MSB(CAST(16, MSB(op_AND_96), DUP(op_AND_96))), CAST(16, MSB(DUP(op_AND_96)), DUP(op_AND_96)))); + RzILOpPure *op_LSHIFT_102 = SHIFTL0(CAST(64, MSB(op_MUL_99), DUP(op_MUL_99)), SN(32, 1)); + RzILOpPure *op_ADD_103 = ADD(op_LSHIFT_82, op_LSHIFT_102); + RzILOpPure *op_ADD_106 = ADD(op_ADD_103, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_EQ_107 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_ADD_57), SN(32, 0), SN(32, 0x20)), op_ADD_106); + RzILOpPure *op_RSHIFT_158 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_160 = LOGAND(op_RSHIFT_158, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_166 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_168 = LOGAND(op_RSHIFT_166, SN(32, 0xffff)); + RzILOpPure *op_MUL_171 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_160), DUP(op_AND_160))), CAST(16, MSB(DUP(op_AND_160)), DUP(op_AND_160))), CAST(32, MSB(CAST(16, MSB(op_AND_168), DUP(op_AND_168))), CAST(16, MSB(DUP(op_AND_168)), DUP(op_AND_168)))); + RzILOpPure *op_LSHIFT_174 = SHIFTL0(CAST(64, MSB(op_MUL_171), DUP(op_MUL_171)), SN(32, 1)); + RzILOpPure *op_RSHIFT_178 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_180 = LOGAND(op_RSHIFT_178, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_186 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_188 = LOGAND(op_RSHIFT_186, SN(32, 0xffff)); + RzILOpPure *op_MUL_191 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_180), DUP(op_AND_180))), CAST(16, MSB(DUP(op_AND_180)), DUP(op_AND_180))), CAST(32, MSB(CAST(16, MSB(op_AND_188), DUP(op_AND_188))), CAST(16, MSB(DUP(op_AND_188)), DUP(op_AND_188)))); + RzILOpPure *op_LSHIFT_194 = SHIFTL0(CAST(64, MSB(op_MUL_191), DUP(op_MUL_191)), SN(32, 1)); + RzILOpPure *op_ADD_195 = ADD(op_LSHIFT_174, 
op_LSHIFT_194); + RzILOpPure *op_ADD_198 = ADD(op_ADD_195, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_LT_201 = SLT(op_ADD_198, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_206 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_207 = NEG(op_LSHIFT_206); + RzILOpPure *op_LSHIFT_212 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_215 = SUB(op_LSHIFT_212, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_216 = ITE(op_LT_201, op_NEG_207, op_SUB_215); + RzILOpEffect *gcc_expr_217 = BRANCH(op_EQ_107, EMPTY(), set_usr_field_call_154); + + // h_tmp242 = HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1) + ((st64) 0x8000)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1) + ((st64) 0x8000))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1) + ((st64) 0x8000) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_219 = SETL("h_tmp242", cond_216); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16 ...; + RzILOpEffect *seq_220 = SEQN(2, gcc_expr_217, op_ASSIGN_hybrid_tmp_219); + + // Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xffff << 0x10)))) | (((ut64) (((st32) ((st16) ((((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1) + ((st64) 0x8000)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1) + ((st64) 0x8000)) ? (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1) + ((st64) 0x8000) : h_tmp242) >> 0x10) & ((st64) 0xffff)))) & 0xffff)) << 0x10))); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(SN(64, 0xffff), SN(32, 16)); + RzILOpPure *op_NOT_6 = LOGNOT(op_LSHIFT_5); + RzILOpPure *op_AND_8 = LOGAND(CAST(64, MSB(READ_REG(pkt, Rd_op, true)), READ_REG(pkt, Rd_op, true)), op_NOT_6); + RzILOpPure *op_RSHIFT_111 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_113 = LOGAND(op_RSHIFT_111, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_119 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_121 = LOGAND(op_RSHIFT_119, SN(32, 0xffff)); + RzILOpPure *op_MUL_124 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_113), DUP(op_AND_113))), CAST(16, MSB(DUP(op_AND_113)), DUP(op_AND_113))), CAST(32, MSB(CAST(16, MSB(op_AND_121), DUP(op_AND_121))), CAST(16, MSB(DUP(op_AND_121)), DUP(op_AND_121)))); + RzILOpPure *op_LSHIFT_127 = SHIFTL0(CAST(64, MSB(op_MUL_124), DUP(op_MUL_124)), SN(32, 1)); + RzILOpPure *op_RSHIFT_131 
= SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_133 = LOGAND(op_RSHIFT_131, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_139 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_141 = LOGAND(op_RSHIFT_139, SN(32, 0xffff)); + RzILOpPure *op_MUL_144 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_133), DUP(op_AND_133))), CAST(16, MSB(DUP(op_AND_133)), DUP(op_AND_133))), CAST(32, MSB(CAST(16, MSB(op_AND_141), DUP(op_AND_141))), CAST(16, MSB(DUP(op_AND_141)), DUP(op_AND_141)))); + RzILOpPure *op_LSHIFT_147 = SHIFTL0(CAST(64, MSB(op_MUL_144), DUP(op_MUL_144)), SN(32, 1)); + RzILOpPure *op_ADD_148 = ADD(op_LSHIFT_127, op_LSHIFT_147); + RzILOpPure *op_ADD_151 = ADD(op_ADD_148, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *cond_221 = ITE(DUP(op_EQ_107), op_ADD_151, VARL("h_tmp242")); + RzILOpPure *op_RSHIFT_225 = SHIFTRA(cond_221, SN(32, 16)); + RzILOpPure *op_AND_228 = LOGAND(op_RSHIFT_225, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_AND_232 = LOGAND(CAST(32, MSB(CAST(16, MSB(op_AND_228), DUP(op_AND_228))), CAST(16, MSB(DUP(op_AND_228)), DUP(op_AND_228))), SN(32, 0xffff)); + RzILOpPure *op_LSHIFT_237 = SHIFTL0(CAST(64, IL_FALSE, op_AND_232), SN(32, 16)); + RzILOpPure *op_OR_239 = LOGOR(CAST(64, IL_FALSE, op_AND_8), op_LSHIFT_237); + RzILOpEffect *op_ASSIGN_241 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, op_OR_239)); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) (( ...; + RzILOpEffect *seq_242 = SEQN(2, seq_220, op_ASSIGN_241); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_395 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) - (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1) + ((st64) 0x8000)), 0x0, 0x20) == (((st64) ((st32) ((st16) 
((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) - (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1) + ((st64) 0x8000))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) - (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1) + ((st64) 0x8000) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_258 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_260 = LOGAND(op_RSHIFT_258, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_266 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_268 = LOGAND(op_RSHIFT_266, SN(32, 0xffff)); + RzILOpPure *op_MUL_271 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_260), DUP(op_AND_260))), CAST(16, MSB(DUP(op_AND_260)), DUP(op_AND_260))), CAST(32, MSB(CAST(16, MSB(op_AND_268), DUP(op_AND_268))), CAST(16, MSB(DUP(op_AND_268)), DUP(op_AND_268)))); + RzILOpPure *op_LSHIFT_274 = SHIFTL0(CAST(64, MSB(op_MUL_271), DUP(op_MUL_271)), SN(32, 1)); + RzILOpPure *op_RSHIFT_278 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_280 = LOGAND(op_RSHIFT_278, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_286 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_288 = LOGAND(op_RSHIFT_286, SN(32, 0xffff)); + RzILOpPure *op_MUL_291 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_280), DUP(op_AND_280))), CAST(16, MSB(DUP(op_AND_280)), DUP(op_AND_280))), CAST(32, MSB(CAST(16, MSB(op_AND_288), DUP(op_AND_288))), CAST(16, MSB(DUP(op_AND_288)), DUP(op_AND_288)))); + RzILOpPure *op_LSHIFT_294 = SHIFTL0(CAST(64, MSB(op_MUL_291), DUP(op_MUL_291)), SN(32, 1)); + RzILOpPure *op_SUB_295 = SUB(op_LSHIFT_274, op_LSHIFT_294); + RzILOpPure *op_ADD_298 = ADD(op_SUB_295, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_307 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_309 = 
LOGAND(op_RSHIFT_307, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_315 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_317 = LOGAND(op_RSHIFT_315, SN(32, 0xffff)); + RzILOpPure *op_MUL_320 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_309), DUP(op_AND_309))), CAST(16, MSB(DUP(op_AND_309)), DUP(op_AND_309))), CAST(32, MSB(CAST(16, MSB(op_AND_317), DUP(op_AND_317))), CAST(16, MSB(DUP(op_AND_317)), DUP(op_AND_317)))); + RzILOpPure *op_LSHIFT_323 = SHIFTL0(CAST(64, MSB(op_MUL_320), DUP(op_MUL_320)), SN(32, 1)); + RzILOpPure *op_RSHIFT_327 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_329 = LOGAND(op_RSHIFT_327, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_335 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_337 = LOGAND(op_RSHIFT_335, SN(32, 0xffff)); + RzILOpPure *op_MUL_340 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_329), DUP(op_AND_329))), CAST(16, MSB(DUP(op_AND_329)), DUP(op_AND_329))), CAST(32, MSB(CAST(16, MSB(op_AND_337), DUP(op_AND_337))), CAST(16, MSB(DUP(op_AND_337)), DUP(op_AND_337)))); + RzILOpPure *op_LSHIFT_343 = SHIFTL0(CAST(64, MSB(op_MUL_340), DUP(op_MUL_340)), SN(32, 1)); + RzILOpPure *op_SUB_344 = SUB(op_LSHIFT_323, op_LSHIFT_343); + RzILOpPure *op_ADD_347 = ADD(op_SUB_344, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_EQ_348 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_ADD_298), SN(32, 0), SN(32, 0x20)), op_ADD_347); + RzILOpPure *op_RSHIFT_399 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_401 = LOGAND(op_RSHIFT_399, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_407 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_409 = LOGAND(op_RSHIFT_407, SN(32, 0xffff)); + RzILOpPure *op_MUL_412 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_401), DUP(op_AND_401))), CAST(16, MSB(DUP(op_AND_401)), DUP(op_AND_401))), CAST(32, MSB(CAST(16, MSB(op_AND_409), DUP(op_AND_409))), CAST(16, MSB(DUP(op_AND_409)), DUP(op_AND_409)))); + RzILOpPure *op_LSHIFT_415 = SHIFTL0(CAST(64, MSB(op_MUL_412), DUP(op_MUL_412)), SN(32, 1)); + RzILOpPure *op_RSHIFT_419 = 
SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_421 = LOGAND(op_RSHIFT_419, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_427 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_429 = LOGAND(op_RSHIFT_427, SN(32, 0xffff)); + RzILOpPure *op_MUL_432 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_421), DUP(op_AND_421))), CAST(16, MSB(DUP(op_AND_421)), DUP(op_AND_421))), CAST(32, MSB(CAST(16, MSB(op_AND_429), DUP(op_AND_429))), CAST(16, MSB(DUP(op_AND_429)), DUP(op_AND_429)))); + RzILOpPure *op_LSHIFT_435 = SHIFTL0(CAST(64, MSB(op_MUL_432), DUP(op_MUL_432)), SN(32, 1)); + RzILOpPure *op_SUB_436 = SUB(op_LSHIFT_415, op_LSHIFT_435); + RzILOpPure *op_ADD_439 = ADD(op_SUB_436, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_LT_442 = SLT(op_ADD_439, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_447 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_448 = NEG(op_LSHIFT_447); + RzILOpPure *op_LSHIFT_453 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_456 = SUB(op_LSHIFT_453, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_457 = ITE(op_LT_442, op_NEG_448, op_SUB_456); + RzILOpEffect *gcc_expr_458 = BRANCH(op_EQ_348, EMPTY(), set_usr_field_call_395); + + // h_tmp243 = HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) - (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1) + ((st64) 0x8000)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) - (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1) + ((st64) 0x8000))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) - (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) 
<< 0x1) + ((st64) 0x8000) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_460 = SETL("h_tmp243", cond_457); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16 ...; + RzILOpEffect *seq_461 = SEQN(2, gcc_expr_458, op_ASSIGN_hybrid_tmp_460); + + // Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xffff << 0x0)))) | (((ut64) (((st32) ((st16) ((((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) - (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1) + ((st64) 0x8000)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) - (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1) + ((st64) 0x8000)) ? (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) - (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1) + ((st64) 0x8000) : h_tmp243) >> 0x10) & ((st64) 0xffff)))) & 0xffff)) << 0x0))); + RzILOpPure *op_LSHIFT_248 = SHIFTL0(SN(64, 0xffff), SN(32, 0)); + RzILOpPure *op_NOT_249 = LOGNOT(op_LSHIFT_248); + RzILOpPure *op_AND_251 = LOGAND(CAST(64, MSB(READ_REG(pkt, Rd_op, true)), READ_REG(pkt, Rd_op, true)), op_NOT_249); + RzILOpPure *op_RSHIFT_352 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_354 = LOGAND(op_RSHIFT_352, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_360 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_362 = LOGAND(op_RSHIFT_360, SN(32, 0xffff)); + RzILOpPure *op_MUL_365 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_354), DUP(op_AND_354))), CAST(16, MSB(DUP(op_AND_354)), DUP(op_AND_354))), CAST(32, MSB(CAST(16, MSB(op_AND_362), DUP(op_AND_362))), CAST(16, MSB(DUP(op_AND_362)), DUP(op_AND_362)))); + RzILOpPure *op_LSHIFT_368 = SHIFTL0(CAST(64, MSB(op_MUL_365), 
DUP(op_MUL_365)), SN(32, 1)); + RzILOpPure *op_RSHIFT_372 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_374 = LOGAND(op_RSHIFT_372, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_380 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_382 = LOGAND(op_RSHIFT_380, SN(32, 0xffff)); + RzILOpPure *op_MUL_385 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_374), DUP(op_AND_374))), CAST(16, MSB(DUP(op_AND_374)), DUP(op_AND_374))), CAST(32, MSB(CAST(16, MSB(op_AND_382), DUP(op_AND_382))), CAST(16, MSB(DUP(op_AND_382)), DUP(op_AND_382)))); + RzILOpPure *op_LSHIFT_388 = SHIFTL0(CAST(64, MSB(op_MUL_385), DUP(op_MUL_385)), SN(32, 1)); + RzILOpPure *op_SUB_389 = SUB(op_LSHIFT_368, op_LSHIFT_388); + RzILOpPure *op_ADD_392 = ADD(op_SUB_389, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *cond_462 = ITE(DUP(op_EQ_348), op_ADD_392, VARL("h_tmp243")); + RzILOpPure *op_RSHIFT_466 = SHIFTRA(cond_462, SN(32, 16)); + RzILOpPure *op_AND_469 = LOGAND(op_RSHIFT_466, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_AND_473 = LOGAND(CAST(32, MSB(CAST(16, MSB(op_AND_469), DUP(op_AND_469))), CAST(16, MSB(DUP(op_AND_469)), DUP(op_AND_469))), SN(32, 0xffff)); + RzILOpPure *op_LSHIFT_478 = SHIFTL0(CAST(64, IL_FALSE, op_AND_473), SN(32, 0)); + RzILOpPure *op_OR_480 = LOGOR(CAST(64, IL_FALSE, op_AND_251), op_LSHIFT_478); + RzILOpEffect *op_ASSIGN_482 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, op_OR_480)); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) (( ...; + RzILOpEffect *seq_483 = SEQN(2, seq_461, op_ASSIGN_482); + + RzILOpEffect *instruction_sequence = SEQN(2, seq_242, seq_483); + return instruction_sequence; +} + +// Rd = cmpy(Rs,Rt*):rnd:sat +RzILOpEffect *hex_il_op_m2_cmpyrsc_s0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = 
ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_154 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) - (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0) + ((st64) 0x8000)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) - (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0) + ((st64) 0x8000))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) - (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0) + ((st64) 0x8000) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_16 = SHIFTRA(Rs, SN(32, 16)); + RzILOpPure *op_AND_18 = LOGAND(op_RSHIFT_16, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_25 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_27 = LOGAND(op_RSHIFT_25, SN(32, 0xffff)); + RzILOpPure *op_MUL_30 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_18), DUP(op_AND_18))), CAST(16, MSB(DUP(op_AND_18)), DUP(op_AND_18))), CAST(32, MSB(CAST(16, MSB(op_AND_27), DUP(op_AND_27))), CAST(16, MSB(DUP(op_AND_27)), DUP(op_AND_27)))); + RzILOpPure *op_LSHIFT_33 = SHIFTL0(CAST(64, MSB(op_MUL_30), DUP(op_MUL_30)), SN(32, 0)); + RzILOpPure *op_RSHIFT_37 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_39 = LOGAND(op_RSHIFT_37, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_45 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_47 = LOGAND(op_RSHIFT_45, SN(32, 0xffff)); + RzILOpPure *op_MUL_50 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_39), DUP(op_AND_39))), CAST(16, MSB(DUP(op_AND_39)), DUP(op_AND_39))), CAST(32, MSB(CAST(16, MSB(op_AND_47), DUP(op_AND_47))), CAST(16, MSB(DUP(op_AND_47)), DUP(op_AND_47)))); + RzILOpPure *op_LSHIFT_53 = SHIFTL0(CAST(64, MSB(op_MUL_50), DUP(op_MUL_50)), SN(32, 0)); + RzILOpPure *op_SUB_54 = SUB(op_LSHIFT_33, op_LSHIFT_53); + RzILOpPure *op_ADD_57 = ADD(op_SUB_54, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_66 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_68 = LOGAND(op_RSHIFT_66, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_74 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_76 = LOGAND(op_RSHIFT_74, SN(32, 0xffff)); + RzILOpPure *op_MUL_79 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_68), DUP(op_AND_68))), CAST(16, MSB(DUP(op_AND_68)), DUP(op_AND_68))), CAST(32, MSB(CAST(16, MSB(op_AND_76), DUP(op_AND_76))), CAST(16, MSB(DUP(op_AND_76)), DUP(op_AND_76)))); + RzILOpPure *op_LSHIFT_82 = SHIFTL0(CAST(64, MSB(op_MUL_79), DUP(op_MUL_79)), SN(32, 0)); + RzILOpPure *op_RSHIFT_86 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_88 = 
LOGAND(op_RSHIFT_86, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_94 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_96 = LOGAND(op_RSHIFT_94, SN(32, 0xffff)); + RzILOpPure *op_MUL_99 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_88), DUP(op_AND_88))), CAST(16, MSB(DUP(op_AND_88)), DUP(op_AND_88))), CAST(32, MSB(CAST(16, MSB(op_AND_96), DUP(op_AND_96))), CAST(16, MSB(DUP(op_AND_96)), DUP(op_AND_96)))); + RzILOpPure *op_LSHIFT_102 = SHIFTL0(CAST(64, MSB(op_MUL_99), DUP(op_MUL_99)), SN(32, 0)); + RzILOpPure *op_SUB_103 = SUB(op_LSHIFT_82, op_LSHIFT_102); + RzILOpPure *op_ADD_106 = ADD(op_SUB_103, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_EQ_107 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_ADD_57), SN(32, 0), SN(32, 0x20)), op_ADD_106); + RzILOpPure *op_RSHIFT_158 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_160 = LOGAND(op_RSHIFT_158, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_166 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_168 = LOGAND(op_RSHIFT_166, SN(32, 0xffff)); + RzILOpPure *op_MUL_171 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_160), DUP(op_AND_160))), CAST(16, MSB(DUP(op_AND_160)), DUP(op_AND_160))), CAST(32, MSB(CAST(16, MSB(op_AND_168), DUP(op_AND_168))), CAST(16, MSB(DUP(op_AND_168)), DUP(op_AND_168)))); + RzILOpPure *op_LSHIFT_174 = SHIFTL0(CAST(64, MSB(op_MUL_171), DUP(op_MUL_171)), SN(32, 0)); + RzILOpPure *op_RSHIFT_178 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_180 = LOGAND(op_RSHIFT_178, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_186 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_188 = LOGAND(op_RSHIFT_186, SN(32, 0xffff)); + RzILOpPure *op_MUL_191 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_180), DUP(op_AND_180))), CAST(16, MSB(DUP(op_AND_180)), DUP(op_AND_180))), CAST(32, MSB(CAST(16, MSB(op_AND_188), DUP(op_AND_188))), CAST(16, MSB(DUP(op_AND_188)), DUP(op_AND_188)))); + RzILOpPure *op_LSHIFT_194 = SHIFTL0(CAST(64, MSB(op_MUL_191), DUP(op_MUL_191)), SN(32, 0)); + RzILOpPure *op_SUB_195 = SUB(op_LSHIFT_174, 
op_LSHIFT_194); + RzILOpPure *op_ADD_198 = ADD(op_SUB_195, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_LT_201 = SLT(op_ADD_198, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_206 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_207 = NEG(op_LSHIFT_206); + RzILOpPure *op_LSHIFT_212 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_215 = SUB(op_LSHIFT_212, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_216 = ITE(op_LT_201, op_NEG_207, op_SUB_215); + RzILOpEffect *gcc_expr_217 = BRANCH(op_EQ_107, EMPTY(), set_usr_field_call_154); + + // h_tmp244 = HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) - (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0) + ((st64) 0x8000)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) - (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0) + ((st64) 0x8000))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) - (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0) + ((st64) 0x8000) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_219 = SETL("h_tmp244", cond_216); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16 ...; + RzILOpEffect *seq_220 = SEQN(2, gcc_expr_217, op_ASSIGN_hybrid_tmp_219); + + // Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xffff << 0x10)))) | (((ut64) (((st32) ((st16) ((((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) - (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0) + ((st64) 0x8000)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) - (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0) + ((st64) 0x8000)) ? (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) - (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0) + ((st64) 0x8000) : h_tmp244) >> 0x10) & ((st64) 0xffff)))) & 0xffff)) << 0x10))); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(SN(64, 0xffff), SN(32, 16)); + RzILOpPure *op_NOT_6 = LOGNOT(op_LSHIFT_5); + RzILOpPure *op_AND_8 = LOGAND(CAST(64, MSB(READ_REG(pkt, Rd_op, true)), READ_REG(pkt, Rd_op, true)), op_NOT_6); + RzILOpPure *op_RSHIFT_111 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_113 = LOGAND(op_RSHIFT_111, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_119 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_121 = LOGAND(op_RSHIFT_119, SN(32, 0xffff)); + RzILOpPure *op_MUL_124 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_113), DUP(op_AND_113))), CAST(16, MSB(DUP(op_AND_113)), DUP(op_AND_113))), CAST(32, MSB(CAST(16, MSB(op_AND_121), DUP(op_AND_121))), CAST(16, MSB(DUP(op_AND_121)), DUP(op_AND_121)))); + RzILOpPure *op_LSHIFT_127 = SHIFTL0(CAST(64, MSB(op_MUL_124), DUP(op_MUL_124)), SN(32, 0)); + RzILOpPure *op_RSHIFT_131 
= SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_133 = LOGAND(op_RSHIFT_131, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_139 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_141 = LOGAND(op_RSHIFT_139, SN(32, 0xffff)); + RzILOpPure *op_MUL_144 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_133), DUP(op_AND_133))), CAST(16, MSB(DUP(op_AND_133)), DUP(op_AND_133))), CAST(32, MSB(CAST(16, MSB(op_AND_141), DUP(op_AND_141))), CAST(16, MSB(DUP(op_AND_141)), DUP(op_AND_141)))); + RzILOpPure *op_LSHIFT_147 = SHIFTL0(CAST(64, MSB(op_MUL_144), DUP(op_MUL_144)), SN(32, 0)); + RzILOpPure *op_SUB_148 = SUB(op_LSHIFT_127, op_LSHIFT_147); + RzILOpPure *op_ADD_151 = ADD(op_SUB_148, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *cond_221 = ITE(DUP(op_EQ_107), op_ADD_151, VARL("h_tmp244")); + RzILOpPure *op_RSHIFT_225 = SHIFTRA(cond_221, SN(32, 16)); + RzILOpPure *op_AND_228 = LOGAND(op_RSHIFT_225, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_AND_232 = LOGAND(CAST(32, MSB(CAST(16, MSB(op_AND_228), DUP(op_AND_228))), CAST(16, MSB(DUP(op_AND_228)), DUP(op_AND_228))), SN(32, 0xffff)); + RzILOpPure *op_LSHIFT_237 = SHIFTL0(CAST(64, IL_FALSE, op_AND_232), SN(32, 16)); + RzILOpPure *op_OR_239 = LOGOR(CAST(64, IL_FALSE, op_AND_8), op_LSHIFT_237); + RzILOpEffect *op_ASSIGN_241 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, op_OR_239)); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) (( ...; + RzILOpEffect *seq_242 = SEQN(2, seq_220, op_ASSIGN_241); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_395 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0) + ((st64) 0x8000)), 0x0, 0x20) == (((st64) ((st32) ((st16) 
((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0) + ((st64) 0x8000))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0) + ((st64) 0x8000) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_258 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_260 = LOGAND(op_RSHIFT_258, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_266 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_268 = LOGAND(op_RSHIFT_266, SN(32, 0xffff)); + RzILOpPure *op_MUL_271 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_260), DUP(op_AND_260))), CAST(16, MSB(DUP(op_AND_260)), DUP(op_AND_260))), CAST(32, MSB(CAST(16, MSB(op_AND_268), DUP(op_AND_268))), CAST(16, MSB(DUP(op_AND_268)), DUP(op_AND_268)))); + RzILOpPure *op_LSHIFT_274 = SHIFTL0(CAST(64, MSB(op_MUL_271), DUP(op_MUL_271)), SN(32, 0)); + RzILOpPure *op_RSHIFT_278 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_280 = LOGAND(op_RSHIFT_278, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_286 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_288 = LOGAND(op_RSHIFT_286, SN(32, 0xffff)); + RzILOpPure *op_MUL_291 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_280), DUP(op_AND_280))), CAST(16, MSB(DUP(op_AND_280)), DUP(op_AND_280))), CAST(32, MSB(CAST(16, MSB(op_AND_288), DUP(op_AND_288))), CAST(16, MSB(DUP(op_AND_288)), DUP(op_AND_288)))); + RzILOpPure *op_LSHIFT_294 = SHIFTL0(CAST(64, MSB(op_MUL_291), DUP(op_MUL_291)), SN(32, 0)); + RzILOpPure *op_ADD_295 = ADD(op_LSHIFT_274, op_LSHIFT_294); + RzILOpPure *op_ADD_298 = ADD(op_ADD_295, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_307 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_309 = 
LOGAND(op_RSHIFT_307, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_315 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_317 = LOGAND(op_RSHIFT_315, SN(32, 0xffff)); + RzILOpPure *op_MUL_320 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_309), DUP(op_AND_309))), CAST(16, MSB(DUP(op_AND_309)), DUP(op_AND_309))), CAST(32, MSB(CAST(16, MSB(op_AND_317), DUP(op_AND_317))), CAST(16, MSB(DUP(op_AND_317)), DUP(op_AND_317)))); + RzILOpPure *op_LSHIFT_323 = SHIFTL0(CAST(64, MSB(op_MUL_320), DUP(op_MUL_320)), SN(32, 0)); + RzILOpPure *op_RSHIFT_327 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_329 = LOGAND(op_RSHIFT_327, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_335 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_337 = LOGAND(op_RSHIFT_335, SN(32, 0xffff)); + RzILOpPure *op_MUL_340 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_329), DUP(op_AND_329))), CAST(16, MSB(DUP(op_AND_329)), DUP(op_AND_329))), CAST(32, MSB(CAST(16, MSB(op_AND_337), DUP(op_AND_337))), CAST(16, MSB(DUP(op_AND_337)), DUP(op_AND_337)))); + RzILOpPure *op_LSHIFT_343 = SHIFTL0(CAST(64, MSB(op_MUL_340), DUP(op_MUL_340)), SN(32, 0)); + RzILOpPure *op_ADD_344 = ADD(op_LSHIFT_323, op_LSHIFT_343); + RzILOpPure *op_ADD_347 = ADD(op_ADD_344, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_EQ_348 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_ADD_298), SN(32, 0), SN(32, 0x20)), op_ADD_347); + RzILOpPure *op_RSHIFT_399 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_401 = LOGAND(op_RSHIFT_399, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_407 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_409 = LOGAND(op_RSHIFT_407, SN(32, 0xffff)); + RzILOpPure *op_MUL_412 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_401), DUP(op_AND_401))), CAST(16, MSB(DUP(op_AND_401)), DUP(op_AND_401))), CAST(32, MSB(CAST(16, MSB(op_AND_409), DUP(op_AND_409))), CAST(16, MSB(DUP(op_AND_409)), DUP(op_AND_409)))); + RzILOpPure *op_LSHIFT_415 = SHIFTL0(CAST(64, MSB(op_MUL_412), DUP(op_MUL_412)), SN(32, 0)); + RzILOpPure *op_RSHIFT_419 = 
SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_421 = LOGAND(op_RSHIFT_419, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_427 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_429 = LOGAND(op_RSHIFT_427, SN(32, 0xffff)); + RzILOpPure *op_MUL_432 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_421), DUP(op_AND_421))), CAST(16, MSB(DUP(op_AND_421)), DUP(op_AND_421))), CAST(32, MSB(CAST(16, MSB(op_AND_429), DUP(op_AND_429))), CAST(16, MSB(DUP(op_AND_429)), DUP(op_AND_429)))); + RzILOpPure *op_LSHIFT_435 = SHIFTL0(CAST(64, MSB(op_MUL_432), DUP(op_MUL_432)), SN(32, 0)); + RzILOpPure *op_ADD_436 = ADD(op_LSHIFT_415, op_LSHIFT_435); + RzILOpPure *op_ADD_439 = ADD(op_ADD_436, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_LT_442 = SLT(op_ADD_439, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_447 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_448 = NEG(op_LSHIFT_447); + RzILOpPure *op_LSHIFT_453 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_456 = SUB(op_LSHIFT_453, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_457 = ITE(op_LT_442, op_NEG_448, op_SUB_456); + RzILOpEffect *gcc_expr_458 = BRANCH(op_EQ_348, EMPTY(), set_usr_field_call_395); + + // h_tmp245 = HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0) + ((st64) 0x8000)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0) + ((st64) 0x8000))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) 
<< 0x0) + ((st64) 0x8000) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_460 = SETL("h_tmp245", cond_457); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16 ...; + RzILOpEffect *seq_461 = SEQN(2, gcc_expr_458, op_ASSIGN_hybrid_tmp_460); + + // Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xffff << 0x0)))) | (((ut64) (((st32) ((st16) ((((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0) + ((st64) 0x8000)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0) + ((st64) 0x8000)) ? (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0) + ((st64) 0x8000) : h_tmp245) >> 0x10) & ((st64) 0xffff)))) & 0xffff)) << 0x0))); + RzILOpPure *op_LSHIFT_248 = SHIFTL0(SN(64, 0xffff), SN(32, 0)); + RzILOpPure *op_NOT_249 = LOGNOT(op_LSHIFT_248); + RzILOpPure *op_AND_251 = LOGAND(CAST(64, MSB(READ_REG(pkt, Rd_op, true)), READ_REG(pkt, Rd_op, true)), op_NOT_249); + RzILOpPure *op_RSHIFT_352 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_354 = LOGAND(op_RSHIFT_352, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_360 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_362 = LOGAND(op_RSHIFT_360, SN(32, 0xffff)); + RzILOpPure *op_MUL_365 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_354), DUP(op_AND_354))), CAST(16, MSB(DUP(op_AND_354)), DUP(op_AND_354))), CAST(32, MSB(CAST(16, MSB(op_AND_362), DUP(op_AND_362))), CAST(16, MSB(DUP(op_AND_362)), DUP(op_AND_362)))); + RzILOpPure *op_LSHIFT_368 = SHIFTL0(CAST(64, MSB(op_MUL_365), 
DUP(op_MUL_365)), SN(32, 0)); + RzILOpPure *op_RSHIFT_372 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_374 = LOGAND(op_RSHIFT_372, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_380 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_382 = LOGAND(op_RSHIFT_380, SN(32, 0xffff)); + RzILOpPure *op_MUL_385 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_374), DUP(op_AND_374))), CAST(16, MSB(DUP(op_AND_374)), DUP(op_AND_374))), CAST(32, MSB(CAST(16, MSB(op_AND_382), DUP(op_AND_382))), CAST(16, MSB(DUP(op_AND_382)), DUP(op_AND_382)))); + RzILOpPure *op_LSHIFT_388 = SHIFTL0(CAST(64, MSB(op_MUL_385), DUP(op_MUL_385)), SN(32, 0)); + RzILOpPure *op_ADD_389 = ADD(op_LSHIFT_368, op_LSHIFT_388); + RzILOpPure *op_ADD_392 = ADD(op_ADD_389, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *cond_462 = ITE(DUP(op_EQ_348), op_ADD_392, VARL("h_tmp245")); + RzILOpPure *op_RSHIFT_466 = SHIFTRA(cond_462, SN(32, 16)); + RzILOpPure *op_AND_469 = LOGAND(op_RSHIFT_466, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_AND_473 = LOGAND(CAST(32, MSB(CAST(16, MSB(op_AND_469), DUP(op_AND_469))), CAST(16, MSB(DUP(op_AND_469)), DUP(op_AND_469))), SN(32, 0xffff)); + RzILOpPure *op_LSHIFT_478 = SHIFTL0(CAST(64, IL_FALSE, op_AND_473), SN(32, 0)); + RzILOpPure *op_OR_480 = LOGOR(CAST(64, IL_FALSE, op_AND_251), op_LSHIFT_478); + RzILOpEffect *op_ASSIGN_482 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, op_OR_480)); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) (( ...; + RzILOpEffect *seq_483 = SEQN(2, seq_461, op_ASSIGN_482); + + RzILOpEffect *instruction_sequence = SEQN(2, seq_242, seq_483); + return instruction_sequence; +} + +// Rd = cmpy(Rs,Rt*):<<1:rnd:sat +RzILOpEffect *hex_il_op_m2_cmpyrsc_s1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp 
*Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_154 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) - (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1) + ((st64) 0x8000)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) - (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1) + ((st64) 0x8000))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) - (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1) + ((st64) 0x8000) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_16 = SHIFTRA(Rs, SN(32, 16)); + RzILOpPure *op_AND_18 = LOGAND(op_RSHIFT_16, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_25 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_27 = LOGAND(op_RSHIFT_25, SN(32, 0xffff)); + RzILOpPure *op_MUL_30 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_18), DUP(op_AND_18))), CAST(16, MSB(DUP(op_AND_18)), DUP(op_AND_18))), CAST(32, MSB(CAST(16, MSB(op_AND_27), DUP(op_AND_27))), CAST(16, MSB(DUP(op_AND_27)), DUP(op_AND_27)))); + RzILOpPure *op_LSHIFT_33 = SHIFTL0(CAST(64, MSB(op_MUL_30), DUP(op_MUL_30)), SN(32, 1)); + RzILOpPure *op_RSHIFT_37 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_39 = LOGAND(op_RSHIFT_37, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_45 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_47 = LOGAND(op_RSHIFT_45, SN(32, 0xffff)); + RzILOpPure *op_MUL_50 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_39), DUP(op_AND_39))), CAST(16, MSB(DUP(op_AND_39)), DUP(op_AND_39))), CAST(32, MSB(CAST(16, MSB(op_AND_47), DUP(op_AND_47))), CAST(16, MSB(DUP(op_AND_47)), DUP(op_AND_47)))); + RzILOpPure *op_LSHIFT_53 = SHIFTL0(CAST(64, MSB(op_MUL_50), DUP(op_MUL_50)), SN(32, 1)); + RzILOpPure *op_SUB_54 = SUB(op_LSHIFT_33, op_LSHIFT_53); + RzILOpPure *op_ADD_57 = ADD(op_SUB_54, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_66 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_68 = LOGAND(op_RSHIFT_66, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_74 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_76 = LOGAND(op_RSHIFT_74, SN(32, 0xffff)); + RzILOpPure *op_MUL_79 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_68), DUP(op_AND_68))), CAST(16, MSB(DUP(op_AND_68)), DUP(op_AND_68))), CAST(32, MSB(CAST(16, MSB(op_AND_76), DUP(op_AND_76))), CAST(16, MSB(DUP(op_AND_76)), DUP(op_AND_76)))); + RzILOpPure *op_LSHIFT_82 = SHIFTL0(CAST(64, MSB(op_MUL_79), DUP(op_MUL_79)), SN(32, 1)); + RzILOpPure *op_RSHIFT_86 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_88 = 
LOGAND(op_RSHIFT_86, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_94 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_96 = LOGAND(op_RSHIFT_94, SN(32, 0xffff)); + RzILOpPure *op_MUL_99 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_88), DUP(op_AND_88))), CAST(16, MSB(DUP(op_AND_88)), DUP(op_AND_88))), CAST(32, MSB(CAST(16, MSB(op_AND_96), DUP(op_AND_96))), CAST(16, MSB(DUP(op_AND_96)), DUP(op_AND_96)))); + RzILOpPure *op_LSHIFT_102 = SHIFTL0(CAST(64, MSB(op_MUL_99), DUP(op_MUL_99)), SN(32, 1)); + RzILOpPure *op_SUB_103 = SUB(op_LSHIFT_82, op_LSHIFT_102); + RzILOpPure *op_ADD_106 = ADD(op_SUB_103, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_EQ_107 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_ADD_57), SN(32, 0), SN(32, 0x20)), op_ADD_106); + RzILOpPure *op_RSHIFT_158 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_160 = LOGAND(op_RSHIFT_158, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_166 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_168 = LOGAND(op_RSHIFT_166, SN(32, 0xffff)); + RzILOpPure *op_MUL_171 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_160), DUP(op_AND_160))), CAST(16, MSB(DUP(op_AND_160)), DUP(op_AND_160))), CAST(32, MSB(CAST(16, MSB(op_AND_168), DUP(op_AND_168))), CAST(16, MSB(DUP(op_AND_168)), DUP(op_AND_168)))); + RzILOpPure *op_LSHIFT_174 = SHIFTL0(CAST(64, MSB(op_MUL_171), DUP(op_MUL_171)), SN(32, 1)); + RzILOpPure *op_RSHIFT_178 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_180 = LOGAND(op_RSHIFT_178, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_186 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_188 = LOGAND(op_RSHIFT_186, SN(32, 0xffff)); + RzILOpPure *op_MUL_191 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_180), DUP(op_AND_180))), CAST(16, MSB(DUP(op_AND_180)), DUP(op_AND_180))), CAST(32, MSB(CAST(16, MSB(op_AND_188), DUP(op_AND_188))), CAST(16, MSB(DUP(op_AND_188)), DUP(op_AND_188)))); + RzILOpPure *op_LSHIFT_194 = SHIFTL0(CAST(64, MSB(op_MUL_191), DUP(op_MUL_191)), SN(32, 1)); + RzILOpPure *op_SUB_195 = SUB(op_LSHIFT_174, 
op_LSHIFT_194); + RzILOpPure *op_ADD_198 = ADD(op_SUB_195, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_LT_201 = SLT(op_ADD_198, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_206 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_207 = NEG(op_LSHIFT_206); + RzILOpPure *op_LSHIFT_212 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_215 = SUB(op_LSHIFT_212, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_216 = ITE(op_LT_201, op_NEG_207, op_SUB_215); + RzILOpEffect *gcc_expr_217 = BRANCH(op_EQ_107, EMPTY(), set_usr_field_call_154); + + // h_tmp246 = HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) - (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1) + ((st64) 0x8000)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) - (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1) + ((st64) 0x8000))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) - (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1) + ((st64) 0x8000) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_219 = SETL("h_tmp246", cond_216); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16 ...; + RzILOpEffect *seq_220 = SEQN(2, gcc_expr_217, op_ASSIGN_hybrid_tmp_219); + + // Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xffff << 0x10)))) | (((ut64) (((st32) ((st16) ((((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) - (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1) + ((st64) 0x8000)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) - (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1) + ((st64) 0x8000)) ? (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) - (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1) + ((st64) 0x8000) : h_tmp246) >> 0x10) & ((st64) 0xffff)))) & 0xffff)) << 0x10))); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(SN(64, 0xffff), SN(32, 16)); + RzILOpPure *op_NOT_6 = LOGNOT(op_LSHIFT_5); + RzILOpPure *op_AND_8 = LOGAND(CAST(64, MSB(READ_REG(pkt, Rd_op, true)), READ_REG(pkt, Rd_op, true)), op_NOT_6); + RzILOpPure *op_RSHIFT_111 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_113 = LOGAND(op_RSHIFT_111, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_119 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_121 = LOGAND(op_RSHIFT_119, SN(32, 0xffff)); + RzILOpPure *op_MUL_124 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_113), DUP(op_AND_113))), CAST(16, MSB(DUP(op_AND_113)), DUP(op_AND_113))), CAST(32, MSB(CAST(16, MSB(op_AND_121), DUP(op_AND_121))), CAST(16, MSB(DUP(op_AND_121)), DUP(op_AND_121)))); + RzILOpPure *op_LSHIFT_127 = SHIFTL0(CAST(64, MSB(op_MUL_124), DUP(op_MUL_124)), SN(32, 1)); + RzILOpPure *op_RSHIFT_131 
= SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_133 = LOGAND(op_RSHIFT_131, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_139 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_141 = LOGAND(op_RSHIFT_139, SN(32, 0xffff)); + RzILOpPure *op_MUL_144 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_133), DUP(op_AND_133))), CAST(16, MSB(DUP(op_AND_133)), DUP(op_AND_133))), CAST(32, MSB(CAST(16, MSB(op_AND_141), DUP(op_AND_141))), CAST(16, MSB(DUP(op_AND_141)), DUP(op_AND_141)))); + RzILOpPure *op_LSHIFT_147 = SHIFTL0(CAST(64, MSB(op_MUL_144), DUP(op_MUL_144)), SN(32, 1)); + RzILOpPure *op_SUB_148 = SUB(op_LSHIFT_127, op_LSHIFT_147); + RzILOpPure *op_ADD_151 = ADD(op_SUB_148, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *cond_221 = ITE(DUP(op_EQ_107), op_ADD_151, VARL("h_tmp246")); + RzILOpPure *op_RSHIFT_225 = SHIFTRA(cond_221, SN(32, 16)); + RzILOpPure *op_AND_228 = LOGAND(op_RSHIFT_225, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_AND_232 = LOGAND(CAST(32, MSB(CAST(16, MSB(op_AND_228), DUP(op_AND_228))), CAST(16, MSB(DUP(op_AND_228)), DUP(op_AND_228))), SN(32, 0xffff)); + RzILOpPure *op_LSHIFT_237 = SHIFTL0(CAST(64, IL_FALSE, op_AND_232), SN(32, 16)); + RzILOpPure *op_OR_239 = LOGOR(CAST(64, IL_FALSE, op_AND_8), op_LSHIFT_237); + RzILOpEffect *op_ASSIGN_241 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, op_OR_239)); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) (( ...; + RzILOpEffect *seq_242 = SEQN(2, seq_220, op_ASSIGN_241); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_395 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1) + ((st64) 0x8000)), 0x0, 0x20) == (((st64) ((st32) ((st16) 
((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1) + ((st64) 0x8000))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1) + ((st64) 0x8000) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_258 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_260 = LOGAND(op_RSHIFT_258, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_266 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_268 = LOGAND(op_RSHIFT_266, SN(32, 0xffff)); + RzILOpPure *op_MUL_271 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_260), DUP(op_AND_260))), CAST(16, MSB(DUP(op_AND_260)), DUP(op_AND_260))), CAST(32, MSB(CAST(16, MSB(op_AND_268), DUP(op_AND_268))), CAST(16, MSB(DUP(op_AND_268)), DUP(op_AND_268)))); + RzILOpPure *op_LSHIFT_274 = SHIFTL0(CAST(64, MSB(op_MUL_271), DUP(op_MUL_271)), SN(32, 1)); + RzILOpPure *op_RSHIFT_278 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_280 = LOGAND(op_RSHIFT_278, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_286 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_288 = LOGAND(op_RSHIFT_286, SN(32, 0xffff)); + RzILOpPure *op_MUL_291 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_280), DUP(op_AND_280))), CAST(16, MSB(DUP(op_AND_280)), DUP(op_AND_280))), CAST(32, MSB(CAST(16, MSB(op_AND_288), DUP(op_AND_288))), CAST(16, MSB(DUP(op_AND_288)), DUP(op_AND_288)))); + RzILOpPure *op_LSHIFT_294 = SHIFTL0(CAST(64, MSB(op_MUL_291), DUP(op_MUL_291)), SN(32, 1)); + RzILOpPure *op_ADD_295 = ADD(op_LSHIFT_274, op_LSHIFT_294); + RzILOpPure *op_ADD_298 = ADD(op_ADD_295, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_307 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_309 = 
LOGAND(op_RSHIFT_307, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_315 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_317 = LOGAND(op_RSHIFT_315, SN(32, 0xffff)); + RzILOpPure *op_MUL_320 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_309), DUP(op_AND_309))), CAST(16, MSB(DUP(op_AND_309)), DUP(op_AND_309))), CAST(32, MSB(CAST(16, MSB(op_AND_317), DUP(op_AND_317))), CAST(16, MSB(DUP(op_AND_317)), DUP(op_AND_317)))); + RzILOpPure *op_LSHIFT_323 = SHIFTL0(CAST(64, MSB(op_MUL_320), DUP(op_MUL_320)), SN(32, 1)); + RzILOpPure *op_RSHIFT_327 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_329 = LOGAND(op_RSHIFT_327, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_335 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_337 = LOGAND(op_RSHIFT_335, SN(32, 0xffff)); + RzILOpPure *op_MUL_340 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_329), DUP(op_AND_329))), CAST(16, MSB(DUP(op_AND_329)), DUP(op_AND_329))), CAST(32, MSB(CAST(16, MSB(op_AND_337), DUP(op_AND_337))), CAST(16, MSB(DUP(op_AND_337)), DUP(op_AND_337)))); + RzILOpPure *op_LSHIFT_343 = SHIFTL0(CAST(64, MSB(op_MUL_340), DUP(op_MUL_340)), SN(32, 1)); + RzILOpPure *op_ADD_344 = ADD(op_LSHIFT_323, op_LSHIFT_343); + RzILOpPure *op_ADD_347 = ADD(op_ADD_344, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_EQ_348 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_ADD_298), SN(32, 0), SN(32, 0x20)), op_ADD_347); + RzILOpPure *op_RSHIFT_399 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_401 = LOGAND(op_RSHIFT_399, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_407 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_409 = LOGAND(op_RSHIFT_407, SN(32, 0xffff)); + RzILOpPure *op_MUL_412 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_401), DUP(op_AND_401))), CAST(16, MSB(DUP(op_AND_401)), DUP(op_AND_401))), CAST(32, MSB(CAST(16, MSB(op_AND_409), DUP(op_AND_409))), CAST(16, MSB(DUP(op_AND_409)), DUP(op_AND_409)))); + RzILOpPure *op_LSHIFT_415 = SHIFTL0(CAST(64, MSB(op_MUL_412), DUP(op_MUL_412)), SN(32, 1)); + RzILOpPure *op_RSHIFT_419 = 
SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_421 = LOGAND(op_RSHIFT_419, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_427 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_429 = LOGAND(op_RSHIFT_427, SN(32, 0xffff)); + RzILOpPure *op_MUL_432 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_421), DUP(op_AND_421))), CAST(16, MSB(DUP(op_AND_421)), DUP(op_AND_421))), CAST(32, MSB(CAST(16, MSB(op_AND_429), DUP(op_AND_429))), CAST(16, MSB(DUP(op_AND_429)), DUP(op_AND_429)))); + RzILOpPure *op_LSHIFT_435 = SHIFTL0(CAST(64, MSB(op_MUL_432), DUP(op_MUL_432)), SN(32, 1)); + RzILOpPure *op_ADD_436 = ADD(op_LSHIFT_415, op_LSHIFT_435); + RzILOpPure *op_ADD_439 = ADD(op_ADD_436, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_LT_442 = SLT(op_ADD_439, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_447 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_448 = NEG(op_LSHIFT_447); + RzILOpPure *op_LSHIFT_453 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_456 = SUB(op_LSHIFT_453, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_457 = ITE(op_LT_442, op_NEG_448, op_SUB_456); + RzILOpEffect *gcc_expr_458 = BRANCH(op_EQ_348, EMPTY(), set_usr_field_call_395); + + // h_tmp247 = HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1) + ((st64) 0x8000)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1) + ((st64) 0x8000))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) 
<< 0x1) + ((st64) 0x8000) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_460 = SETL("h_tmp247", cond_457); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16 ...; + RzILOpEffect *seq_461 = SEQN(2, gcc_expr_458, op_ASSIGN_hybrid_tmp_460); + + // Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xffff << 0x0)))) | (((ut64) (((st32) ((st16) ((((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1) + ((st64) 0x8000)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1) + ((st64) 0x8000)) ? (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1) + ((st64) 0x8000) : h_tmp247) >> 0x10) & ((st64) 0xffff)))) & 0xffff)) << 0x0))); + RzILOpPure *op_LSHIFT_248 = SHIFTL0(SN(64, 0xffff), SN(32, 0)); + RzILOpPure *op_NOT_249 = LOGNOT(op_LSHIFT_248); + RzILOpPure *op_AND_251 = LOGAND(CAST(64, MSB(READ_REG(pkt, Rd_op, true)), READ_REG(pkt, Rd_op, true)), op_NOT_249); + RzILOpPure *op_RSHIFT_352 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_354 = LOGAND(op_RSHIFT_352, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_360 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_362 = LOGAND(op_RSHIFT_360, SN(32, 0xffff)); + RzILOpPure *op_MUL_365 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_354), DUP(op_AND_354))), CAST(16, MSB(DUP(op_AND_354)), DUP(op_AND_354))), CAST(32, MSB(CAST(16, MSB(op_AND_362), DUP(op_AND_362))), CAST(16, MSB(DUP(op_AND_362)), DUP(op_AND_362)))); + RzILOpPure *op_LSHIFT_368 = SHIFTL0(CAST(64, MSB(op_MUL_365), 
DUP(op_MUL_365)), SN(32, 1)); + RzILOpPure *op_RSHIFT_372 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_374 = LOGAND(op_RSHIFT_372, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_380 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_382 = LOGAND(op_RSHIFT_380, SN(32, 0xffff)); + RzILOpPure *op_MUL_385 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_374), DUP(op_AND_374))), CAST(16, MSB(DUP(op_AND_374)), DUP(op_AND_374))), CAST(32, MSB(CAST(16, MSB(op_AND_382), DUP(op_AND_382))), CAST(16, MSB(DUP(op_AND_382)), DUP(op_AND_382)))); + RzILOpPure *op_LSHIFT_388 = SHIFTL0(CAST(64, MSB(op_MUL_385), DUP(op_MUL_385)), SN(32, 1)); + RzILOpPure *op_ADD_389 = ADD(op_LSHIFT_368, op_LSHIFT_388); + RzILOpPure *op_ADD_392 = ADD(op_ADD_389, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *cond_462 = ITE(DUP(op_EQ_348), op_ADD_392, VARL("h_tmp247")); + RzILOpPure *op_RSHIFT_466 = SHIFTRA(cond_462, SN(32, 16)); + RzILOpPure *op_AND_469 = LOGAND(op_RSHIFT_466, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_AND_473 = LOGAND(CAST(32, MSB(CAST(16, MSB(op_AND_469), DUP(op_AND_469))), CAST(16, MSB(DUP(op_AND_469)), DUP(op_AND_469))), SN(32, 0xffff)); + RzILOpPure *op_LSHIFT_478 = SHIFTL0(CAST(64, IL_FALSE, op_AND_473), SN(32, 0)); + RzILOpPure *op_OR_480 = LOGOR(CAST(64, IL_FALSE, op_AND_251), op_LSHIFT_478); + RzILOpEffect *op_ASSIGN_482 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, op_OR_480)); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) (( ...; + RzILOpEffect *seq_483 = SEQN(2, seq_461, op_ASSIGN_482); + + RzILOpEffect *instruction_sequence = SEQN(2, seq_242, seq_483); + return instruction_sequence; +} + +/* NOTE(review): auto-generated RzIL lifter (file header says "Do not edit"); do not hand-modify the builder sequence. The function below lifts M2_cmpys_s0. From the code: the upper 32 bits of Rdd receive sat32(Rs.h*Rt.l + Rs.l*Rt.h) (shifted into place by << 0x20), the lower 32 bits receive sat32(Rs.l*Rt.l - Rs.h*Rt.h); whenever a sum/difference does not fit in 32 bits, USR.OVF is set via hex_set_usr_field() and the value saturates to -(1<<31) or (1<<31)-1. */ +// Rdd = cmpy(Rs,Rt):sat +RzILOpEffect *hex_il_op_m2_cmpys_s0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op =
ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_144 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + /* Overflow test below: the 64-bit cross sum equals its own 32-bit sign-extension (sextract64(v, 0, 0x20) == v) iff it fits in 32 bits; otherwise USR.OVF is set and the result saturates to -(0x1 << 0x1f) or (0x1 << 0x1f) - 1. */ + // HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0) < ((st64) 0x0)) ?
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_15 = SHIFTRA(Rs, SN(32, 16)); + RzILOpPure *op_AND_17 = LOGAND(op_RSHIFT_15, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_24 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_26 = LOGAND(op_RSHIFT_24, SN(32, 0xffff)); + RzILOpPure *op_MUL_29 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_17), DUP(op_AND_17))), CAST(16, MSB(DUP(op_AND_17)), DUP(op_AND_17))), CAST(32, MSB(CAST(16, MSB(op_AND_26), DUP(op_AND_26))), CAST(16, MSB(DUP(op_AND_26)), DUP(op_AND_26)))); + RzILOpPure *op_LSHIFT_32 = SHIFTL0(CAST(64, MSB(op_MUL_29), DUP(op_MUL_29)), SN(32, 0)); + RzILOpPure *op_RSHIFT_36 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_38 = LOGAND(op_RSHIFT_36, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_44 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_46 = LOGAND(op_RSHIFT_44, SN(32, 0xffff)); + RzILOpPure *op_MUL_49 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_38), DUP(op_AND_38))), CAST(16, MSB(DUP(op_AND_38)), DUP(op_AND_38))), CAST(32, MSB(CAST(16, MSB(op_AND_46), DUP(op_AND_46))), CAST(16, MSB(DUP(op_AND_46)), DUP(op_AND_46)))); + RzILOpPure *op_LSHIFT_52 = SHIFTL0(CAST(64, MSB(op_MUL_49), DUP(op_MUL_49)), SN(32, 0)); + RzILOpPure *op_ADD_53 = ADD(op_LSHIFT_32, op_LSHIFT_52); + RzILOpPure *op_RSHIFT_62 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_64 = LOGAND(op_RSHIFT_62, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_70 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_72 = LOGAND(op_RSHIFT_70, SN(32, 0xffff)); + RzILOpPure *op_MUL_75 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_64), DUP(op_AND_64))), CAST(16, MSB(DUP(op_AND_64)), DUP(op_AND_64))), CAST(32, MSB(CAST(16, MSB(op_AND_72), DUP(op_AND_72))), CAST(16, MSB(DUP(op_AND_72)), DUP(op_AND_72)))); + RzILOpPure *op_LSHIFT_78 = SHIFTL0(CAST(64, MSB(op_MUL_75), DUP(op_MUL_75)), SN(32, 0)); + RzILOpPure *op_RSHIFT_82 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_84 = LOGAND(op_RSHIFT_82, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_90 = SHIFTRA(DUP(Rt), SN(32,
16)); + RzILOpPure *op_AND_92 = LOGAND(op_RSHIFT_90, SN(32, 0xffff)); + RzILOpPure *op_MUL_95 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_84), DUP(op_AND_84))), CAST(16, MSB(DUP(op_AND_84)), DUP(op_AND_84))), CAST(32, MSB(CAST(16, MSB(op_AND_92), DUP(op_AND_92))), CAST(16, MSB(DUP(op_AND_92)), DUP(op_AND_92)))); + RzILOpPure *op_LSHIFT_98 = SHIFTL0(CAST(64, MSB(op_MUL_95), DUP(op_MUL_95)), SN(32, 0)); + RzILOpPure *op_ADD_99 = ADD(op_LSHIFT_78, op_LSHIFT_98); + RzILOpPure *op_EQ_100 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_ADD_53), SN(32, 0), SN(32, 0x20)), op_ADD_99); + RzILOpPure *op_RSHIFT_148 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_150 = LOGAND(op_RSHIFT_148, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_156 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_158 = LOGAND(op_RSHIFT_156, SN(32, 0xffff)); + RzILOpPure *op_MUL_161 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_150), DUP(op_AND_150))), CAST(16, MSB(DUP(op_AND_150)), DUP(op_AND_150))), CAST(32, MSB(CAST(16, MSB(op_AND_158), DUP(op_AND_158))), CAST(16, MSB(DUP(op_AND_158)), DUP(op_AND_158)))); + RzILOpPure *op_LSHIFT_164 = SHIFTL0(CAST(64, MSB(op_MUL_161), DUP(op_MUL_161)), SN(32, 0)); + RzILOpPure *op_RSHIFT_168 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_170 = LOGAND(op_RSHIFT_168, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_176 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_178 = LOGAND(op_RSHIFT_176, SN(32, 0xffff)); + RzILOpPure *op_MUL_181 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_170), DUP(op_AND_170))), CAST(16, MSB(DUP(op_AND_170)), DUP(op_AND_170))), CAST(32, MSB(CAST(16, MSB(op_AND_178), DUP(op_AND_178))), CAST(16, MSB(DUP(op_AND_178)), DUP(op_AND_178)))); + RzILOpPure *op_LSHIFT_184 = SHIFTL0(CAST(64, MSB(op_MUL_181), DUP(op_MUL_181)), SN(32, 0)); + RzILOpPure *op_ADD_185 = ADD(op_LSHIFT_164, op_LSHIFT_184); + RzILOpPure *op_LT_188 = SLT(op_ADD_185, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_193 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_194 =
NEG(op_LSHIFT_193); + RzILOpPure *op_LSHIFT_199 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_202 = SUB(op_LSHIFT_199, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_203 = ITE(op_LT_188, op_NEG_194, op_SUB_202); + RzILOpEffect *gcc_expr_204 = BRANCH(op_EQ_100, EMPTY(), set_usr_field_call_144); + + // h_tmp248 = HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_206 = SETL("h_tmp248", cond_203); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16 ...; + RzILOpEffect *seq_207 = SEQN(2, gcc_expr_204, op_ASSIGN_hybrid_tmp_206); + + // Rdd = ((Rdd & (~(0xffffffff << 0x20))) | ((((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0)) ?
(((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0) : h_tmp248) & 0xffffffff) << 0x20)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0x20)); + RzILOpPure *op_NOT_6 = LOGNOT(op_LSHIFT_5); + RzILOpPure *op_AND_7 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_6); + RzILOpPure *op_RSHIFT_104 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_106 = LOGAND(op_RSHIFT_104, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_112 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_114 = LOGAND(op_RSHIFT_112, SN(32, 0xffff)); + RzILOpPure *op_MUL_117 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_106), DUP(op_AND_106))), CAST(16, MSB(DUP(op_AND_106)), DUP(op_AND_106))), CAST(32, MSB(CAST(16, MSB(op_AND_114), DUP(op_AND_114))), CAST(16, MSB(DUP(op_AND_114)), DUP(op_AND_114)))); + RzILOpPure *op_LSHIFT_120 = SHIFTL0(CAST(64, MSB(op_MUL_117), DUP(op_MUL_117)), SN(32, 0)); + RzILOpPure *op_RSHIFT_124 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_126 = LOGAND(op_RSHIFT_124, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_132 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_134 = LOGAND(op_RSHIFT_132, SN(32, 0xffff)); + RzILOpPure *op_MUL_137 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_126), DUP(op_AND_126))), CAST(16, MSB(DUP(op_AND_126)), DUP(op_AND_126))), CAST(32, MSB(CAST(16, MSB(op_AND_134), DUP(op_AND_134))), CAST(16, MSB(DUP(op_AND_134)), DUP(op_AND_134)))); + RzILOpPure *op_LSHIFT_140 = SHIFTL0(CAST(64, MSB(op_MUL_137), DUP(op_MUL_137)), SN(32, 0)); + RzILOpPure *op_ADD_141 = ADD(op_LSHIFT_120, op_LSHIFT_140); + RzILOpPure *cond_208 = ITE(DUP(op_EQ_100), op_ADD_141, VARL("h_tmp248")); + RzILOpPure *op_AND_210 = LOGAND(cond_208, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_214 = SHIFTL0(op_AND_210, SN(32, 0x20)); + RzILOpPure *op_OR_215 = LOGOR(op_AND_7, op_LSHIFT_214); + RzILOpEffect *op_ASSIGN_216 = WRITE_REG(bundle,
Rdd_op, op_OR_215); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) (( ...; + RzILOpEffect *seq_217 = SEQN(2, seq_207, op_ASSIGN_216); + + /* Low word: the same saturate-or-set-OVF pattern is repeated for the difference Rs.l*Rt.l - Rs.h*Rt.h, written into bits [31:0] of Rdd. */ + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_360 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) - (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) - (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) - (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0) < ((st64) 0x0)) ?
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_232 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_234 = LOGAND(op_RSHIFT_232, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_240 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_242 = LOGAND(op_RSHIFT_240, SN(32, 0xffff)); + RzILOpPure *op_MUL_245 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_234), DUP(op_AND_234))), CAST(16, MSB(DUP(op_AND_234)), DUP(op_AND_234))), CAST(32, MSB(CAST(16, MSB(op_AND_242), DUP(op_AND_242))), CAST(16, MSB(DUP(op_AND_242)), DUP(op_AND_242)))); + RzILOpPure *op_LSHIFT_248 = SHIFTL0(CAST(64, MSB(op_MUL_245), DUP(op_MUL_245)), SN(32, 0)); + RzILOpPure *op_RSHIFT_252 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_254 = LOGAND(op_RSHIFT_252, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_260 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_262 = LOGAND(op_RSHIFT_260, SN(32, 0xffff)); + RzILOpPure *op_MUL_265 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_254), DUP(op_AND_254))), CAST(16, MSB(DUP(op_AND_254)), DUP(op_AND_254))), CAST(32, MSB(CAST(16, MSB(op_AND_262), DUP(op_AND_262))), CAST(16, MSB(DUP(op_AND_262)), DUP(op_AND_262)))); + RzILOpPure *op_LSHIFT_268 = SHIFTL0(CAST(64, MSB(op_MUL_265), DUP(op_MUL_265)), SN(32, 0)); + RzILOpPure *op_SUB_269 = SUB(op_LSHIFT_248, op_LSHIFT_268); + RzILOpPure *op_RSHIFT_278 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_280 = LOGAND(op_RSHIFT_278, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_286 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_288 = LOGAND(op_RSHIFT_286, SN(32, 0xffff)); + RzILOpPure *op_MUL_291 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_280), DUP(op_AND_280))), CAST(16, MSB(DUP(op_AND_280)), DUP(op_AND_280))), CAST(32, MSB(CAST(16, MSB(op_AND_288), DUP(op_AND_288))), CAST(16, MSB(DUP(op_AND_288)), DUP(op_AND_288)))); + RzILOpPure *op_LSHIFT_294 = SHIFTL0(CAST(64, MSB(op_MUL_291), DUP(op_MUL_291)), SN(32, 0)); + RzILOpPure *op_RSHIFT_298 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_300 = LOGAND(op_RSHIFT_298,
SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_306 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_308 = LOGAND(op_RSHIFT_306, SN(32, 0xffff)); + RzILOpPure *op_MUL_311 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_300), DUP(op_AND_300))), CAST(16, MSB(DUP(op_AND_300)), DUP(op_AND_300))), CAST(32, MSB(CAST(16, MSB(op_AND_308), DUP(op_AND_308))), CAST(16, MSB(DUP(op_AND_308)), DUP(op_AND_308)))); + RzILOpPure *op_LSHIFT_314 = SHIFTL0(CAST(64, MSB(op_MUL_311), DUP(op_MUL_311)), SN(32, 0)); + RzILOpPure *op_SUB_315 = SUB(op_LSHIFT_294, op_LSHIFT_314); + RzILOpPure *op_EQ_316 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_SUB_269), SN(32, 0), SN(32, 0x20)), op_SUB_315); + RzILOpPure *op_RSHIFT_364 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_366 = LOGAND(op_RSHIFT_364, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_372 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_374 = LOGAND(op_RSHIFT_372, SN(32, 0xffff)); + RzILOpPure *op_MUL_377 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_366), DUP(op_AND_366))), CAST(16, MSB(DUP(op_AND_366)), DUP(op_AND_366))), CAST(32, MSB(CAST(16, MSB(op_AND_374), DUP(op_AND_374))), CAST(16, MSB(DUP(op_AND_374)), DUP(op_AND_374)))); + RzILOpPure *op_LSHIFT_380 = SHIFTL0(CAST(64, MSB(op_MUL_377), DUP(op_MUL_377)), SN(32, 0)); + RzILOpPure *op_RSHIFT_384 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_386 = LOGAND(op_RSHIFT_384, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_392 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_394 = LOGAND(op_RSHIFT_392, SN(32, 0xffff)); + RzILOpPure *op_MUL_397 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_386), DUP(op_AND_386))), CAST(16, MSB(DUP(op_AND_386)), DUP(op_AND_386))), CAST(32, MSB(CAST(16, MSB(op_AND_394), DUP(op_AND_394))), CAST(16, MSB(DUP(op_AND_394)), DUP(op_AND_394)))); + RzILOpPure *op_LSHIFT_400 = SHIFTL0(CAST(64, MSB(op_MUL_397), DUP(op_MUL_397)), SN(32, 0)); + RzILOpPure *op_SUB_401 = SUB(op_LSHIFT_380, op_LSHIFT_400); + RzILOpPure *op_LT_404 = SLT(op_SUB_401, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure
*op_LSHIFT_409 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_410 = NEG(op_LSHIFT_409); + RzILOpPure *op_LSHIFT_415 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_418 = SUB(op_LSHIFT_415, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_419 = ITE(op_LT_404, op_NEG_410, op_SUB_418); + RzILOpEffect *gcc_expr_420 = BRANCH(op_EQ_316, EMPTY(), set_usr_field_call_360); + + // h_tmp249 = HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) - (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) - (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) - (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_422 = SETL("h_tmp249", cond_419); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16 ...; + RzILOpEffect *seq_423 = SEQN(2, gcc_expr_420, op_ASSIGN_hybrid_tmp_422); + + // Rdd = ((Rdd & (~(0xffffffff << 0x0))) | ((((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) - (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) - (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0)) ?
(((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) - (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0) : h_tmp249) & 0xffffffff) << 0x0)); + RzILOpPure *op_LSHIFT_223 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0)); + RzILOpPure *op_NOT_224 = LOGNOT(op_LSHIFT_223); + RzILOpPure *op_AND_225 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_224); + RzILOpPure *op_RSHIFT_320 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_322 = LOGAND(op_RSHIFT_320, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_328 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_330 = LOGAND(op_RSHIFT_328, SN(32, 0xffff)); + RzILOpPure *op_MUL_333 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_322), DUP(op_AND_322))), CAST(16, MSB(DUP(op_AND_322)), DUP(op_AND_322))), CAST(32, MSB(CAST(16, MSB(op_AND_330), DUP(op_AND_330))), CAST(16, MSB(DUP(op_AND_330)), DUP(op_AND_330)))); + RzILOpPure *op_LSHIFT_336 = SHIFTL0(CAST(64, MSB(op_MUL_333), DUP(op_MUL_333)), SN(32, 0)); + RzILOpPure *op_RSHIFT_340 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_342 = LOGAND(op_RSHIFT_340, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_348 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_350 = LOGAND(op_RSHIFT_348, SN(32, 0xffff)); + RzILOpPure *op_MUL_353 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_342), DUP(op_AND_342))), CAST(16, MSB(DUP(op_AND_342)), DUP(op_AND_342))), CAST(32, MSB(CAST(16, MSB(op_AND_350), DUP(op_AND_350))), CAST(16, MSB(DUP(op_AND_350)), DUP(op_AND_350)))); + RzILOpPure *op_LSHIFT_356 = SHIFTL0(CAST(64, MSB(op_MUL_353), DUP(op_MUL_353)), SN(32, 0)); + RzILOpPure *op_SUB_357 = SUB(op_LSHIFT_336, op_LSHIFT_356); + RzILOpPure *cond_424 = ITE(DUP(op_EQ_316), op_SUB_357, VARL("h_tmp249")); + RzILOpPure *op_AND_426 = LOGAND(cond_424, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_430 = SHIFTL0(op_AND_426, SN(32, 0)); + RzILOpPure *op_OR_431 = LOGOR(op_AND_225, op_LSHIFT_430); + RzILOpEffect *op_ASSIGN_432 =
WRITE_REG(bundle, Rdd_op, op_OR_431); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) (( ...; + RzILOpEffect *seq_433 = SEQN(2, seq_423, op_ASSIGN_432); + + RzILOpEffect *instruction_sequence = SEQN(2, seq_217, seq_433); + return instruction_sequence; +} + +// Rdd = cmpy(Rs,Rt):<<1:sat +RzILOpEffect *hex_il_op_m2_cmpys_s1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_144 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1) < ((st64) 0x0)) ?
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_15 = SHIFTRA(Rs, SN(32, 16)); + RzILOpPure *op_AND_17 = LOGAND(op_RSHIFT_15, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_24 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_26 = LOGAND(op_RSHIFT_24, SN(32, 0xffff)); + RzILOpPure *op_MUL_29 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_17), DUP(op_AND_17))), CAST(16, MSB(DUP(op_AND_17)), DUP(op_AND_17))), CAST(32, MSB(CAST(16, MSB(op_AND_26), DUP(op_AND_26))), CAST(16, MSB(DUP(op_AND_26)), DUP(op_AND_26)))); + RzILOpPure *op_LSHIFT_32 = SHIFTL0(CAST(64, MSB(op_MUL_29), DUP(op_MUL_29)), SN(32, 1)); + RzILOpPure *op_RSHIFT_36 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_38 = LOGAND(op_RSHIFT_36, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_44 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_46 = LOGAND(op_RSHIFT_44, SN(32, 0xffff)); + RzILOpPure *op_MUL_49 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_38), DUP(op_AND_38))), CAST(16, MSB(DUP(op_AND_38)), DUP(op_AND_38))), CAST(32, MSB(CAST(16, MSB(op_AND_46), DUP(op_AND_46))), CAST(16, MSB(DUP(op_AND_46)), DUP(op_AND_46)))); + RzILOpPure *op_LSHIFT_52 = SHIFTL0(CAST(64, MSB(op_MUL_49), DUP(op_MUL_49)), SN(32, 1)); + RzILOpPure *op_ADD_53 = ADD(op_LSHIFT_32, op_LSHIFT_52); + RzILOpPure *op_RSHIFT_62 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_64 = LOGAND(op_RSHIFT_62, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_70 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_72 = LOGAND(op_RSHIFT_70, SN(32, 0xffff)); + RzILOpPure *op_MUL_75 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_64), DUP(op_AND_64))), CAST(16, MSB(DUP(op_AND_64)), DUP(op_AND_64))), CAST(32, MSB(CAST(16, MSB(op_AND_72), DUP(op_AND_72))), CAST(16, MSB(DUP(op_AND_72)), DUP(op_AND_72)))); + RzILOpPure *op_LSHIFT_78 = SHIFTL0(CAST(64, MSB(op_MUL_75), DUP(op_MUL_75)), SN(32, 1)); + RzILOpPure *op_RSHIFT_82 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_84 = LOGAND(op_RSHIFT_82, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_90 = SHIFTRA(DUP(Rt), SN(32, 
16)); + RzILOpPure *op_AND_92 = LOGAND(op_RSHIFT_90, SN(32, 0xffff)); + RzILOpPure *op_MUL_95 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_84), DUP(op_AND_84))), CAST(16, MSB(DUP(op_AND_84)), DUP(op_AND_84))), CAST(32, MSB(CAST(16, MSB(op_AND_92), DUP(op_AND_92))), CAST(16, MSB(DUP(op_AND_92)), DUP(op_AND_92)))); + RzILOpPure *op_LSHIFT_98 = SHIFTL0(CAST(64, MSB(op_MUL_95), DUP(op_MUL_95)), SN(32, 1)); + RzILOpPure *op_ADD_99 = ADD(op_LSHIFT_78, op_LSHIFT_98); + RzILOpPure *op_EQ_100 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_ADD_53), SN(32, 0), SN(32, 0x20)), op_ADD_99); + RzILOpPure *op_RSHIFT_148 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_150 = LOGAND(op_RSHIFT_148, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_156 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_158 = LOGAND(op_RSHIFT_156, SN(32, 0xffff)); + RzILOpPure *op_MUL_161 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_150), DUP(op_AND_150))), CAST(16, MSB(DUP(op_AND_150)), DUP(op_AND_150))), CAST(32, MSB(CAST(16, MSB(op_AND_158), DUP(op_AND_158))), CAST(16, MSB(DUP(op_AND_158)), DUP(op_AND_158)))); + RzILOpPure *op_LSHIFT_164 = SHIFTL0(CAST(64, MSB(op_MUL_161), DUP(op_MUL_161)), SN(32, 1)); + RzILOpPure *op_RSHIFT_168 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_170 = LOGAND(op_RSHIFT_168, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_176 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_178 = LOGAND(op_RSHIFT_176, SN(32, 0xffff)); + RzILOpPure *op_MUL_181 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_170), DUP(op_AND_170))), CAST(16, MSB(DUP(op_AND_170)), DUP(op_AND_170))), CAST(32, MSB(CAST(16, MSB(op_AND_178), DUP(op_AND_178))), CAST(16, MSB(DUP(op_AND_178)), DUP(op_AND_178)))); + RzILOpPure *op_LSHIFT_184 = SHIFTL0(CAST(64, MSB(op_MUL_181), DUP(op_MUL_181)), SN(32, 1)); + RzILOpPure *op_ADD_185 = ADD(op_LSHIFT_164, op_LSHIFT_184); + RzILOpPure *op_LT_188 = SLT(op_ADD_185, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_193 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_194 = 
NEG(op_LSHIFT_193); + RzILOpPure *op_LSHIFT_199 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_202 = SUB(op_LSHIFT_199, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_203 = ITE(op_LT_188, op_NEG_194, op_SUB_202); + RzILOpEffect *gcc_expr_204 = BRANCH(op_EQ_100, EMPTY(), set_usr_field_call_144); + + // h_tmp250 = HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_206 = SETL("h_tmp250", cond_203); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16 ...; + RzILOpEffect *seq_207 = SEQN(2, gcc_expr_204, op_ASSIGN_hybrid_tmp_206); + + // Rdd = ((Rdd & (~(0xffffffff << 0x20))) | ((((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1)) ? 
(((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1) : h_tmp250) & 0xffffffff) << 0x20)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0x20)); + RzILOpPure *op_NOT_6 = LOGNOT(op_LSHIFT_5); + RzILOpPure *op_AND_7 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_6); + RzILOpPure *op_RSHIFT_104 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_106 = LOGAND(op_RSHIFT_104, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_112 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_114 = LOGAND(op_RSHIFT_112, SN(32, 0xffff)); + RzILOpPure *op_MUL_117 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_106), DUP(op_AND_106))), CAST(16, MSB(DUP(op_AND_106)), DUP(op_AND_106))), CAST(32, MSB(CAST(16, MSB(op_AND_114), DUP(op_AND_114))), CAST(16, MSB(DUP(op_AND_114)), DUP(op_AND_114)))); + RzILOpPure *op_LSHIFT_120 = SHIFTL0(CAST(64, MSB(op_MUL_117), DUP(op_MUL_117)), SN(32, 1)); + RzILOpPure *op_RSHIFT_124 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_126 = LOGAND(op_RSHIFT_124, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_132 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_134 = LOGAND(op_RSHIFT_132, SN(32, 0xffff)); + RzILOpPure *op_MUL_137 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_126), DUP(op_AND_126))), CAST(16, MSB(DUP(op_AND_126)), DUP(op_AND_126))), CAST(32, MSB(CAST(16, MSB(op_AND_134), DUP(op_AND_134))), CAST(16, MSB(DUP(op_AND_134)), DUP(op_AND_134)))); + RzILOpPure *op_LSHIFT_140 = SHIFTL0(CAST(64, MSB(op_MUL_137), DUP(op_MUL_137)), SN(32, 1)); + RzILOpPure *op_ADD_141 = ADD(op_LSHIFT_120, op_LSHIFT_140); + RzILOpPure *cond_208 = ITE(DUP(op_EQ_100), op_ADD_141, VARL("h_tmp250")); + RzILOpPure *op_AND_210 = LOGAND(cond_208, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_214 = SHIFTL0(op_AND_210, SN(32, 0x20)); + RzILOpPure *op_OR_215 = LOGOR(op_AND_7, op_LSHIFT_214); + RzILOpEffect *op_ASSIGN_216 = WRITE_REG(bundle, 
Rdd_op, op_OR_215); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) (( ...; + RzILOpEffect *seq_217 = SEQN(2, seq_207, op_ASSIGN_216); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_360 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) - (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) - (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) - (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_232 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_234 = LOGAND(op_RSHIFT_232, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_240 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_242 = LOGAND(op_RSHIFT_240, SN(32, 0xffff)); + RzILOpPure *op_MUL_245 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_234), DUP(op_AND_234))), CAST(16, MSB(DUP(op_AND_234)), DUP(op_AND_234))), CAST(32, MSB(CAST(16, MSB(op_AND_242), DUP(op_AND_242))), CAST(16, MSB(DUP(op_AND_242)), DUP(op_AND_242)))); + RzILOpPure *op_LSHIFT_248 = SHIFTL0(CAST(64, MSB(op_MUL_245), DUP(op_MUL_245)), SN(32, 1)); + RzILOpPure *op_RSHIFT_252 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_254 = LOGAND(op_RSHIFT_252, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_260 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_262 = LOGAND(op_RSHIFT_260, SN(32, 0xffff)); + RzILOpPure *op_MUL_265 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_254), DUP(op_AND_254))), CAST(16, MSB(DUP(op_AND_254)), DUP(op_AND_254))), CAST(32, MSB(CAST(16, MSB(op_AND_262), DUP(op_AND_262))), CAST(16, MSB(DUP(op_AND_262)), DUP(op_AND_262)))); + RzILOpPure *op_LSHIFT_268 = SHIFTL0(CAST(64, MSB(op_MUL_265), DUP(op_MUL_265)), SN(32, 1)); + RzILOpPure *op_SUB_269 = SUB(op_LSHIFT_248, op_LSHIFT_268); + RzILOpPure *op_RSHIFT_278 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_280 = LOGAND(op_RSHIFT_278, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_286 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_288 = LOGAND(op_RSHIFT_286, SN(32, 0xffff)); + RzILOpPure *op_MUL_291 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_280), DUP(op_AND_280))), CAST(16, MSB(DUP(op_AND_280)), DUP(op_AND_280))), CAST(32, MSB(CAST(16, MSB(op_AND_288), DUP(op_AND_288))), CAST(16, MSB(DUP(op_AND_288)), DUP(op_AND_288)))); + RzILOpPure *op_LSHIFT_294 = SHIFTL0(CAST(64, MSB(op_MUL_291), DUP(op_MUL_291)), SN(32, 1)); + RzILOpPure *op_RSHIFT_298 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_300 = LOGAND(op_RSHIFT_298, 
SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_306 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_308 = LOGAND(op_RSHIFT_306, SN(32, 0xffff)); + RzILOpPure *op_MUL_311 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_300), DUP(op_AND_300))), CAST(16, MSB(DUP(op_AND_300)), DUP(op_AND_300))), CAST(32, MSB(CAST(16, MSB(op_AND_308), DUP(op_AND_308))), CAST(16, MSB(DUP(op_AND_308)), DUP(op_AND_308)))); + RzILOpPure *op_LSHIFT_314 = SHIFTL0(CAST(64, MSB(op_MUL_311), DUP(op_MUL_311)), SN(32, 1)); + RzILOpPure *op_SUB_315 = SUB(op_LSHIFT_294, op_LSHIFT_314); + RzILOpPure *op_EQ_316 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_SUB_269), SN(32, 0), SN(32, 0x20)), op_SUB_315); + RzILOpPure *op_RSHIFT_364 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_366 = LOGAND(op_RSHIFT_364, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_372 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_374 = LOGAND(op_RSHIFT_372, SN(32, 0xffff)); + RzILOpPure *op_MUL_377 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_366), DUP(op_AND_366))), CAST(16, MSB(DUP(op_AND_366)), DUP(op_AND_366))), CAST(32, MSB(CAST(16, MSB(op_AND_374), DUP(op_AND_374))), CAST(16, MSB(DUP(op_AND_374)), DUP(op_AND_374)))); + RzILOpPure *op_LSHIFT_380 = SHIFTL0(CAST(64, MSB(op_MUL_377), DUP(op_MUL_377)), SN(32, 1)); + RzILOpPure *op_RSHIFT_384 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_386 = LOGAND(op_RSHIFT_384, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_392 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_394 = LOGAND(op_RSHIFT_392, SN(32, 0xffff)); + RzILOpPure *op_MUL_397 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_386), DUP(op_AND_386))), CAST(16, MSB(DUP(op_AND_386)), DUP(op_AND_386))), CAST(32, MSB(CAST(16, MSB(op_AND_394), DUP(op_AND_394))), CAST(16, MSB(DUP(op_AND_394)), DUP(op_AND_394)))); + RzILOpPure *op_LSHIFT_400 = SHIFTL0(CAST(64, MSB(op_MUL_397), DUP(op_MUL_397)), SN(32, 1)); + RzILOpPure *op_SUB_401 = SUB(op_LSHIFT_380, op_LSHIFT_400); + RzILOpPure *op_LT_404 = SLT(op_SUB_401, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure 
*op_LSHIFT_409 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_410 = NEG(op_LSHIFT_409); + RzILOpPure *op_LSHIFT_415 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_418 = SUB(op_LSHIFT_415, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_419 = ITE(op_LT_404, op_NEG_410, op_SUB_418); + RzILOpEffect *gcc_expr_420 = BRANCH(op_EQ_316, EMPTY(), set_usr_field_call_360); + + // h_tmp251 = HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) - (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) - (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) - (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_422 = SETL("h_tmp251", cond_419); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16 ...; + RzILOpEffect *seq_423 = SEQN(2, gcc_expr_420, op_ASSIGN_hybrid_tmp_422); + + // Rdd = ((Rdd & (~(0xffffffff << 0x0))) | ((((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) - (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) - (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1)) ? 
(((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) - (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1) : h_tmp251) & 0xffffffff) << 0x0)); + RzILOpPure *op_LSHIFT_223 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0)); + RzILOpPure *op_NOT_224 = LOGNOT(op_LSHIFT_223); + RzILOpPure *op_AND_225 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_224); + RzILOpPure *op_RSHIFT_320 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_322 = LOGAND(op_RSHIFT_320, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_328 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_330 = LOGAND(op_RSHIFT_328, SN(32, 0xffff)); + RzILOpPure *op_MUL_333 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_322), DUP(op_AND_322))), CAST(16, MSB(DUP(op_AND_322)), DUP(op_AND_322))), CAST(32, MSB(CAST(16, MSB(op_AND_330), DUP(op_AND_330))), CAST(16, MSB(DUP(op_AND_330)), DUP(op_AND_330)))); + RzILOpPure *op_LSHIFT_336 = SHIFTL0(CAST(64, MSB(op_MUL_333), DUP(op_MUL_333)), SN(32, 1)); + RzILOpPure *op_RSHIFT_340 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_342 = LOGAND(op_RSHIFT_340, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_348 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_350 = LOGAND(op_RSHIFT_348, SN(32, 0xffff)); + RzILOpPure *op_MUL_353 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_342), DUP(op_AND_342))), CAST(16, MSB(DUP(op_AND_342)), DUP(op_AND_342))), CAST(32, MSB(CAST(16, MSB(op_AND_350), DUP(op_AND_350))), CAST(16, MSB(DUP(op_AND_350)), DUP(op_AND_350)))); + RzILOpPure *op_LSHIFT_356 = SHIFTL0(CAST(64, MSB(op_MUL_353), DUP(op_MUL_353)), SN(32, 1)); + RzILOpPure *op_SUB_357 = SUB(op_LSHIFT_336, op_LSHIFT_356); + RzILOpPure *cond_424 = ITE(DUP(op_EQ_316), op_SUB_357, VARL("h_tmp251")); + RzILOpPure *op_AND_426 = LOGAND(cond_424, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_430 = SHIFTL0(op_AND_426, SN(32, 0)); + RzILOpPure *op_OR_431 = LOGOR(op_AND_225, op_LSHIFT_430); + RzILOpEffect *op_ASSIGN_432 = 
WRITE_REG(bundle, Rdd_op, op_OR_431); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) (( ...; + RzILOpEffect *seq_433 = SEQN(2, seq_423, op_ASSIGN_432); + + RzILOpEffect *instruction_sequence = SEQN(2, seq_217, seq_433); + return instruction_sequence; +} + +// Rdd = cmpy(Rs,Rt*):sat +RzILOpEffect *hex_il_op_m2_cmpysc_s0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_144 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) - (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) - (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) - (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_15 = SHIFTRA(Rs, SN(32, 16)); + RzILOpPure *op_AND_17 = LOGAND(op_RSHIFT_15, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_24 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_26 = LOGAND(op_RSHIFT_24, SN(32, 0xffff)); + RzILOpPure *op_MUL_29 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_17), DUP(op_AND_17))), CAST(16, MSB(DUP(op_AND_17)), DUP(op_AND_17))), CAST(32, MSB(CAST(16, MSB(op_AND_26), DUP(op_AND_26))), CAST(16, MSB(DUP(op_AND_26)), DUP(op_AND_26)))); + RzILOpPure *op_LSHIFT_32 = SHIFTL0(CAST(64, MSB(op_MUL_29), DUP(op_MUL_29)), SN(32, 0)); + RzILOpPure *op_RSHIFT_36 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_38 = LOGAND(op_RSHIFT_36, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_44 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_46 = LOGAND(op_RSHIFT_44, SN(32, 0xffff)); + RzILOpPure *op_MUL_49 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_38), DUP(op_AND_38))), CAST(16, MSB(DUP(op_AND_38)), DUP(op_AND_38))), CAST(32, MSB(CAST(16, MSB(op_AND_46), DUP(op_AND_46))), CAST(16, MSB(DUP(op_AND_46)), DUP(op_AND_46)))); + RzILOpPure *op_LSHIFT_52 = SHIFTL0(CAST(64, MSB(op_MUL_49), DUP(op_MUL_49)), SN(32, 0)); + RzILOpPure *op_SUB_53 = SUB(op_LSHIFT_32, op_LSHIFT_52); + RzILOpPure *op_RSHIFT_62 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_64 = LOGAND(op_RSHIFT_62, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_70 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_72 = LOGAND(op_RSHIFT_70, SN(32, 0xffff)); + RzILOpPure *op_MUL_75 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_64), DUP(op_AND_64))), CAST(16, MSB(DUP(op_AND_64)), DUP(op_AND_64))), CAST(32, MSB(CAST(16, MSB(op_AND_72), DUP(op_AND_72))), CAST(16, MSB(DUP(op_AND_72)), DUP(op_AND_72)))); + RzILOpPure *op_LSHIFT_78 = SHIFTL0(CAST(64, MSB(op_MUL_75), DUP(op_MUL_75)), SN(32, 0)); + RzILOpPure *op_RSHIFT_82 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_84 = LOGAND(op_RSHIFT_82, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_90 = SHIFTRA(DUP(Rt), SN(32, 
16)); + RzILOpPure *op_AND_92 = LOGAND(op_RSHIFT_90, SN(32, 0xffff)); + RzILOpPure *op_MUL_95 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_84), DUP(op_AND_84))), CAST(16, MSB(DUP(op_AND_84)), DUP(op_AND_84))), CAST(32, MSB(CAST(16, MSB(op_AND_92), DUP(op_AND_92))), CAST(16, MSB(DUP(op_AND_92)), DUP(op_AND_92)))); + RzILOpPure *op_LSHIFT_98 = SHIFTL0(CAST(64, MSB(op_MUL_95), DUP(op_MUL_95)), SN(32, 0)); + RzILOpPure *op_SUB_99 = SUB(op_LSHIFT_78, op_LSHIFT_98); + RzILOpPure *op_EQ_100 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_SUB_53), SN(32, 0), SN(32, 0x20)), op_SUB_99); + RzILOpPure *op_RSHIFT_148 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_150 = LOGAND(op_RSHIFT_148, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_156 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_158 = LOGAND(op_RSHIFT_156, SN(32, 0xffff)); + RzILOpPure *op_MUL_161 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_150), DUP(op_AND_150))), CAST(16, MSB(DUP(op_AND_150)), DUP(op_AND_150))), CAST(32, MSB(CAST(16, MSB(op_AND_158), DUP(op_AND_158))), CAST(16, MSB(DUP(op_AND_158)), DUP(op_AND_158)))); + RzILOpPure *op_LSHIFT_164 = SHIFTL0(CAST(64, MSB(op_MUL_161), DUP(op_MUL_161)), SN(32, 0)); + RzILOpPure *op_RSHIFT_168 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_170 = LOGAND(op_RSHIFT_168, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_176 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_178 = LOGAND(op_RSHIFT_176, SN(32, 0xffff)); + RzILOpPure *op_MUL_181 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_170), DUP(op_AND_170))), CAST(16, MSB(DUP(op_AND_170)), DUP(op_AND_170))), CAST(32, MSB(CAST(16, MSB(op_AND_178), DUP(op_AND_178))), CAST(16, MSB(DUP(op_AND_178)), DUP(op_AND_178)))); + RzILOpPure *op_LSHIFT_184 = SHIFTL0(CAST(64, MSB(op_MUL_181), DUP(op_MUL_181)), SN(32, 0)); + RzILOpPure *op_SUB_185 = SUB(op_LSHIFT_164, op_LSHIFT_184); + RzILOpPure *op_LT_188 = SLT(op_SUB_185, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_193 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_194 = 
NEG(op_LSHIFT_193); + RzILOpPure *op_LSHIFT_199 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_202 = SUB(op_LSHIFT_199, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_203 = ITE(op_LT_188, op_NEG_194, op_SUB_202); + RzILOpEffect *gcc_expr_204 = BRANCH(op_EQ_100, EMPTY(), set_usr_field_call_144); + + // h_tmp252 = HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) - (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) - (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) - (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_206 = SETL("h_tmp252", cond_203); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16 ...; + RzILOpEffect *seq_207 = SEQN(2, gcc_expr_204, op_ASSIGN_hybrid_tmp_206); + + // Rdd = ((Rdd & (~(0xffffffff << 0x20))) | ((((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) - (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) - (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0)) ? 
(((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) - (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0) : h_tmp252) & 0xffffffff) << 0x20)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0x20)); + RzILOpPure *op_NOT_6 = LOGNOT(op_LSHIFT_5); + RzILOpPure *op_AND_7 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_6); + RzILOpPure *op_RSHIFT_104 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_106 = LOGAND(op_RSHIFT_104, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_112 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_114 = LOGAND(op_RSHIFT_112, SN(32, 0xffff)); + RzILOpPure *op_MUL_117 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_106), DUP(op_AND_106))), CAST(16, MSB(DUP(op_AND_106)), DUP(op_AND_106))), CAST(32, MSB(CAST(16, MSB(op_AND_114), DUP(op_AND_114))), CAST(16, MSB(DUP(op_AND_114)), DUP(op_AND_114)))); + RzILOpPure *op_LSHIFT_120 = SHIFTL0(CAST(64, MSB(op_MUL_117), DUP(op_MUL_117)), SN(32, 0)); + RzILOpPure *op_RSHIFT_124 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_126 = LOGAND(op_RSHIFT_124, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_132 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_134 = LOGAND(op_RSHIFT_132, SN(32, 0xffff)); + RzILOpPure *op_MUL_137 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_126), DUP(op_AND_126))), CAST(16, MSB(DUP(op_AND_126)), DUP(op_AND_126))), CAST(32, MSB(CAST(16, MSB(op_AND_134), DUP(op_AND_134))), CAST(16, MSB(DUP(op_AND_134)), DUP(op_AND_134)))); + RzILOpPure *op_LSHIFT_140 = SHIFTL0(CAST(64, MSB(op_MUL_137), DUP(op_MUL_137)), SN(32, 0)); + RzILOpPure *op_SUB_141 = SUB(op_LSHIFT_120, op_LSHIFT_140); + RzILOpPure *cond_208 = ITE(DUP(op_EQ_100), op_SUB_141, VARL("h_tmp252")); + RzILOpPure *op_AND_210 = LOGAND(cond_208, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_214 = SHIFTL0(op_AND_210, SN(32, 0x20)); + RzILOpPure *op_OR_215 = LOGOR(op_AND_7, op_LSHIFT_214); + RzILOpEffect *op_ASSIGN_216 = WRITE_REG(bundle, 
Rdd_op, op_OR_215); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) (( ...; + RzILOpEffect *seq_217 = SEQN(2, seq_207, op_ASSIGN_216); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_360 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_232 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_234 = LOGAND(op_RSHIFT_232, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_240 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_242 = LOGAND(op_RSHIFT_240, SN(32, 0xffff)); + RzILOpPure *op_MUL_245 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_234), DUP(op_AND_234))), CAST(16, MSB(DUP(op_AND_234)), DUP(op_AND_234))), CAST(32, MSB(CAST(16, MSB(op_AND_242), DUP(op_AND_242))), CAST(16, MSB(DUP(op_AND_242)), DUP(op_AND_242)))); + RzILOpPure *op_LSHIFT_248 = SHIFTL0(CAST(64, MSB(op_MUL_245), DUP(op_MUL_245)), SN(32, 0)); + RzILOpPure *op_RSHIFT_252 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_254 = LOGAND(op_RSHIFT_252, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_260 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_262 = LOGAND(op_RSHIFT_260, SN(32, 0xffff)); + RzILOpPure *op_MUL_265 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_254), DUP(op_AND_254))), CAST(16, MSB(DUP(op_AND_254)), DUP(op_AND_254))), CAST(32, MSB(CAST(16, MSB(op_AND_262), DUP(op_AND_262))), CAST(16, MSB(DUP(op_AND_262)), DUP(op_AND_262)))); + RzILOpPure *op_LSHIFT_268 = SHIFTL0(CAST(64, MSB(op_MUL_265), DUP(op_MUL_265)), SN(32, 0)); + RzILOpPure *op_ADD_269 = ADD(op_LSHIFT_248, op_LSHIFT_268); + RzILOpPure *op_RSHIFT_278 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_280 = LOGAND(op_RSHIFT_278, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_286 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_288 = LOGAND(op_RSHIFT_286, SN(32, 0xffff)); + RzILOpPure *op_MUL_291 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_280), DUP(op_AND_280))), CAST(16, MSB(DUP(op_AND_280)), DUP(op_AND_280))), CAST(32, MSB(CAST(16, MSB(op_AND_288), DUP(op_AND_288))), CAST(16, MSB(DUP(op_AND_288)), DUP(op_AND_288)))); + RzILOpPure *op_LSHIFT_294 = SHIFTL0(CAST(64, MSB(op_MUL_291), DUP(op_MUL_291)), SN(32, 0)); + RzILOpPure *op_RSHIFT_298 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_300 = LOGAND(op_RSHIFT_298, 
SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_306 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_308 = LOGAND(op_RSHIFT_306, SN(32, 0xffff)); + RzILOpPure *op_MUL_311 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_300), DUP(op_AND_300))), CAST(16, MSB(DUP(op_AND_300)), DUP(op_AND_300))), CAST(32, MSB(CAST(16, MSB(op_AND_308), DUP(op_AND_308))), CAST(16, MSB(DUP(op_AND_308)), DUP(op_AND_308)))); + RzILOpPure *op_LSHIFT_314 = SHIFTL0(CAST(64, MSB(op_MUL_311), DUP(op_MUL_311)), SN(32, 0)); + RzILOpPure *op_ADD_315 = ADD(op_LSHIFT_294, op_LSHIFT_314); + RzILOpPure *op_EQ_316 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_ADD_269), SN(32, 0), SN(32, 0x20)), op_ADD_315); + RzILOpPure *op_RSHIFT_364 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_366 = LOGAND(op_RSHIFT_364, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_372 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_374 = LOGAND(op_RSHIFT_372, SN(32, 0xffff)); + RzILOpPure *op_MUL_377 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_366), DUP(op_AND_366))), CAST(16, MSB(DUP(op_AND_366)), DUP(op_AND_366))), CAST(32, MSB(CAST(16, MSB(op_AND_374), DUP(op_AND_374))), CAST(16, MSB(DUP(op_AND_374)), DUP(op_AND_374)))); + RzILOpPure *op_LSHIFT_380 = SHIFTL0(CAST(64, MSB(op_MUL_377), DUP(op_MUL_377)), SN(32, 0)); + RzILOpPure *op_RSHIFT_384 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_386 = LOGAND(op_RSHIFT_384, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_392 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_394 = LOGAND(op_RSHIFT_392, SN(32, 0xffff)); + RzILOpPure *op_MUL_397 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_386), DUP(op_AND_386))), CAST(16, MSB(DUP(op_AND_386)), DUP(op_AND_386))), CAST(32, MSB(CAST(16, MSB(op_AND_394), DUP(op_AND_394))), CAST(16, MSB(DUP(op_AND_394)), DUP(op_AND_394)))); + RzILOpPure *op_LSHIFT_400 = SHIFTL0(CAST(64, MSB(op_MUL_397), DUP(op_MUL_397)), SN(32, 0)); + RzILOpPure *op_ADD_401 = ADD(op_LSHIFT_380, op_LSHIFT_400); + RzILOpPure *op_LT_404 = SLT(op_ADD_401, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure 
*op_LSHIFT_409 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_410 = NEG(op_LSHIFT_409); + RzILOpPure *op_LSHIFT_415 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_418 = SUB(op_LSHIFT_415, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_419 = ITE(op_LT_404, op_NEG_410, op_SUB_418); + RzILOpEffect *gcc_expr_420 = BRANCH(op_EQ_316, EMPTY(), set_usr_field_call_360); + + // h_tmp253 = HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_422 = SETL("h_tmp253", cond_419); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16 ...; + RzILOpEffect *seq_423 = SEQN(2, gcc_expr_420, op_ASSIGN_hybrid_tmp_422); + + // Rdd = ((Rdd & (~(0xffffffff << 0x0))) | ((((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0)) ? 
(((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0) : h_tmp253) & 0xffffffff) << 0x0)); + RzILOpPure *op_LSHIFT_223 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0)); + RzILOpPure *op_NOT_224 = LOGNOT(op_LSHIFT_223); + RzILOpPure *op_AND_225 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_224); + RzILOpPure *op_RSHIFT_320 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_322 = LOGAND(op_RSHIFT_320, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_328 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_330 = LOGAND(op_RSHIFT_328, SN(32, 0xffff)); + RzILOpPure *op_MUL_333 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_322), DUP(op_AND_322))), CAST(16, MSB(DUP(op_AND_322)), DUP(op_AND_322))), CAST(32, MSB(CAST(16, MSB(op_AND_330), DUP(op_AND_330))), CAST(16, MSB(DUP(op_AND_330)), DUP(op_AND_330)))); + RzILOpPure *op_LSHIFT_336 = SHIFTL0(CAST(64, MSB(op_MUL_333), DUP(op_MUL_333)), SN(32, 0)); + RzILOpPure *op_RSHIFT_340 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_342 = LOGAND(op_RSHIFT_340, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_348 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_350 = LOGAND(op_RSHIFT_348, SN(32, 0xffff)); + RzILOpPure *op_MUL_353 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_342), DUP(op_AND_342))), CAST(16, MSB(DUP(op_AND_342)), DUP(op_AND_342))), CAST(32, MSB(CAST(16, MSB(op_AND_350), DUP(op_AND_350))), CAST(16, MSB(DUP(op_AND_350)), DUP(op_AND_350)))); + RzILOpPure *op_LSHIFT_356 = SHIFTL0(CAST(64, MSB(op_MUL_353), DUP(op_MUL_353)), SN(32, 0)); + RzILOpPure *op_ADD_357 = ADD(op_LSHIFT_336, op_LSHIFT_356); + RzILOpPure *cond_424 = ITE(DUP(op_EQ_316), op_ADD_357, VARL("h_tmp253")); + RzILOpPure *op_AND_426 = LOGAND(cond_424, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_430 = SHIFTL0(op_AND_426, SN(32, 0)); + RzILOpPure *op_OR_431 = LOGOR(op_AND_225, op_LSHIFT_430); + RzILOpEffect *op_ASSIGN_432 = 
WRITE_REG(bundle, Rdd_op, op_OR_431); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) (( ...; + RzILOpEffect *seq_433 = SEQN(2, seq_423, op_ASSIGN_432); + + RzILOpEffect *instruction_sequence = SEQN(2, seq_217, seq_433); + return instruction_sequence; +} + +// Rdd = cmpy(Rs,Rt*):<<1:sat +RzILOpEffect *hex_il_op_m2_cmpysc_s1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_144 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) - (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) - (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) - (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_15 = SHIFTRA(Rs, SN(32, 16)); + RzILOpPure *op_AND_17 = LOGAND(op_RSHIFT_15, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_24 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_26 = LOGAND(op_RSHIFT_24, SN(32, 0xffff)); + RzILOpPure *op_MUL_29 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_17), DUP(op_AND_17))), CAST(16, MSB(DUP(op_AND_17)), DUP(op_AND_17))), CAST(32, MSB(CAST(16, MSB(op_AND_26), DUP(op_AND_26))), CAST(16, MSB(DUP(op_AND_26)), DUP(op_AND_26)))); + RzILOpPure *op_LSHIFT_32 = SHIFTL0(CAST(64, MSB(op_MUL_29), DUP(op_MUL_29)), SN(32, 1)); + RzILOpPure *op_RSHIFT_36 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_38 = LOGAND(op_RSHIFT_36, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_44 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_46 = LOGAND(op_RSHIFT_44, SN(32, 0xffff)); + RzILOpPure *op_MUL_49 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_38), DUP(op_AND_38))), CAST(16, MSB(DUP(op_AND_38)), DUP(op_AND_38))), CAST(32, MSB(CAST(16, MSB(op_AND_46), DUP(op_AND_46))), CAST(16, MSB(DUP(op_AND_46)), DUP(op_AND_46)))); + RzILOpPure *op_LSHIFT_52 = SHIFTL0(CAST(64, MSB(op_MUL_49), DUP(op_MUL_49)), SN(32, 1)); + RzILOpPure *op_SUB_53 = SUB(op_LSHIFT_32, op_LSHIFT_52); + RzILOpPure *op_RSHIFT_62 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_64 = LOGAND(op_RSHIFT_62, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_70 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_72 = LOGAND(op_RSHIFT_70, SN(32, 0xffff)); + RzILOpPure *op_MUL_75 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_64), DUP(op_AND_64))), CAST(16, MSB(DUP(op_AND_64)), DUP(op_AND_64))), CAST(32, MSB(CAST(16, MSB(op_AND_72), DUP(op_AND_72))), CAST(16, MSB(DUP(op_AND_72)), DUP(op_AND_72)))); + RzILOpPure *op_LSHIFT_78 = SHIFTL0(CAST(64, MSB(op_MUL_75), DUP(op_MUL_75)), SN(32, 1)); + RzILOpPure *op_RSHIFT_82 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_84 = LOGAND(op_RSHIFT_82, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_90 = SHIFTRA(DUP(Rt), SN(32, 
16)); + RzILOpPure *op_AND_92 = LOGAND(op_RSHIFT_90, SN(32, 0xffff)); + RzILOpPure *op_MUL_95 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_84), DUP(op_AND_84))), CAST(16, MSB(DUP(op_AND_84)), DUP(op_AND_84))), CAST(32, MSB(CAST(16, MSB(op_AND_92), DUP(op_AND_92))), CAST(16, MSB(DUP(op_AND_92)), DUP(op_AND_92)))); + RzILOpPure *op_LSHIFT_98 = SHIFTL0(CAST(64, MSB(op_MUL_95), DUP(op_MUL_95)), SN(32, 1)); + RzILOpPure *op_SUB_99 = SUB(op_LSHIFT_78, op_LSHIFT_98); + RzILOpPure *op_EQ_100 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_SUB_53), SN(32, 0), SN(32, 0x20)), op_SUB_99); + RzILOpPure *op_RSHIFT_148 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_150 = LOGAND(op_RSHIFT_148, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_156 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_158 = LOGAND(op_RSHIFT_156, SN(32, 0xffff)); + RzILOpPure *op_MUL_161 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_150), DUP(op_AND_150))), CAST(16, MSB(DUP(op_AND_150)), DUP(op_AND_150))), CAST(32, MSB(CAST(16, MSB(op_AND_158), DUP(op_AND_158))), CAST(16, MSB(DUP(op_AND_158)), DUP(op_AND_158)))); + RzILOpPure *op_LSHIFT_164 = SHIFTL0(CAST(64, MSB(op_MUL_161), DUP(op_MUL_161)), SN(32, 1)); + RzILOpPure *op_RSHIFT_168 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_170 = LOGAND(op_RSHIFT_168, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_176 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_178 = LOGAND(op_RSHIFT_176, SN(32, 0xffff)); + RzILOpPure *op_MUL_181 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_170), DUP(op_AND_170))), CAST(16, MSB(DUP(op_AND_170)), DUP(op_AND_170))), CAST(32, MSB(CAST(16, MSB(op_AND_178), DUP(op_AND_178))), CAST(16, MSB(DUP(op_AND_178)), DUP(op_AND_178)))); + RzILOpPure *op_LSHIFT_184 = SHIFTL0(CAST(64, MSB(op_MUL_181), DUP(op_MUL_181)), SN(32, 1)); + RzILOpPure *op_SUB_185 = SUB(op_LSHIFT_164, op_LSHIFT_184); + RzILOpPure *op_LT_188 = SLT(op_SUB_185, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_193 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_194 = 
NEG(op_LSHIFT_193); + RzILOpPure *op_LSHIFT_199 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_202 = SUB(op_LSHIFT_199, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_203 = ITE(op_LT_188, op_NEG_194, op_SUB_202); + RzILOpEffect *gcc_expr_204 = BRANCH(op_EQ_100, EMPTY(), set_usr_field_call_144); + + // h_tmp254 = HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) - (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) - (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) - (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_206 = SETL("h_tmp254", cond_203); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16 ...; + RzILOpEffect *seq_207 = SEQN(2, gcc_expr_204, op_ASSIGN_hybrid_tmp_206); + + // Rdd = ((Rdd & (~(0xffffffff << 0x20))) | ((((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) - (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) - (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1)) ? 
(((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) - (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1) : h_tmp254) & 0xffffffff) << 0x20)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0x20)); + RzILOpPure *op_NOT_6 = LOGNOT(op_LSHIFT_5); + RzILOpPure *op_AND_7 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_6); + RzILOpPure *op_RSHIFT_104 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_106 = LOGAND(op_RSHIFT_104, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_112 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_114 = LOGAND(op_RSHIFT_112, SN(32, 0xffff)); + RzILOpPure *op_MUL_117 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_106), DUP(op_AND_106))), CAST(16, MSB(DUP(op_AND_106)), DUP(op_AND_106))), CAST(32, MSB(CAST(16, MSB(op_AND_114), DUP(op_AND_114))), CAST(16, MSB(DUP(op_AND_114)), DUP(op_AND_114)))); + RzILOpPure *op_LSHIFT_120 = SHIFTL0(CAST(64, MSB(op_MUL_117), DUP(op_MUL_117)), SN(32, 1)); + RzILOpPure *op_RSHIFT_124 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_126 = LOGAND(op_RSHIFT_124, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_132 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_134 = LOGAND(op_RSHIFT_132, SN(32, 0xffff)); + RzILOpPure *op_MUL_137 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_126), DUP(op_AND_126))), CAST(16, MSB(DUP(op_AND_126)), DUP(op_AND_126))), CAST(32, MSB(CAST(16, MSB(op_AND_134), DUP(op_AND_134))), CAST(16, MSB(DUP(op_AND_134)), DUP(op_AND_134)))); + RzILOpPure *op_LSHIFT_140 = SHIFTL0(CAST(64, MSB(op_MUL_137), DUP(op_MUL_137)), SN(32, 1)); + RzILOpPure *op_SUB_141 = SUB(op_LSHIFT_120, op_LSHIFT_140); + RzILOpPure *cond_208 = ITE(DUP(op_EQ_100), op_SUB_141, VARL("h_tmp254")); + RzILOpPure *op_AND_210 = LOGAND(cond_208, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_214 = SHIFTL0(op_AND_210, SN(32, 0x20)); + RzILOpPure *op_OR_215 = LOGOR(op_AND_7, op_LSHIFT_214); + RzILOpEffect *op_ASSIGN_216 = WRITE_REG(bundle, 
Rdd_op, op_OR_215); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) (( ...; + RzILOpEffect *seq_217 = SEQN(2, seq_207, op_ASSIGN_216); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_360 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_232 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_234 = LOGAND(op_RSHIFT_232, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_240 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_242 = LOGAND(op_RSHIFT_240, SN(32, 0xffff)); + RzILOpPure *op_MUL_245 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_234), DUP(op_AND_234))), CAST(16, MSB(DUP(op_AND_234)), DUP(op_AND_234))), CAST(32, MSB(CAST(16, MSB(op_AND_242), DUP(op_AND_242))), CAST(16, MSB(DUP(op_AND_242)), DUP(op_AND_242)))); + RzILOpPure *op_LSHIFT_248 = SHIFTL0(CAST(64, MSB(op_MUL_245), DUP(op_MUL_245)), SN(32, 1)); + RzILOpPure *op_RSHIFT_252 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_254 = LOGAND(op_RSHIFT_252, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_260 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_262 = LOGAND(op_RSHIFT_260, SN(32, 0xffff)); + RzILOpPure *op_MUL_265 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_254), DUP(op_AND_254))), CAST(16, MSB(DUP(op_AND_254)), DUP(op_AND_254))), CAST(32, MSB(CAST(16, MSB(op_AND_262), DUP(op_AND_262))), CAST(16, MSB(DUP(op_AND_262)), DUP(op_AND_262)))); + RzILOpPure *op_LSHIFT_268 = SHIFTL0(CAST(64, MSB(op_MUL_265), DUP(op_MUL_265)), SN(32, 1)); + RzILOpPure *op_ADD_269 = ADD(op_LSHIFT_248, op_LSHIFT_268); + RzILOpPure *op_RSHIFT_278 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_280 = LOGAND(op_RSHIFT_278, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_286 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_288 = LOGAND(op_RSHIFT_286, SN(32, 0xffff)); + RzILOpPure *op_MUL_291 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_280), DUP(op_AND_280))), CAST(16, MSB(DUP(op_AND_280)), DUP(op_AND_280))), CAST(32, MSB(CAST(16, MSB(op_AND_288), DUP(op_AND_288))), CAST(16, MSB(DUP(op_AND_288)), DUP(op_AND_288)))); + RzILOpPure *op_LSHIFT_294 = SHIFTL0(CAST(64, MSB(op_MUL_291), DUP(op_MUL_291)), SN(32, 1)); + RzILOpPure *op_RSHIFT_298 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_300 = LOGAND(op_RSHIFT_298, 
SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_306 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_308 = LOGAND(op_RSHIFT_306, SN(32, 0xffff)); + RzILOpPure *op_MUL_311 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_300), DUP(op_AND_300))), CAST(16, MSB(DUP(op_AND_300)), DUP(op_AND_300))), CAST(32, MSB(CAST(16, MSB(op_AND_308), DUP(op_AND_308))), CAST(16, MSB(DUP(op_AND_308)), DUP(op_AND_308)))); + RzILOpPure *op_LSHIFT_314 = SHIFTL0(CAST(64, MSB(op_MUL_311), DUP(op_MUL_311)), SN(32, 1)); + RzILOpPure *op_ADD_315 = ADD(op_LSHIFT_294, op_LSHIFT_314); + RzILOpPure *op_EQ_316 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_ADD_269), SN(32, 0), SN(32, 0x20)), op_ADD_315); + RzILOpPure *op_RSHIFT_364 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_366 = LOGAND(op_RSHIFT_364, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_372 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_374 = LOGAND(op_RSHIFT_372, SN(32, 0xffff)); + RzILOpPure *op_MUL_377 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_366), DUP(op_AND_366))), CAST(16, MSB(DUP(op_AND_366)), DUP(op_AND_366))), CAST(32, MSB(CAST(16, MSB(op_AND_374), DUP(op_AND_374))), CAST(16, MSB(DUP(op_AND_374)), DUP(op_AND_374)))); + RzILOpPure *op_LSHIFT_380 = SHIFTL0(CAST(64, MSB(op_MUL_377), DUP(op_MUL_377)), SN(32, 1)); + RzILOpPure *op_RSHIFT_384 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_386 = LOGAND(op_RSHIFT_384, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_392 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_394 = LOGAND(op_RSHIFT_392, SN(32, 0xffff)); + RzILOpPure *op_MUL_397 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_386), DUP(op_AND_386))), CAST(16, MSB(DUP(op_AND_386)), DUP(op_AND_386))), CAST(32, MSB(CAST(16, MSB(op_AND_394), DUP(op_AND_394))), CAST(16, MSB(DUP(op_AND_394)), DUP(op_AND_394)))); + RzILOpPure *op_LSHIFT_400 = SHIFTL0(CAST(64, MSB(op_MUL_397), DUP(op_MUL_397)), SN(32, 1)); + RzILOpPure *op_ADD_401 = ADD(op_LSHIFT_380, op_LSHIFT_400); + RzILOpPure *op_LT_404 = SLT(op_ADD_401, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure 
*op_LSHIFT_409 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_410 = NEG(op_LSHIFT_409); + RzILOpPure *op_LSHIFT_415 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_418 = SUB(op_LSHIFT_415, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_419 = ITE(op_LT_404, op_NEG_410, op_SUB_418); + RzILOpEffect *gcc_expr_420 = BRANCH(op_EQ_316, EMPTY(), set_usr_field_call_360); + + // h_tmp255 = HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_422 = SETL("h_tmp255", cond_419); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16 ...; + RzILOpEffect *seq_423 = SEQN(2, gcc_expr_420, op_ASSIGN_hybrid_tmp_422); + + // Rdd = ((Rdd & (~(0xffffffff << 0x0))) | ((((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1)) ? 
(((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1) : h_tmp255) & 0xffffffff) << 0x0)); + RzILOpPure *op_LSHIFT_223 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0)); + RzILOpPure *op_NOT_224 = LOGNOT(op_LSHIFT_223); + RzILOpPure *op_AND_225 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_224); + RzILOpPure *op_RSHIFT_320 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_322 = LOGAND(op_RSHIFT_320, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_328 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_330 = LOGAND(op_RSHIFT_328, SN(32, 0xffff)); + RzILOpPure *op_MUL_333 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_322), DUP(op_AND_322))), CAST(16, MSB(DUP(op_AND_322)), DUP(op_AND_322))), CAST(32, MSB(CAST(16, MSB(op_AND_330), DUP(op_AND_330))), CAST(16, MSB(DUP(op_AND_330)), DUP(op_AND_330)))); + RzILOpPure *op_LSHIFT_336 = SHIFTL0(CAST(64, MSB(op_MUL_333), DUP(op_MUL_333)), SN(32, 1)); + RzILOpPure *op_RSHIFT_340 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_342 = LOGAND(op_RSHIFT_340, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_348 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_350 = LOGAND(op_RSHIFT_348, SN(32, 0xffff)); + RzILOpPure *op_MUL_353 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_342), DUP(op_AND_342))), CAST(16, MSB(DUP(op_AND_342)), DUP(op_AND_342))), CAST(32, MSB(CAST(16, MSB(op_AND_350), DUP(op_AND_350))), CAST(16, MSB(DUP(op_AND_350)), DUP(op_AND_350)))); + RzILOpPure *op_LSHIFT_356 = SHIFTL0(CAST(64, MSB(op_MUL_353), DUP(op_MUL_353)), SN(32, 1)); + RzILOpPure *op_ADD_357 = ADD(op_LSHIFT_336, op_LSHIFT_356); + RzILOpPure *cond_424 = ITE(DUP(op_EQ_316), op_ADD_357, VARL("h_tmp255")); + RzILOpPure *op_AND_426 = LOGAND(cond_424, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_430 = SHIFTL0(op_AND_426, SN(32, 0)); + RzILOpPure *op_OR_431 = LOGOR(op_AND_225, op_LSHIFT_430); + RzILOpEffect *op_ASSIGN_432 = 
WRITE_REG(bundle, Rdd_op, op_OR_431); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) (( ...; + RzILOpEffect *seq_433 = SEQN(2, seq_423, op_ASSIGN_432); + + RzILOpEffect *instruction_sequence = SEQN(2, seq_217, seq_433); + return instruction_sequence; +} + +// Rxx -= cmpy(Rs,Rt):sat +RzILOpEffect *hex_il_op_m2_cnacs_s0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rxx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_171 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) - (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) - (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) - (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_23 = SHIFTRA(Rs, SN(32, 16)); + RzILOpPure *op_AND_25 = LOGAND(op_RSHIFT_23, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_32 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_34 = LOGAND(op_RSHIFT_32, SN(32, 0xffff)); + RzILOpPure *op_MUL_37 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_25), DUP(op_AND_25))), CAST(16, MSB(DUP(op_AND_25)), DUP(op_AND_25))), CAST(32, MSB(CAST(16, MSB(op_AND_34), DUP(op_AND_34))), CAST(16, MSB(DUP(op_AND_34)), DUP(op_AND_34)))); + RzILOpPure *op_LSHIFT_40 = SHIFTL0(CAST(64, MSB(op_MUL_37), DUP(op_MUL_37)), SN(32, 0)); + RzILOpPure *op_RSHIFT_44 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_46 = LOGAND(op_RSHIFT_44, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_52 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_54 = LOGAND(op_RSHIFT_52, SN(32, 0xffff)); + RzILOpPure *op_MUL_57 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_46), DUP(op_AND_46))), CAST(16, MSB(DUP(op_AND_46)), DUP(op_AND_46))), CAST(32, MSB(CAST(16, MSB(op_AND_54), DUP(op_AND_54))), CAST(16, MSB(DUP(op_AND_54)), DUP(op_AND_54)))); + RzILOpPure *op_LSHIFT_60 = SHIFTL0(CAST(64, MSB(op_MUL_57), DUP(op_MUL_57)), SN(32, 0)); + RzILOpPure *op_ADD_61 = ADD(op_LSHIFT_40, op_LSHIFT_60); + RzILOpPure *op_SUB_62 = SUB(CAST(64, MSB(CAST(32, MSB(op_AND_16), DUP(op_AND_16))), CAST(32, MSB(DUP(op_AND_16)), DUP(op_AND_16))), op_ADD_61); + RzILOpPure *op_RSHIFT_71 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_73 = LOGAND(op_RSHIFT_71, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_79 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_81 = LOGAND(op_RSHIFT_79, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_87 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_89 = LOGAND(op_RSHIFT_87, SN(32, 0xffff)); + RzILOpPure *op_MUL_92 = MUL(CAST(32, MSB(CAST(16, 
MSB(op_AND_81), DUP(op_AND_81))), CAST(16, MSB(DUP(op_AND_81)), DUP(op_AND_81))), CAST(32, MSB(CAST(16, MSB(op_AND_89), DUP(op_AND_89))), CAST(16, MSB(DUP(op_AND_89)), DUP(op_AND_89)))); + RzILOpPure *op_LSHIFT_95 = SHIFTL0(CAST(64, MSB(op_MUL_92), DUP(op_MUL_92)), SN(32, 0)); + RzILOpPure *op_RSHIFT_99 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_101 = LOGAND(op_RSHIFT_99, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_107 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_109 = LOGAND(op_RSHIFT_107, SN(32, 0xffff)); + RzILOpPure *op_MUL_112 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_101), DUP(op_AND_101))), CAST(16, MSB(DUP(op_AND_101)), DUP(op_AND_101))), CAST(32, MSB(CAST(16, MSB(op_AND_109), DUP(op_AND_109))), CAST(16, MSB(DUP(op_AND_109)), DUP(op_AND_109)))); + RzILOpPure *op_LSHIFT_115 = SHIFTL0(CAST(64, MSB(op_MUL_112), DUP(op_MUL_112)), SN(32, 0)); + RzILOpPure *op_ADD_116 = ADD(op_LSHIFT_95, op_LSHIFT_115); + RzILOpPure *op_SUB_117 = SUB(CAST(64, MSB(CAST(32, MSB(op_AND_73), DUP(op_AND_73))), CAST(32, MSB(DUP(op_AND_73)), DUP(op_AND_73))), op_ADD_116); + RzILOpPure *op_EQ_118 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_SUB_62), SN(32, 0), SN(32, 0x20)), op_SUB_117); + RzILOpPure *op_RSHIFT_175 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_177 = LOGAND(op_RSHIFT_175, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_183 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_185 = LOGAND(op_RSHIFT_183, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_191 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_193 = LOGAND(op_RSHIFT_191, SN(32, 0xffff)); + RzILOpPure *op_MUL_196 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_185), DUP(op_AND_185))), CAST(16, MSB(DUP(op_AND_185)), DUP(op_AND_185))), CAST(32, MSB(CAST(16, MSB(op_AND_193), DUP(op_AND_193))), CAST(16, MSB(DUP(op_AND_193)), DUP(op_AND_193)))); + RzILOpPure *op_LSHIFT_199 = SHIFTL0(CAST(64, MSB(op_MUL_196), DUP(op_MUL_196)), SN(32, 0)); + RzILOpPure *op_RSHIFT_203 = SHIFTRA(DUP(Rs), SN(32, 0)); + 
RzILOpPure *op_AND_205 = LOGAND(op_RSHIFT_203, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_211 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_213 = LOGAND(op_RSHIFT_211, SN(32, 0xffff)); + RzILOpPure *op_MUL_216 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_205), DUP(op_AND_205))), CAST(16, MSB(DUP(op_AND_205)), DUP(op_AND_205))), CAST(32, MSB(CAST(16, MSB(op_AND_213), DUP(op_AND_213))), CAST(16, MSB(DUP(op_AND_213)), DUP(op_AND_213)))); + RzILOpPure *op_LSHIFT_219 = SHIFTL0(CAST(64, MSB(op_MUL_216), DUP(op_MUL_216)), SN(32, 0)); + RzILOpPure *op_ADD_220 = ADD(op_LSHIFT_199, op_LSHIFT_219); + RzILOpPure *op_SUB_221 = SUB(CAST(64, MSB(CAST(32, MSB(op_AND_177), DUP(op_AND_177))), CAST(32, MSB(DUP(op_AND_177)), DUP(op_AND_177))), op_ADD_220); + RzILOpPure *op_LT_224 = SLT(op_SUB_221, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_229 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_230 = NEG(op_LSHIFT_229); + RzILOpPure *op_LSHIFT_235 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_238 = SUB(op_LSHIFT_235, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_239 = ITE(op_LT_224, op_NEG_230, op_SUB_238); + RzILOpEffect *gcc_expr_240 = BRANCH(op_EQ_118, EMPTY(), set_usr_field_call_171); + + // h_tmp256 = HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) - (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) - (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) - (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) 
((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_242 = SETL("h_tmp256", cond_239); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx > ...; + RzILOpEffect *seq_243 = SEQN(2, gcc_expr_240, op_ASSIGN_hybrid_tmp_242); + + // Rxx = ((Rxx & (~(0xffffffff << 0x20))) | ((((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) - (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) - (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0)) ? 
((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) - (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0) : h_tmp256) & 0xffffffff) << 0x20)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0x20)); + RzILOpPure *op_NOT_6 = LOGNOT(op_LSHIFT_5); + RzILOpPure *op_AND_7 = LOGAND(READ_REG(pkt, Rxx_op, false), op_NOT_6); + RzILOpPure *op_RSHIFT_122 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_124 = LOGAND(op_RSHIFT_122, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_130 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_132 = LOGAND(op_RSHIFT_130, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_138 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_140 = LOGAND(op_RSHIFT_138, SN(32, 0xffff)); + RzILOpPure *op_MUL_143 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_132), DUP(op_AND_132))), CAST(16, MSB(DUP(op_AND_132)), DUP(op_AND_132))), CAST(32, MSB(CAST(16, MSB(op_AND_140), DUP(op_AND_140))), CAST(16, MSB(DUP(op_AND_140)), DUP(op_AND_140)))); + RzILOpPure *op_LSHIFT_146 = SHIFTL0(CAST(64, MSB(op_MUL_143), DUP(op_MUL_143)), SN(32, 0)); + RzILOpPure *op_RSHIFT_150 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_152 = LOGAND(op_RSHIFT_150, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_158 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_160 = LOGAND(op_RSHIFT_158, SN(32, 0xffff)); + RzILOpPure *op_MUL_163 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_152), DUP(op_AND_152))), CAST(16, MSB(DUP(op_AND_152)), DUP(op_AND_152))), CAST(32, MSB(CAST(16, MSB(op_AND_160), DUP(op_AND_160))), CAST(16, MSB(DUP(op_AND_160)), DUP(op_AND_160)))); + RzILOpPure *op_LSHIFT_166 = SHIFTL0(CAST(64, MSB(op_MUL_163), DUP(op_MUL_163)), SN(32, 0)); + RzILOpPure *op_ADD_167 = ADD(op_LSHIFT_146, op_LSHIFT_166); + RzILOpPure *op_SUB_168 = SUB(CAST(64, MSB(CAST(32, MSB(op_AND_124), DUP(op_AND_124))), CAST(32, 
MSB(DUP(op_AND_124)), DUP(op_AND_124))), op_ADD_167); + RzILOpPure *cond_244 = ITE(DUP(op_EQ_118), op_SUB_168, VARL("h_tmp256")); + RzILOpPure *op_AND_246 = LOGAND(cond_244, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_250 = SHIFTL0(op_AND_246, SN(32, 0x20)); + RzILOpPure *op_OR_251 = LOGOR(op_AND_7, op_LSHIFT_250); + RzILOpEffect *op_ASSIGN_252 = WRITE_REG(bundle, Rxx_op, op_OR_251); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((R ...; + RzILOpEffect *seq_253 = SEQN(2, seq_243, op_ASSIGN_252); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_423 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) - (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) - (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) - (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) - (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) - (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) - (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_268 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_270 = LOGAND(op_RSHIFT_268, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_276 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_278 = LOGAND(op_RSHIFT_276, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_284 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_286 = LOGAND(op_RSHIFT_284, SN(32, 0xffff)); + RzILOpPure *op_MUL_289 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_278), DUP(op_AND_278))), CAST(16, MSB(DUP(op_AND_278)), DUP(op_AND_278))), CAST(32, MSB(CAST(16, MSB(op_AND_286), DUP(op_AND_286))), CAST(16, MSB(DUP(op_AND_286)), DUP(op_AND_286)))); + RzILOpPure *op_LSHIFT_292 = SHIFTL0(CAST(64, MSB(op_MUL_289), DUP(op_MUL_289)), SN(32, 0)); + RzILOpPure *op_RSHIFT_296 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_298 = LOGAND(op_RSHIFT_296, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_304 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_306 = LOGAND(op_RSHIFT_304, SN(32, 0xffff)); + RzILOpPure *op_MUL_309 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_298), DUP(op_AND_298))), CAST(16, MSB(DUP(op_AND_298)), DUP(op_AND_298))), CAST(32, MSB(CAST(16, MSB(op_AND_306), DUP(op_AND_306))), CAST(16, MSB(DUP(op_AND_306)), DUP(op_AND_306)))); + RzILOpPure *op_LSHIFT_312 = SHIFTL0(CAST(64, MSB(op_MUL_309), DUP(op_MUL_309)), SN(32, 0)); + RzILOpPure *op_SUB_313 = SUB(op_LSHIFT_292, op_LSHIFT_312); + RzILOpPure *op_SUB_314 = SUB(CAST(64, MSB(CAST(32, MSB(op_AND_270), DUP(op_AND_270))), CAST(32, MSB(DUP(op_AND_270)), DUP(op_AND_270))), op_SUB_313); + RzILOpPure *op_RSHIFT_323 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_325 = LOGAND(op_RSHIFT_323, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_331 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_333 = LOGAND(op_RSHIFT_331, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_339 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_341 = LOGAND(op_RSHIFT_339, SN(32, 
0xffff)); + RzILOpPure *op_MUL_344 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_333), DUP(op_AND_333))), CAST(16, MSB(DUP(op_AND_333)), DUP(op_AND_333))), CAST(32, MSB(CAST(16, MSB(op_AND_341), DUP(op_AND_341))), CAST(16, MSB(DUP(op_AND_341)), DUP(op_AND_341)))); + RzILOpPure *op_LSHIFT_347 = SHIFTL0(CAST(64, MSB(op_MUL_344), DUP(op_MUL_344)), SN(32, 0)); + RzILOpPure *op_RSHIFT_351 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_353 = LOGAND(op_RSHIFT_351, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_359 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_361 = LOGAND(op_RSHIFT_359, SN(32, 0xffff)); + RzILOpPure *op_MUL_364 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_353), DUP(op_AND_353))), CAST(16, MSB(DUP(op_AND_353)), DUP(op_AND_353))), CAST(32, MSB(CAST(16, MSB(op_AND_361), DUP(op_AND_361))), CAST(16, MSB(DUP(op_AND_361)), DUP(op_AND_361)))); + RzILOpPure *op_LSHIFT_367 = SHIFTL0(CAST(64, MSB(op_MUL_364), DUP(op_MUL_364)), SN(32, 0)); + RzILOpPure *op_SUB_368 = SUB(op_LSHIFT_347, op_LSHIFT_367); + RzILOpPure *op_SUB_369 = SUB(CAST(64, MSB(CAST(32, MSB(op_AND_325), DUP(op_AND_325))), CAST(32, MSB(DUP(op_AND_325)), DUP(op_AND_325))), op_SUB_368); + RzILOpPure *op_EQ_370 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_SUB_314), SN(32, 0), SN(32, 0x20)), op_SUB_369); + RzILOpPure *op_RSHIFT_427 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_429 = LOGAND(op_RSHIFT_427, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_435 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_437 = LOGAND(op_RSHIFT_435, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_443 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_445 = LOGAND(op_RSHIFT_443, SN(32, 0xffff)); + RzILOpPure *op_MUL_448 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_437), DUP(op_AND_437))), CAST(16, MSB(DUP(op_AND_437)), DUP(op_AND_437))), CAST(32, MSB(CAST(16, MSB(op_AND_445), DUP(op_AND_445))), CAST(16, MSB(DUP(op_AND_445)), DUP(op_AND_445)))); + RzILOpPure *op_LSHIFT_451 = SHIFTL0(CAST(64, MSB(op_MUL_448), 
DUP(op_MUL_448)), SN(32, 0)); + RzILOpPure *op_RSHIFT_455 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_457 = LOGAND(op_RSHIFT_455, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_463 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_465 = LOGAND(op_RSHIFT_463, SN(32, 0xffff)); + RzILOpPure *op_MUL_468 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_457), DUP(op_AND_457))), CAST(16, MSB(DUP(op_AND_457)), DUP(op_AND_457))), CAST(32, MSB(CAST(16, MSB(op_AND_465), DUP(op_AND_465))), CAST(16, MSB(DUP(op_AND_465)), DUP(op_AND_465)))); + RzILOpPure *op_LSHIFT_471 = SHIFTL0(CAST(64, MSB(op_MUL_468), DUP(op_MUL_468)), SN(32, 0)); + RzILOpPure *op_SUB_472 = SUB(op_LSHIFT_451, op_LSHIFT_471); + RzILOpPure *op_SUB_473 = SUB(CAST(64, MSB(CAST(32, MSB(op_AND_429), DUP(op_AND_429))), CAST(32, MSB(DUP(op_AND_429)), DUP(op_AND_429))), op_SUB_472); + RzILOpPure *op_LT_476 = SLT(op_SUB_473, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_481 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_482 = NEG(op_LSHIFT_481); + RzILOpPure *op_LSHIFT_487 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_490 = SUB(op_LSHIFT_487, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_491 = ITE(op_LT_476, op_NEG_482, op_SUB_490); + RzILOpEffect *gcc_expr_492 = BRANCH(op_EQ_370, EMPTY(), set_usr_field_call_423); + + // h_tmp257 = HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) - (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) - (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) - (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) - (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) 
((Rxx >> 0x0) & 0xffffffff))) - (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) - (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_494 = SETL("h_tmp257", cond_491); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx > ...; + RzILOpEffect *seq_495 = SEQN(2, gcc_expr_492, op_ASSIGN_hybrid_tmp_494); + + // Rxx = ((Rxx & (~(0xffffffff << 0x0))) | ((((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) - (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) - (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) - (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) - (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0)) ? 
((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) - (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) - (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0) : h_tmp257) & 0xffffffff) << 0x0)); + RzILOpPure *op_LSHIFT_259 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0)); + RzILOpPure *op_NOT_260 = LOGNOT(op_LSHIFT_259); + RzILOpPure *op_AND_261 = LOGAND(READ_REG(pkt, Rxx_op, false), op_NOT_260); + RzILOpPure *op_RSHIFT_374 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_376 = LOGAND(op_RSHIFT_374, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_382 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_384 = LOGAND(op_RSHIFT_382, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_390 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_392 = LOGAND(op_RSHIFT_390, SN(32, 0xffff)); + RzILOpPure *op_MUL_395 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_384), DUP(op_AND_384))), CAST(16, MSB(DUP(op_AND_384)), DUP(op_AND_384))), CAST(32, MSB(CAST(16, MSB(op_AND_392), DUP(op_AND_392))), CAST(16, MSB(DUP(op_AND_392)), DUP(op_AND_392)))); + RzILOpPure *op_LSHIFT_398 = SHIFTL0(CAST(64, MSB(op_MUL_395), DUP(op_MUL_395)), SN(32, 0)); + RzILOpPure *op_RSHIFT_402 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_404 = LOGAND(op_RSHIFT_402, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_410 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_412 = LOGAND(op_RSHIFT_410, SN(32, 0xffff)); + RzILOpPure *op_MUL_415 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_404), DUP(op_AND_404))), CAST(16, MSB(DUP(op_AND_404)), DUP(op_AND_404))), CAST(32, MSB(CAST(16, MSB(op_AND_412), DUP(op_AND_412))), CAST(16, MSB(DUP(op_AND_412)), DUP(op_AND_412)))); + RzILOpPure *op_LSHIFT_418 = SHIFTL0(CAST(64, MSB(op_MUL_415), DUP(op_MUL_415)), SN(32, 0)); + RzILOpPure *op_SUB_419 = SUB(op_LSHIFT_398, op_LSHIFT_418); + RzILOpPure *op_SUB_420 = SUB(CAST(64, MSB(CAST(32, MSB(op_AND_376), DUP(op_AND_376))), CAST(32, 
MSB(DUP(op_AND_376)), DUP(op_AND_376))), op_SUB_419); + RzILOpPure *cond_496 = ITE(DUP(op_EQ_370), op_SUB_420, VARL("h_tmp257")); + RzILOpPure *op_AND_498 = LOGAND(cond_496, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_502 = SHIFTL0(op_AND_498, SN(32, 0)); + RzILOpPure *op_OR_503 = LOGOR(op_AND_261, op_LSHIFT_502); + RzILOpEffect *op_ASSIGN_504 = WRITE_REG(bundle, Rxx_op, op_OR_503); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((R ...; + RzILOpEffect *seq_505 = SEQN(2, seq_495, op_ASSIGN_504); + + RzILOpEffect *instruction_sequence = SEQN(2, seq_253, seq_505); + return instruction_sequence; +} + +// Rxx -= cmpy(Rs,Rt):<<1:sat +RzILOpEffect *hex_il_op_m2_cnacs_s1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rxx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_171 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) - (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) - (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) - (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) 
((Rt >> 0x0) & 0xffff)))) << 0x1) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_23 = SHIFTRA(Rs, SN(32, 16)); + RzILOpPure *op_AND_25 = LOGAND(op_RSHIFT_23, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_32 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_34 = LOGAND(op_RSHIFT_32, SN(32, 0xffff)); + RzILOpPure *op_MUL_37 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_25), DUP(op_AND_25))), CAST(16, MSB(DUP(op_AND_25)), DUP(op_AND_25))), CAST(32, MSB(CAST(16, MSB(op_AND_34), DUP(op_AND_34))), CAST(16, MSB(DUP(op_AND_34)), DUP(op_AND_34)))); + RzILOpPure *op_LSHIFT_40 = SHIFTL0(CAST(64, MSB(op_MUL_37), DUP(op_MUL_37)), SN(32, 1)); + RzILOpPure *op_RSHIFT_44 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_46 = LOGAND(op_RSHIFT_44, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_52 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_54 = LOGAND(op_RSHIFT_52, SN(32, 0xffff)); + RzILOpPure *op_MUL_57 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_46), DUP(op_AND_46))), CAST(16, MSB(DUP(op_AND_46)), DUP(op_AND_46))), CAST(32, MSB(CAST(16, MSB(op_AND_54), DUP(op_AND_54))), CAST(16, MSB(DUP(op_AND_54)), DUP(op_AND_54)))); + RzILOpPure *op_LSHIFT_60 = SHIFTL0(CAST(64, MSB(op_MUL_57), DUP(op_MUL_57)), SN(32, 1)); + RzILOpPure *op_ADD_61 = ADD(op_LSHIFT_40, op_LSHIFT_60); + RzILOpPure *op_SUB_62 = SUB(CAST(64, MSB(CAST(32, MSB(op_AND_16), DUP(op_AND_16))), CAST(32, MSB(DUP(op_AND_16)), DUP(op_AND_16))), op_ADD_61); + RzILOpPure *op_RSHIFT_71 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_73 = LOGAND(op_RSHIFT_71, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_79 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_81 = LOGAND(op_RSHIFT_79, SN(32, 0xffff)); + RzILOpPure 
*op_RSHIFT_87 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_89 = LOGAND(op_RSHIFT_87, SN(32, 0xffff)); + RzILOpPure *op_MUL_92 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_81), DUP(op_AND_81))), CAST(16, MSB(DUP(op_AND_81)), DUP(op_AND_81))), CAST(32, MSB(CAST(16, MSB(op_AND_89), DUP(op_AND_89))), CAST(16, MSB(DUP(op_AND_89)), DUP(op_AND_89)))); + RzILOpPure *op_LSHIFT_95 = SHIFTL0(CAST(64, MSB(op_MUL_92), DUP(op_MUL_92)), SN(32, 1)); + RzILOpPure *op_RSHIFT_99 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_101 = LOGAND(op_RSHIFT_99, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_107 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_109 = LOGAND(op_RSHIFT_107, SN(32, 0xffff)); + RzILOpPure *op_MUL_112 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_101), DUP(op_AND_101))), CAST(16, MSB(DUP(op_AND_101)), DUP(op_AND_101))), CAST(32, MSB(CAST(16, MSB(op_AND_109), DUP(op_AND_109))), CAST(16, MSB(DUP(op_AND_109)), DUP(op_AND_109)))); + RzILOpPure *op_LSHIFT_115 = SHIFTL0(CAST(64, MSB(op_MUL_112), DUP(op_MUL_112)), SN(32, 1)); + RzILOpPure *op_ADD_116 = ADD(op_LSHIFT_95, op_LSHIFT_115); + RzILOpPure *op_SUB_117 = SUB(CAST(64, MSB(CAST(32, MSB(op_AND_73), DUP(op_AND_73))), CAST(32, MSB(DUP(op_AND_73)), DUP(op_AND_73))), op_ADD_116); + RzILOpPure *op_EQ_118 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_SUB_62), SN(32, 0), SN(32, 0x20)), op_SUB_117); + RzILOpPure *op_RSHIFT_175 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_177 = LOGAND(op_RSHIFT_175, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_183 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_185 = LOGAND(op_RSHIFT_183, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_191 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_193 = LOGAND(op_RSHIFT_191, SN(32, 0xffff)); + RzILOpPure *op_MUL_196 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_185), DUP(op_AND_185))), CAST(16, MSB(DUP(op_AND_185)), DUP(op_AND_185))), CAST(32, MSB(CAST(16, MSB(op_AND_193), DUP(op_AND_193))), CAST(16, MSB(DUP(op_AND_193)), 
DUP(op_AND_193)))); + RzILOpPure *op_LSHIFT_199 = SHIFTL0(CAST(64, MSB(op_MUL_196), DUP(op_MUL_196)), SN(32, 1)); + RzILOpPure *op_RSHIFT_203 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_205 = LOGAND(op_RSHIFT_203, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_211 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_213 = LOGAND(op_RSHIFT_211, SN(32, 0xffff)); + RzILOpPure *op_MUL_216 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_205), DUP(op_AND_205))), CAST(16, MSB(DUP(op_AND_205)), DUP(op_AND_205))), CAST(32, MSB(CAST(16, MSB(op_AND_213), DUP(op_AND_213))), CAST(16, MSB(DUP(op_AND_213)), DUP(op_AND_213)))); + RzILOpPure *op_LSHIFT_219 = SHIFTL0(CAST(64, MSB(op_MUL_216), DUP(op_MUL_216)), SN(32, 1)); + RzILOpPure *op_ADD_220 = ADD(op_LSHIFT_199, op_LSHIFT_219); + RzILOpPure *op_SUB_221 = SUB(CAST(64, MSB(CAST(32, MSB(op_AND_177), DUP(op_AND_177))), CAST(32, MSB(DUP(op_AND_177)), DUP(op_AND_177))), op_ADD_220); + RzILOpPure *op_LT_224 = SLT(op_SUB_221, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_229 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_230 = NEG(op_LSHIFT_229); + RzILOpPure *op_LSHIFT_235 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_238 = SUB(op_LSHIFT_235, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_239 = ITE(op_LT_224, op_NEG_230, op_SUB_238); + RzILOpEffect *gcc_expr_240 = BRANCH(op_EQ_118, EMPTY(), set_usr_field_call_171); + + // h_tmp258 = HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) - (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) - (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1))) {{}} 
else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) - (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_242 = SETL("h_tmp258", cond_239); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx > ...; + RzILOpEffect *seq_243 = SEQN(2, gcc_expr_240, op_ASSIGN_hybrid_tmp_242); + + // Rxx = ((Rxx & (~(0xffffffff << 0x20))) | ((((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) - (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) - (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1)) ? 
((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) - (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1) : h_tmp258) & 0xffffffff) << 0x20)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0x20)); + RzILOpPure *op_NOT_6 = LOGNOT(op_LSHIFT_5); + RzILOpPure *op_AND_7 = LOGAND(READ_REG(pkt, Rxx_op, false), op_NOT_6); + RzILOpPure *op_RSHIFT_122 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_124 = LOGAND(op_RSHIFT_122, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_130 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_132 = LOGAND(op_RSHIFT_130, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_138 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_140 = LOGAND(op_RSHIFT_138, SN(32, 0xffff)); + RzILOpPure *op_MUL_143 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_132), DUP(op_AND_132))), CAST(16, MSB(DUP(op_AND_132)), DUP(op_AND_132))), CAST(32, MSB(CAST(16, MSB(op_AND_140), DUP(op_AND_140))), CAST(16, MSB(DUP(op_AND_140)), DUP(op_AND_140)))); + RzILOpPure *op_LSHIFT_146 = SHIFTL0(CAST(64, MSB(op_MUL_143), DUP(op_MUL_143)), SN(32, 1)); + RzILOpPure *op_RSHIFT_150 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_152 = LOGAND(op_RSHIFT_150, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_158 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_160 = LOGAND(op_RSHIFT_158, SN(32, 0xffff)); + RzILOpPure *op_MUL_163 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_152), DUP(op_AND_152))), CAST(16, MSB(DUP(op_AND_152)), DUP(op_AND_152))), CAST(32, MSB(CAST(16, MSB(op_AND_160), DUP(op_AND_160))), CAST(16, MSB(DUP(op_AND_160)), DUP(op_AND_160)))); + RzILOpPure *op_LSHIFT_166 = SHIFTL0(CAST(64, MSB(op_MUL_163), DUP(op_MUL_163)), SN(32, 1)); + RzILOpPure *op_ADD_167 = ADD(op_LSHIFT_146, op_LSHIFT_166); + RzILOpPure *op_SUB_168 = SUB(CAST(64, MSB(CAST(32, MSB(op_AND_124), DUP(op_AND_124))), CAST(32, 
MSB(DUP(op_AND_124)), DUP(op_AND_124))), op_ADD_167); + RzILOpPure *cond_244 = ITE(DUP(op_EQ_118), op_SUB_168, VARL("h_tmp258")); + RzILOpPure *op_AND_246 = LOGAND(cond_244, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_250 = SHIFTL0(op_AND_246, SN(32, 0x20)); + RzILOpPure *op_OR_251 = LOGOR(op_AND_7, op_LSHIFT_250); + RzILOpEffect *op_ASSIGN_252 = WRITE_REG(bundle, Rxx_op, op_OR_251); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((R ...; + RzILOpEffect *seq_253 = SEQN(2, seq_243, op_ASSIGN_252); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_423 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) - (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) - (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) - (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) - (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) - (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) - (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_268 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_270 = LOGAND(op_RSHIFT_268, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_276 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_278 = LOGAND(op_RSHIFT_276, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_284 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_286 = LOGAND(op_RSHIFT_284, SN(32, 0xffff)); + RzILOpPure *op_MUL_289 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_278), DUP(op_AND_278))), CAST(16, MSB(DUP(op_AND_278)), DUP(op_AND_278))), CAST(32, MSB(CAST(16, MSB(op_AND_286), DUP(op_AND_286))), CAST(16, MSB(DUP(op_AND_286)), DUP(op_AND_286)))); + RzILOpPure *op_LSHIFT_292 = SHIFTL0(CAST(64, MSB(op_MUL_289), DUP(op_MUL_289)), SN(32, 1)); + RzILOpPure *op_RSHIFT_296 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_298 = LOGAND(op_RSHIFT_296, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_304 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_306 = LOGAND(op_RSHIFT_304, SN(32, 0xffff)); + RzILOpPure *op_MUL_309 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_298), DUP(op_AND_298))), CAST(16, MSB(DUP(op_AND_298)), DUP(op_AND_298))), CAST(32, MSB(CAST(16, MSB(op_AND_306), DUP(op_AND_306))), CAST(16, MSB(DUP(op_AND_306)), DUP(op_AND_306)))); + RzILOpPure *op_LSHIFT_312 = SHIFTL0(CAST(64, MSB(op_MUL_309), DUP(op_MUL_309)), SN(32, 1)); + RzILOpPure *op_SUB_313 = SUB(op_LSHIFT_292, op_LSHIFT_312); + RzILOpPure *op_SUB_314 = SUB(CAST(64, MSB(CAST(32, MSB(op_AND_270), DUP(op_AND_270))), CAST(32, MSB(DUP(op_AND_270)), DUP(op_AND_270))), op_SUB_313); + RzILOpPure *op_RSHIFT_323 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_325 = LOGAND(op_RSHIFT_323, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_331 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_333 = LOGAND(op_RSHIFT_331, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_339 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_341 = LOGAND(op_RSHIFT_339, SN(32, 
0xffff)); + RzILOpPure *op_MUL_344 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_333), DUP(op_AND_333))), CAST(16, MSB(DUP(op_AND_333)), DUP(op_AND_333))), CAST(32, MSB(CAST(16, MSB(op_AND_341), DUP(op_AND_341))), CAST(16, MSB(DUP(op_AND_341)), DUP(op_AND_341)))); + RzILOpPure *op_LSHIFT_347 = SHIFTL0(CAST(64, MSB(op_MUL_344), DUP(op_MUL_344)), SN(32, 1)); + RzILOpPure *op_RSHIFT_351 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_353 = LOGAND(op_RSHIFT_351, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_359 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_361 = LOGAND(op_RSHIFT_359, SN(32, 0xffff)); + RzILOpPure *op_MUL_364 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_353), DUP(op_AND_353))), CAST(16, MSB(DUP(op_AND_353)), DUP(op_AND_353))), CAST(32, MSB(CAST(16, MSB(op_AND_361), DUP(op_AND_361))), CAST(16, MSB(DUP(op_AND_361)), DUP(op_AND_361)))); + RzILOpPure *op_LSHIFT_367 = SHIFTL0(CAST(64, MSB(op_MUL_364), DUP(op_MUL_364)), SN(32, 1)); + RzILOpPure *op_SUB_368 = SUB(op_LSHIFT_347, op_LSHIFT_367); + RzILOpPure *op_SUB_369 = SUB(CAST(64, MSB(CAST(32, MSB(op_AND_325), DUP(op_AND_325))), CAST(32, MSB(DUP(op_AND_325)), DUP(op_AND_325))), op_SUB_368); + RzILOpPure *op_EQ_370 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_SUB_314), SN(32, 0), SN(32, 0x20)), op_SUB_369); + RzILOpPure *op_RSHIFT_427 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_429 = LOGAND(op_RSHIFT_427, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_435 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_437 = LOGAND(op_RSHIFT_435, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_443 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_445 = LOGAND(op_RSHIFT_443, SN(32, 0xffff)); + RzILOpPure *op_MUL_448 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_437), DUP(op_AND_437))), CAST(16, MSB(DUP(op_AND_437)), DUP(op_AND_437))), CAST(32, MSB(CAST(16, MSB(op_AND_445), DUP(op_AND_445))), CAST(16, MSB(DUP(op_AND_445)), DUP(op_AND_445)))); + RzILOpPure *op_LSHIFT_451 = SHIFTL0(CAST(64, MSB(op_MUL_448), 
DUP(op_MUL_448)), SN(32, 1)); + RzILOpPure *op_RSHIFT_455 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_457 = LOGAND(op_RSHIFT_455, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_463 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_465 = LOGAND(op_RSHIFT_463, SN(32, 0xffff)); + RzILOpPure *op_MUL_468 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_457), DUP(op_AND_457))), CAST(16, MSB(DUP(op_AND_457)), DUP(op_AND_457))), CAST(32, MSB(CAST(16, MSB(op_AND_465), DUP(op_AND_465))), CAST(16, MSB(DUP(op_AND_465)), DUP(op_AND_465)))); + RzILOpPure *op_LSHIFT_471 = SHIFTL0(CAST(64, MSB(op_MUL_468), DUP(op_MUL_468)), SN(32, 1)); + RzILOpPure *op_SUB_472 = SUB(op_LSHIFT_451, op_LSHIFT_471); + RzILOpPure *op_SUB_473 = SUB(CAST(64, MSB(CAST(32, MSB(op_AND_429), DUP(op_AND_429))), CAST(32, MSB(DUP(op_AND_429)), DUP(op_AND_429))), op_SUB_472); + RzILOpPure *op_LT_476 = SLT(op_SUB_473, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_481 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_482 = NEG(op_LSHIFT_481); + RzILOpPure *op_LSHIFT_487 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_490 = SUB(op_LSHIFT_487, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_491 = ITE(op_LT_476, op_NEG_482, op_SUB_490); + RzILOpEffect *gcc_expr_492 = BRANCH(op_EQ_370, EMPTY(), set_usr_field_call_423); + + // h_tmp259 = HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) - (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) - (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) - (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) - (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) 
((Rxx >> 0x0) & 0xffffffff))) - (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) - (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_494 = SETL("h_tmp259", cond_491); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx > ...; + RzILOpEffect *seq_495 = SEQN(2, gcc_expr_492, op_ASSIGN_hybrid_tmp_494); + + // Rxx = ((Rxx & (~(0xffffffff << 0x0))) | ((((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) - (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) - (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) - (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) - (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1)) ? 
((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) - (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) - (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1) : h_tmp259) & 0xffffffff) << 0x0)); + RzILOpPure *op_LSHIFT_259 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0)); + RzILOpPure *op_NOT_260 = LOGNOT(op_LSHIFT_259); + RzILOpPure *op_AND_261 = LOGAND(READ_REG(pkt, Rxx_op, false), op_NOT_260); + RzILOpPure *op_RSHIFT_374 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_376 = LOGAND(op_RSHIFT_374, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_382 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_384 = LOGAND(op_RSHIFT_382, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_390 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_392 = LOGAND(op_RSHIFT_390, SN(32, 0xffff)); + RzILOpPure *op_MUL_395 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_384), DUP(op_AND_384))), CAST(16, MSB(DUP(op_AND_384)), DUP(op_AND_384))), CAST(32, MSB(CAST(16, MSB(op_AND_392), DUP(op_AND_392))), CAST(16, MSB(DUP(op_AND_392)), DUP(op_AND_392)))); + RzILOpPure *op_LSHIFT_398 = SHIFTL0(CAST(64, MSB(op_MUL_395), DUP(op_MUL_395)), SN(32, 1)); + RzILOpPure *op_RSHIFT_402 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_404 = LOGAND(op_RSHIFT_402, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_410 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_412 = LOGAND(op_RSHIFT_410, SN(32, 0xffff)); + RzILOpPure *op_MUL_415 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_404), DUP(op_AND_404))), CAST(16, MSB(DUP(op_AND_404)), DUP(op_AND_404))), CAST(32, MSB(CAST(16, MSB(op_AND_412), DUP(op_AND_412))), CAST(16, MSB(DUP(op_AND_412)), DUP(op_AND_412)))); + RzILOpPure *op_LSHIFT_418 = SHIFTL0(CAST(64, MSB(op_MUL_415), DUP(op_MUL_415)), SN(32, 1)); + RzILOpPure *op_SUB_419 = SUB(op_LSHIFT_398, op_LSHIFT_418); + RzILOpPure *op_SUB_420 = SUB(CAST(64, MSB(CAST(32, MSB(op_AND_376), DUP(op_AND_376))), CAST(32, 
MSB(DUP(op_AND_376)), DUP(op_AND_376))), op_SUB_419); + RzILOpPure *cond_496 = ITE(DUP(op_EQ_370), op_SUB_420, VARL("h_tmp259")); + RzILOpPure *op_AND_498 = LOGAND(cond_496, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_502 = SHIFTL0(op_AND_498, SN(32, 0)); + RzILOpPure *op_OR_503 = LOGOR(op_AND_261, op_LSHIFT_502); + RzILOpEffect *op_ASSIGN_504 = WRITE_REG(bundle, Rxx_op, op_OR_503); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((R ...; + RzILOpEffect *seq_505 = SEQN(2, seq_495, op_ASSIGN_504); + + RzILOpEffect *instruction_sequence = SEQN(2, seq_253, seq_505); + return instruction_sequence; +} + +// Rxx -= cmpy(Rs,Rt*):sat +RzILOpEffect *hex_il_op_m2_cnacsc_s0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rxx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_171 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) - (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) - (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) - (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) - (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) - (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) 
((Rt >> 0x0) & 0xffff)))) << 0x0) - (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_23 = SHIFTRA(Rs, SN(32, 16)); + RzILOpPure *op_AND_25 = LOGAND(op_RSHIFT_23, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_32 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_34 = LOGAND(op_RSHIFT_32, SN(32, 0xffff)); + RzILOpPure *op_MUL_37 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_25), DUP(op_AND_25))), CAST(16, MSB(DUP(op_AND_25)), DUP(op_AND_25))), CAST(32, MSB(CAST(16, MSB(op_AND_34), DUP(op_AND_34))), CAST(16, MSB(DUP(op_AND_34)), DUP(op_AND_34)))); + RzILOpPure *op_LSHIFT_40 = SHIFTL0(CAST(64, MSB(op_MUL_37), DUP(op_MUL_37)), SN(32, 0)); + RzILOpPure *op_RSHIFT_44 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_46 = LOGAND(op_RSHIFT_44, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_52 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_54 = LOGAND(op_RSHIFT_52, SN(32, 0xffff)); + RzILOpPure *op_MUL_57 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_46), DUP(op_AND_46))), CAST(16, MSB(DUP(op_AND_46)), DUP(op_AND_46))), CAST(32, MSB(CAST(16, MSB(op_AND_54), DUP(op_AND_54))), CAST(16, MSB(DUP(op_AND_54)), DUP(op_AND_54)))); + RzILOpPure *op_LSHIFT_60 = SHIFTL0(CAST(64, MSB(op_MUL_57), DUP(op_MUL_57)), SN(32, 0)); + RzILOpPure *op_SUB_61 = SUB(op_LSHIFT_40, op_LSHIFT_60); + RzILOpPure *op_SUB_62 = SUB(CAST(64, MSB(CAST(32, MSB(op_AND_16), DUP(op_AND_16))), CAST(32, MSB(DUP(op_AND_16)), DUP(op_AND_16))), op_SUB_61); + RzILOpPure *op_RSHIFT_71 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_73 = LOGAND(op_RSHIFT_71, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_79 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_81 = LOGAND(op_RSHIFT_79, SN(32, 0xffff)); + RzILOpPure 
*op_RSHIFT_87 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_89 = LOGAND(op_RSHIFT_87, SN(32, 0xffff)); + RzILOpPure *op_MUL_92 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_81), DUP(op_AND_81))), CAST(16, MSB(DUP(op_AND_81)), DUP(op_AND_81))), CAST(32, MSB(CAST(16, MSB(op_AND_89), DUP(op_AND_89))), CAST(16, MSB(DUP(op_AND_89)), DUP(op_AND_89)))); + RzILOpPure *op_LSHIFT_95 = SHIFTL0(CAST(64, MSB(op_MUL_92), DUP(op_MUL_92)), SN(32, 0)); + RzILOpPure *op_RSHIFT_99 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_101 = LOGAND(op_RSHIFT_99, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_107 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_109 = LOGAND(op_RSHIFT_107, SN(32, 0xffff)); + RzILOpPure *op_MUL_112 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_101), DUP(op_AND_101))), CAST(16, MSB(DUP(op_AND_101)), DUP(op_AND_101))), CAST(32, MSB(CAST(16, MSB(op_AND_109), DUP(op_AND_109))), CAST(16, MSB(DUP(op_AND_109)), DUP(op_AND_109)))); + RzILOpPure *op_LSHIFT_115 = SHIFTL0(CAST(64, MSB(op_MUL_112), DUP(op_MUL_112)), SN(32, 0)); + RzILOpPure *op_SUB_116 = SUB(op_LSHIFT_95, op_LSHIFT_115); + RzILOpPure *op_SUB_117 = SUB(CAST(64, MSB(CAST(32, MSB(op_AND_73), DUP(op_AND_73))), CAST(32, MSB(DUP(op_AND_73)), DUP(op_AND_73))), op_SUB_116); + RzILOpPure *op_EQ_118 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_SUB_62), SN(32, 0), SN(32, 0x20)), op_SUB_117); + RzILOpPure *op_RSHIFT_175 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_177 = LOGAND(op_RSHIFT_175, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_183 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_185 = LOGAND(op_RSHIFT_183, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_191 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_193 = LOGAND(op_RSHIFT_191, SN(32, 0xffff)); + RzILOpPure *op_MUL_196 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_185), DUP(op_AND_185))), CAST(16, MSB(DUP(op_AND_185)), DUP(op_AND_185))), CAST(32, MSB(CAST(16, MSB(op_AND_193), DUP(op_AND_193))), CAST(16, MSB(DUP(op_AND_193)), 
DUP(op_AND_193)))); + RzILOpPure *op_LSHIFT_199 = SHIFTL0(CAST(64, MSB(op_MUL_196), DUP(op_MUL_196)), SN(32, 0)); + RzILOpPure *op_RSHIFT_203 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_205 = LOGAND(op_RSHIFT_203, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_211 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_213 = LOGAND(op_RSHIFT_211, SN(32, 0xffff)); + RzILOpPure *op_MUL_216 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_205), DUP(op_AND_205))), CAST(16, MSB(DUP(op_AND_205)), DUP(op_AND_205))), CAST(32, MSB(CAST(16, MSB(op_AND_213), DUP(op_AND_213))), CAST(16, MSB(DUP(op_AND_213)), DUP(op_AND_213)))); + RzILOpPure *op_LSHIFT_219 = SHIFTL0(CAST(64, MSB(op_MUL_216), DUP(op_MUL_216)), SN(32, 0)); + RzILOpPure *op_SUB_220 = SUB(op_LSHIFT_199, op_LSHIFT_219); + RzILOpPure *op_SUB_221 = SUB(CAST(64, MSB(CAST(32, MSB(op_AND_177), DUP(op_AND_177))), CAST(32, MSB(DUP(op_AND_177)), DUP(op_AND_177))), op_SUB_220); + RzILOpPure *op_LT_224 = SLT(op_SUB_221, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_229 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_230 = NEG(op_LSHIFT_229); + RzILOpPure *op_LSHIFT_235 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_238 = SUB(op_LSHIFT_235, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_239 = ITE(op_LT_224, op_NEG_230, op_SUB_238); + RzILOpEffect *gcc_expr_240 = BRANCH(op_EQ_118, EMPTY(), set_usr_field_call_171); + + // h_tmp260 = HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) - (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) - (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) - (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) - (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0))) {{}} 
else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) - (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) - (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_242 = SETL("h_tmp260", cond_239); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx > ...; + RzILOpEffect *seq_243 = SEQN(2, gcc_expr_240, op_ASSIGN_hybrid_tmp_242); + + // Rxx = ((Rxx & (~(0xffffffff << 0x20))) | ((((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) - (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) - (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) - (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) - (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0)) ? 
((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) - (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) - (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0) : h_tmp260) & 0xffffffff) << 0x20)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0x20)); + RzILOpPure *op_NOT_6 = LOGNOT(op_LSHIFT_5); + RzILOpPure *op_AND_7 = LOGAND(READ_REG(pkt, Rxx_op, false), op_NOT_6); + RzILOpPure *op_RSHIFT_122 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_124 = LOGAND(op_RSHIFT_122, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_130 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_132 = LOGAND(op_RSHIFT_130, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_138 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_140 = LOGAND(op_RSHIFT_138, SN(32, 0xffff)); + RzILOpPure *op_MUL_143 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_132), DUP(op_AND_132))), CAST(16, MSB(DUP(op_AND_132)), DUP(op_AND_132))), CAST(32, MSB(CAST(16, MSB(op_AND_140), DUP(op_AND_140))), CAST(16, MSB(DUP(op_AND_140)), DUP(op_AND_140)))); + RzILOpPure *op_LSHIFT_146 = SHIFTL0(CAST(64, MSB(op_MUL_143), DUP(op_MUL_143)), SN(32, 0)); + RzILOpPure *op_RSHIFT_150 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_152 = LOGAND(op_RSHIFT_150, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_158 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_160 = LOGAND(op_RSHIFT_158, SN(32, 0xffff)); + RzILOpPure *op_MUL_163 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_152), DUP(op_AND_152))), CAST(16, MSB(DUP(op_AND_152)), DUP(op_AND_152))), CAST(32, MSB(CAST(16, MSB(op_AND_160), DUP(op_AND_160))), CAST(16, MSB(DUP(op_AND_160)), DUP(op_AND_160)))); + RzILOpPure *op_LSHIFT_166 = SHIFTL0(CAST(64, MSB(op_MUL_163), DUP(op_MUL_163)), SN(32, 0)); + RzILOpPure *op_SUB_167 = SUB(op_LSHIFT_146, op_LSHIFT_166); + RzILOpPure *op_SUB_168 = SUB(CAST(64, MSB(CAST(32, MSB(op_AND_124), DUP(op_AND_124))), CAST(32, 
MSB(DUP(op_AND_124)), DUP(op_AND_124))), op_SUB_167); + RzILOpPure *cond_244 = ITE(DUP(op_EQ_118), op_SUB_168, VARL("h_tmp260")); + RzILOpPure *op_AND_246 = LOGAND(cond_244, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_250 = SHIFTL0(op_AND_246, SN(32, 0x20)); + RzILOpPure *op_OR_251 = LOGOR(op_AND_7, op_LSHIFT_250); + RzILOpEffect *op_ASSIGN_252 = WRITE_REG(bundle, Rxx_op, op_OR_251); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((R ...; + RzILOpEffect *seq_253 = SEQN(2, seq_243, op_ASSIGN_252); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_423 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) - (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) - (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) - (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_268 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_270 = LOGAND(op_RSHIFT_268, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_276 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_278 = LOGAND(op_RSHIFT_276, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_284 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_286 = LOGAND(op_RSHIFT_284, SN(32, 0xffff)); + RzILOpPure *op_MUL_289 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_278), DUP(op_AND_278))), CAST(16, MSB(DUP(op_AND_278)), DUP(op_AND_278))), CAST(32, MSB(CAST(16, MSB(op_AND_286), DUP(op_AND_286))), CAST(16, MSB(DUP(op_AND_286)), DUP(op_AND_286)))); + RzILOpPure *op_LSHIFT_292 = SHIFTL0(CAST(64, MSB(op_MUL_289), DUP(op_MUL_289)), SN(32, 0)); + RzILOpPure *op_RSHIFT_296 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_298 = LOGAND(op_RSHIFT_296, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_304 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_306 = LOGAND(op_RSHIFT_304, SN(32, 0xffff)); + RzILOpPure *op_MUL_309 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_298), DUP(op_AND_298))), CAST(16, MSB(DUP(op_AND_298)), DUP(op_AND_298))), CAST(32, MSB(CAST(16, MSB(op_AND_306), DUP(op_AND_306))), CAST(16, MSB(DUP(op_AND_306)), DUP(op_AND_306)))); + RzILOpPure *op_LSHIFT_312 = SHIFTL0(CAST(64, MSB(op_MUL_309), DUP(op_MUL_309)), SN(32, 0)); + RzILOpPure *op_ADD_313 = ADD(op_LSHIFT_292, op_LSHIFT_312); + RzILOpPure *op_SUB_314 = SUB(CAST(64, MSB(CAST(32, MSB(op_AND_270), DUP(op_AND_270))), CAST(32, MSB(DUP(op_AND_270)), DUP(op_AND_270))), op_ADD_313); + RzILOpPure *op_RSHIFT_323 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_325 = LOGAND(op_RSHIFT_323, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_331 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_333 = LOGAND(op_RSHIFT_331, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_339 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_341 = LOGAND(op_RSHIFT_339, SN(32, 
0xffff)); + RzILOpPure *op_MUL_344 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_333), DUP(op_AND_333))), CAST(16, MSB(DUP(op_AND_333)), DUP(op_AND_333))), CAST(32, MSB(CAST(16, MSB(op_AND_341), DUP(op_AND_341))), CAST(16, MSB(DUP(op_AND_341)), DUP(op_AND_341)))); + RzILOpPure *op_LSHIFT_347 = SHIFTL0(CAST(64, MSB(op_MUL_344), DUP(op_MUL_344)), SN(32, 0)); + RzILOpPure *op_RSHIFT_351 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_353 = LOGAND(op_RSHIFT_351, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_359 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_361 = LOGAND(op_RSHIFT_359, SN(32, 0xffff)); + RzILOpPure *op_MUL_364 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_353), DUP(op_AND_353))), CAST(16, MSB(DUP(op_AND_353)), DUP(op_AND_353))), CAST(32, MSB(CAST(16, MSB(op_AND_361), DUP(op_AND_361))), CAST(16, MSB(DUP(op_AND_361)), DUP(op_AND_361)))); + RzILOpPure *op_LSHIFT_367 = SHIFTL0(CAST(64, MSB(op_MUL_364), DUP(op_MUL_364)), SN(32, 0)); + RzILOpPure *op_ADD_368 = ADD(op_LSHIFT_347, op_LSHIFT_367); + RzILOpPure *op_SUB_369 = SUB(CAST(64, MSB(CAST(32, MSB(op_AND_325), DUP(op_AND_325))), CAST(32, MSB(DUP(op_AND_325)), DUP(op_AND_325))), op_ADD_368); + RzILOpPure *op_EQ_370 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_SUB_314), SN(32, 0), SN(32, 0x20)), op_SUB_369); + RzILOpPure *op_RSHIFT_427 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_429 = LOGAND(op_RSHIFT_427, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_435 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_437 = LOGAND(op_RSHIFT_435, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_443 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_445 = LOGAND(op_RSHIFT_443, SN(32, 0xffff)); + RzILOpPure *op_MUL_448 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_437), DUP(op_AND_437))), CAST(16, MSB(DUP(op_AND_437)), DUP(op_AND_437))), CAST(32, MSB(CAST(16, MSB(op_AND_445), DUP(op_AND_445))), CAST(16, MSB(DUP(op_AND_445)), DUP(op_AND_445)))); + RzILOpPure *op_LSHIFT_451 = SHIFTL0(CAST(64, MSB(op_MUL_448), 
DUP(op_MUL_448)), SN(32, 0)); + RzILOpPure *op_RSHIFT_455 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_457 = LOGAND(op_RSHIFT_455, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_463 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_465 = LOGAND(op_RSHIFT_463, SN(32, 0xffff)); + RzILOpPure *op_MUL_468 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_457), DUP(op_AND_457))), CAST(16, MSB(DUP(op_AND_457)), DUP(op_AND_457))), CAST(32, MSB(CAST(16, MSB(op_AND_465), DUP(op_AND_465))), CAST(16, MSB(DUP(op_AND_465)), DUP(op_AND_465)))); + RzILOpPure *op_LSHIFT_471 = SHIFTL0(CAST(64, MSB(op_MUL_468), DUP(op_MUL_468)), SN(32, 0)); + RzILOpPure *op_ADD_472 = ADD(op_LSHIFT_451, op_LSHIFT_471); + RzILOpPure *op_SUB_473 = SUB(CAST(64, MSB(CAST(32, MSB(op_AND_429), DUP(op_AND_429))), CAST(32, MSB(DUP(op_AND_429)), DUP(op_AND_429))), op_ADD_472); + RzILOpPure *op_LT_476 = SLT(op_SUB_473, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_481 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_482 = NEG(op_LSHIFT_481); + RzILOpPure *op_LSHIFT_487 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_490 = SUB(op_LSHIFT_487, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_491 = ITE(op_LT_476, op_NEG_482, op_SUB_490); + RzILOpEffect *gcc_expr_492 = BRANCH(op_EQ_370, EMPTY(), set_usr_field_call_423); + + // h_tmp261 = HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) - (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) - (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) 
((Rxx >> 0x0) & 0xffffffff))) - (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_494 = SETL("h_tmp261", cond_491); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx > ...; + RzILOpEffect *seq_495 = SEQN(2, gcc_expr_492, op_ASSIGN_hybrid_tmp_494); + + // Rxx = ((Rxx & (~(0xffffffff << 0x0))) | ((((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) - (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) - (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0)) ? 
((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) - (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0) : h_tmp261) & 0xffffffff) << 0x0)); + RzILOpPure *op_LSHIFT_259 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0)); + RzILOpPure *op_NOT_260 = LOGNOT(op_LSHIFT_259); + RzILOpPure *op_AND_261 = LOGAND(READ_REG(pkt, Rxx_op, false), op_NOT_260); + RzILOpPure *op_RSHIFT_374 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_376 = LOGAND(op_RSHIFT_374, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_382 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_384 = LOGAND(op_RSHIFT_382, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_390 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_392 = LOGAND(op_RSHIFT_390, SN(32, 0xffff)); + RzILOpPure *op_MUL_395 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_384), DUP(op_AND_384))), CAST(16, MSB(DUP(op_AND_384)), DUP(op_AND_384))), CAST(32, MSB(CAST(16, MSB(op_AND_392), DUP(op_AND_392))), CAST(16, MSB(DUP(op_AND_392)), DUP(op_AND_392)))); + RzILOpPure *op_LSHIFT_398 = SHIFTL0(CAST(64, MSB(op_MUL_395), DUP(op_MUL_395)), SN(32, 0)); + RzILOpPure *op_RSHIFT_402 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_404 = LOGAND(op_RSHIFT_402, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_410 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_412 = LOGAND(op_RSHIFT_410, SN(32, 0xffff)); + RzILOpPure *op_MUL_415 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_404), DUP(op_AND_404))), CAST(16, MSB(DUP(op_AND_404)), DUP(op_AND_404))), CAST(32, MSB(CAST(16, MSB(op_AND_412), DUP(op_AND_412))), CAST(16, MSB(DUP(op_AND_412)), DUP(op_AND_412)))); + RzILOpPure *op_LSHIFT_418 = SHIFTL0(CAST(64, MSB(op_MUL_415), DUP(op_MUL_415)), SN(32, 0)); + RzILOpPure *op_ADD_419 = ADD(op_LSHIFT_398, op_LSHIFT_418); + RzILOpPure *op_SUB_420 = SUB(CAST(64, MSB(CAST(32, MSB(op_AND_376), DUP(op_AND_376))), CAST(32, 
MSB(DUP(op_AND_376)), DUP(op_AND_376))), op_ADD_419); + RzILOpPure *cond_496 = ITE(DUP(op_EQ_370), op_SUB_420, VARL("h_tmp261")); + RzILOpPure *op_AND_498 = LOGAND(cond_496, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_502 = SHIFTL0(op_AND_498, SN(32, 0)); + RzILOpPure *op_OR_503 = LOGOR(op_AND_261, op_LSHIFT_502); + RzILOpEffect *op_ASSIGN_504 = WRITE_REG(bundle, Rxx_op, op_OR_503); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((R ...; + RzILOpEffect *seq_505 = SEQN(2, seq_495, op_ASSIGN_504); + + RzILOpEffect *instruction_sequence = SEQN(2, seq_253, seq_505); + return instruction_sequence; +} + +// Rxx -= cmpy(Rs,Rt*):<<1:sat +RzILOpEffect *hex_il_op_m2_cnacsc_s1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rxx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_171 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) - (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) - (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) - (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) - (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) - (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) 
((Rt >> 0x0) & 0xffff)))) << 0x1) - (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_23 = SHIFTRA(Rs, SN(32, 16)); + RzILOpPure *op_AND_25 = LOGAND(op_RSHIFT_23, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_32 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_34 = LOGAND(op_RSHIFT_32, SN(32, 0xffff)); + RzILOpPure *op_MUL_37 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_25), DUP(op_AND_25))), CAST(16, MSB(DUP(op_AND_25)), DUP(op_AND_25))), CAST(32, MSB(CAST(16, MSB(op_AND_34), DUP(op_AND_34))), CAST(16, MSB(DUP(op_AND_34)), DUP(op_AND_34)))); + RzILOpPure *op_LSHIFT_40 = SHIFTL0(CAST(64, MSB(op_MUL_37), DUP(op_MUL_37)), SN(32, 1)); + RzILOpPure *op_RSHIFT_44 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_46 = LOGAND(op_RSHIFT_44, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_52 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_54 = LOGAND(op_RSHIFT_52, SN(32, 0xffff)); + RzILOpPure *op_MUL_57 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_46), DUP(op_AND_46))), CAST(16, MSB(DUP(op_AND_46)), DUP(op_AND_46))), CAST(32, MSB(CAST(16, MSB(op_AND_54), DUP(op_AND_54))), CAST(16, MSB(DUP(op_AND_54)), DUP(op_AND_54)))); + RzILOpPure *op_LSHIFT_60 = SHIFTL0(CAST(64, MSB(op_MUL_57), DUP(op_MUL_57)), SN(32, 1)); + RzILOpPure *op_SUB_61 = SUB(op_LSHIFT_40, op_LSHIFT_60); + RzILOpPure *op_SUB_62 = SUB(CAST(64, MSB(CAST(32, MSB(op_AND_16), DUP(op_AND_16))), CAST(32, MSB(DUP(op_AND_16)), DUP(op_AND_16))), op_SUB_61); + RzILOpPure *op_RSHIFT_71 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_73 = LOGAND(op_RSHIFT_71, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_79 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_81 = LOGAND(op_RSHIFT_79, SN(32, 0xffff)); + RzILOpPure 
*op_RSHIFT_87 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_89 = LOGAND(op_RSHIFT_87, SN(32, 0xffff)); + RzILOpPure *op_MUL_92 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_81), DUP(op_AND_81))), CAST(16, MSB(DUP(op_AND_81)), DUP(op_AND_81))), CAST(32, MSB(CAST(16, MSB(op_AND_89), DUP(op_AND_89))), CAST(16, MSB(DUP(op_AND_89)), DUP(op_AND_89)))); + RzILOpPure *op_LSHIFT_95 = SHIFTL0(CAST(64, MSB(op_MUL_92), DUP(op_MUL_92)), SN(32, 1)); + RzILOpPure *op_RSHIFT_99 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_101 = LOGAND(op_RSHIFT_99, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_107 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_109 = LOGAND(op_RSHIFT_107, SN(32, 0xffff)); + RzILOpPure *op_MUL_112 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_101), DUP(op_AND_101))), CAST(16, MSB(DUP(op_AND_101)), DUP(op_AND_101))), CAST(32, MSB(CAST(16, MSB(op_AND_109), DUP(op_AND_109))), CAST(16, MSB(DUP(op_AND_109)), DUP(op_AND_109)))); + RzILOpPure *op_LSHIFT_115 = SHIFTL0(CAST(64, MSB(op_MUL_112), DUP(op_MUL_112)), SN(32, 1)); + RzILOpPure *op_SUB_116 = SUB(op_LSHIFT_95, op_LSHIFT_115); + RzILOpPure *op_SUB_117 = SUB(CAST(64, MSB(CAST(32, MSB(op_AND_73), DUP(op_AND_73))), CAST(32, MSB(DUP(op_AND_73)), DUP(op_AND_73))), op_SUB_116); + RzILOpPure *op_EQ_118 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_SUB_62), SN(32, 0), SN(32, 0x20)), op_SUB_117); + RzILOpPure *op_RSHIFT_175 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_177 = LOGAND(op_RSHIFT_175, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_183 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_185 = LOGAND(op_RSHIFT_183, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_191 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_193 = LOGAND(op_RSHIFT_191, SN(32, 0xffff)); + RzILOpPure *op_MUL_196 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_185), DUP(op_AND_185))), CAST(16, MSB(DUP(op_AND_185)), DUP(op_AND_185))), CAST(32, MSB(CAST(16, MSB(op_AND_193), DUP(op_AND_193))), CAST(16, MSB(DUP(op_AND_193)), 
DUP(op_AND_193)))); + RzILOpPure *op_LSHIFT_199 = SHIFTL0(CAST(64, MSB(op_MUL_196), DUP(op_MUL_196)), SN(32, 1)); + RzILOpPure *op_RSHIFT_203 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_205 = LOGAND(op_RSHIFT_203, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_211 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_213 = LOGAND(op_RSHIFT_211, SN(32, 0xffff)); + RzILOpPure *op_MUL_216 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_205), DUP(op_AND_205))), CAST(16, MSB(DUP(op_AND_205)), DUP(op_AND_205))), CAST(32, MSB(CAST(16, MSB(op_AND_213), DUP(op_AND_213))), CAST(16, MSB(DUP(op_AND_213)), DUP(op_AND_213)))); + RzILOpPure *op_LSHIFT_219 = SHIFTL0(CAST(64, MSB(op_MUL_216), DUP(op_MUL_216)), SN(32, 1)); + RzILOpPure *op_SUB_220 = SUB(op_LSHIFT_199, op_LSHIFT_219); + RzILOpPure *op_SUB_221 = SUB(CAST(64, MSB(CAST(32, MSB(op_AND_177), DUP(op_AND_177))), CAST(32, MSB(DUP(op_AND_177)), DUP(op_AND_177))), op_SUB_220); + RzILOpPure *op_LT_224 = SLT(op_SUB_221, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_229 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_230 = NEG(op_LSHIFT_229); + RzILOpPure *op_LSHIFT_235 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_238 = SUB(op_LSHIFT_235, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_239 = ITE(op_LT_224, op_NEG_230, op_SUB_238); + RzILOpEffect *gcc_expr_240 = BRANCH(op_EQ_118, EMPTY(), set_usr_field_call_171); + + // h_tmp262 = HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) - (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) - (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) - (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) - (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1))) {{}} 
else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) - (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) - (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_242 = SETL("h_tmp262", cond_239); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx > ...; + RzILOpEffect *seq_243 = SEQN(2, gcc_expr_240, op_ASSIGN_hybrid_tmp_242); + + // Rxx = ((Rxx & (~(0xffffffff << 0x20))) | ((((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) - (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) - (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) - (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) - (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1)) ? 
((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) - (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) - (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1) : h_tmp262) & 0xffffffff) << 0x20)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0x20)); + RzILOpPure *op_NOT_6 = LOGNOT(op_LSHIFT_5); + RzILOpPure *op_AND_7 = LOGAND(READ_REG(pkt, Rxx_op, false), op_NOT_6); + RzILOpPure *op_RSHIFT_122 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_124 = LOGAND(op_RSHIFT_122, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_130 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_132 = LOGAND(op_RSHIFT_130, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_138 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_140 = LOGAND(op_RSHIFT_138, SN(32, 0xffff)); + RzILOpPure *op_MUL_143 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_132), DUP(op_AND_132))), CAST(16, MSB(DUP(op_AND_132)), DUP(op_AND_132))), CAST(32, MSB(CAST(16, MSB(op_AND_140), DUP(op_AND_140))), CAST(16, MSB(DUP(op_AND_140)), DUP(op_AND_140)))); + RzILOpPure *op_LSHIFT_146 = SHIFTL0(CAST(64, MSB(op_MUL_143), DUP(op_MUL_143)), SN(32, 1)); + RzILOpPure *op_RSHIFT_150 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_152 = LOGAND(op_RSHIFT_150, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_158 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_160 = LOGAND(op_RSHIFT_158, SN(32, 0xffff)); + RzILOpPure *op_MUL_163 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_152), DUP(op_AND_152))), CAST(16, MSB(DUP(op_AND_152)), DUP(op_AND_152))), CAST(32, MSB(CAST(16, MSB(op_AND_160), DUP(op_AND_160))), CAST(16, MSB(DUP(op_AND_160)), DUP(op_AND_160)))); + RzILOpPure *op_LSHIFT_166 = SHIFTL0(CAST(64, MSB(op_MUL_163), DUP(op_MUL_163)), SN(32, 1)); + RzILOpPure *op_SUB_167 = SUB(op_LSHIFT_146, op_LSHIFT_166); + RzILOpPure *op_SUB_168 = SUB(CAST(64, MSB(CAST(32, MSB(op_AND_124), DUP(op_AND_124))), CAST(32, 
MSB(DUP(op_AND_124)), DUP(op_AND_124))), op_SUB_167); + RzILOpPure *cond_244 = ITE(DUP(op_EQ_118), op_SUB_168, VARL("h_tmp262")); + RzILOpPure *op_AND_246 = LOGAND(cond_244, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_250 = SHIFTL0(op_AND_246, SN(32, 0x20)); + RzILOpPure *op_OR_251 = LOGOR(op_AND_7, op_LSHIFT_250); + RzILOpEffect *op_ASSIGN_252 = WRITE_REG(bundle, Rxx_op, op_OR_251); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((R ...; + RzILOpEffect *seq_253 = SEQN(2, seq_243, op_ASSIGN_252); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_423 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) - (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) - (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) - (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_268 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_270 = LOGAND(op_RSHIFT_268, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_276 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_278 = LOGAND(op_RSHIFT_276, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_284 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_286 = LOGAND(op_RSHIFT_284, SN(32, 0xffff)); + RzILOpPure *op_MUL_289 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_278), DUP(op_AND_278))), CAST(16, MSB(DUP(op_AND_278)), DUP(op_AND_278))), CAST(32, MSB(CAST(16, MSB(op_AND_286), DUP(op_AND_286))), CAST(16, MSB(DUP(op_AND_286)), DUP(op_AND_286)))); + RzILOpPure *op_LSHIFT_292 = SHIFTL0(CAST(64, MSB(op_MUL_289), DUP(op_MUL_289)), SN(32, 1)); + RzILOpPure *op_RSHIFT_296 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_298 = LOGAND(op_RSHIFT_296, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_304 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_306 = LOGAND(op_RSHIFT_304, SN(32, 0xffff)); + RzILOpPure *op_MUL_309 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_298), DUP(op_AND_298))), CAST(16, MSB(DUP(op_AND_298)), DUP(op_AND_298))), CAST(32, MSB(CAST(16, MSB(op_AND_306), DUP(op_AND_306))), CAST(16, MSB(DUP(op_AND_306)), DUP(op_AND_306)))); + RzILOpPure *op_LSHIFT_312 = SHIFTL0(CAST(64, MSB(op_MUL_309), DUP(op_MUL_309)), SN(32, 1)); + RzILOpPure *op_ADD_313 = ADD(op_LSHIFT_292, op_LSHIFT_312); + RzILOpPure *op_SUB_314 = SUB(CAST(64, MSB(CAST(32, MSB(op_AND_270), DUP(op_AND_270))), CAST(32, MSB(DUP(op_AND_270)), DUP(op_AND_270))), op_ADD_313); + RzILOpPure *op_RSHIFT_323 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_325 = LOGAND(op_RSHIFT_323, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_331 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_333 = LOGAND(op_RSHIFT_331, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_339 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_341 = LOGAND(op_RSHIFT_339, SN(32, 
0xffff)); + RzILOpPure *op_MUL_344 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_333), DUP(op_AND_333))), CAST(16, MSB(DUP(op_AND_333)), DUP(op_AND_333))), CAST(32, MSB(CAST(16, MSB(op_AND_341), DUP(op_AND_341))), CAST(16, MSB(DUP(op_AND_341)), DUP(op_AND_341)))); + RzILOpPure *op_LSHIFT_347 = SHIFTL0(CAST(64, MSB(op_MUL_344), DUP(op_MUL_344)), SN(32, 1)); + RzILOpPure *op_RSHIFT_351 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_353 = LOGAND(op_RSHIFT_351, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_359 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_361 = LOGAND(op_RSHIFT_359, SN(32, 0xffff)); + RzILOpPure *op_MUL_364 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_353), DUP(op_AND_353))), CAST(16, MSB(DUP(op_AND_353)), DUP(op_AND_353))), CAST(32, MSB(CAST(16, MSB(op_AND_361), DUP(op_AND_361))), CAST(16, MSB(DUP(op_AND_361)), DUP(op_AND_361)))); + RzILOpPure *op_LSHIFT_367 = SHIFTL0(CAST(64, MSB(op_MUL_364), DUP(op_MUL_364)), SN(32, 1)); + RzILOpPure *op_ADD_368 = ADD(op_LSHIFT_347, op_LSHIFT_367); + RzILOpPure *op_SUB_369 = SUB(CAST(64, MSB(CAST(32, MSB(op_AND_325), DUP(op_AND_325))), CAST(32, MSB(DUP(op_AND_325)), DUP(op_AND_325))), op_ADD_368); + RzILOpPure *op_EQ_370 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_SUB_314), SN(32, 0), SN(32, 0x20)), op_SUB_369); + RzILOpPure *op_RSHIFT_427 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_429 = LOGAND(op_RSHIFT_427, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_435 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_437 = LOGAND(op_RSHIFT_435, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_443 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_445 = LOGAND(op_RSHIFT_443, SN(32, 0xffff)); + RzILOpPure *op_MUL_448 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_437), DUP(op_AND_437))), CAST(16, MSB(DUP(op_AND_437)), DUP(op_AND_437))), CAST(32, MSB(CAST(16, MSB(op_AND_445), DUP(op_AND_445))), CAST(16, MSB(DUP(op_AND_445)), DUP(op_AND_445)))); + RzILOpPure *op_LSHIFT_451 = SHIFTL0(CAST(64, MSB(op_MUL_448), 
DUP(op_MUL_448)), SN(32, 1)); + RzILOpPure *op_RSHIFT_455 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_457 = LOGAND(op_RSHIFT_455, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_463 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_465 = LOGAND(op_RSHIFT_463, SN(32, 0xffff)); + RzILOpPure *op_MUL_468 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_457), DUP(op_AND_457))), CAST(16, MSB(DUP(op_AND_457)), DUP(op_AND_457))), CAST(32, MSB(CAST(16, MSB(op_AND_465), DUP(op_AND_465))), CAST(16, MSB(DUP(op_AND_465)), DUP(op_AND_465)))); + RzILOpPure *op_LSHIFT_471 = SHIFTL0(CAST(64, MSB(op_MUL_468), DUP(op_MUL_468)), SN(32, 1)); + RzILOpPure *op_ADD_472 = ADD(op_LSHIFT_451, op_LSHIFT_471); + RzILOpPure *op_SUB_473 = SUB(CAST(64, MSB(CAST(32, MSB(op_AND_429), DUP(op_AND_429))), CAST(32, MSB(DUP(op_AND_429)), DUP(op_AND_429))), op_ADD_472); + RzILOpPure *op_LT_476 = SLT(op_SUB_473, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_481 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_482 = NEG(op_LSHIFT_481); + RzILOpPure *op_LSHIFT_487 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_490 = SUB(op_LSHIFT_487, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_491 = ITE(op_LT_476, op_NEG_482, op_SUB_490); + RzILOpEffect *gcc_expr_492 = BRANCH(op_EQ_370, EMPTY(), set_usr_field_call_423); + + // h_tmp263 = HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) - (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) - (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) 
((Rxx >> 0x0) & 0xffffffff))) - (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_494 = SETL("h_tmp263", cond_491); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx > ...; + RzILOpEffect *seq_495 = SEQN(2, gcc_expr_492, op_ASSIGN_hybrid_tmp_494); + + // Rxx = ((Rxx & (~(0xffffffff << 0x0))) | ((((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) - (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) - (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1)) ? 
((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) - (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1) : h_tmp263) & 0xffffffff) << 0x0)); + RzILOpPure *op_LSHIFT_259 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0)); + RzILOpPure *op_NOT_260 = LOGNOT(op_LSHIFT_259); + RzILOpPure *op_AND_261 = LOGAND(READ_REG(pkt, Rxx_op, false), op_NOT_260); + RzILOpPure *op_RSHIFT_374 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_376 = LOGAND(op_RSHIFT_374, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_382 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_384 = LOGAND(op_RSHIFT_382, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_390 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_392 = LOGAND(op_RSHIFT_390, SN(32, 0xffff)); + RzILOpPure *op_MUL_395 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_384), DUP(op_AND_384))), CAST(16, MSB(DUP(op_AND_384)), DUP(op_AND_384))), CAST(32, MSB(CAST(16, MSB(op_AND_392), DUP(op_AND_392))), CAST(16, MSB(DUP(op_AND_392)), DUP(op_AND_392)))); + RzILOpPure *op_LSHIFT_398 = SHIFTL0(CAST(64, MSB(op_MUL_395), DUP(op_MUL_395)), SN(32, 1)); + RzILOpPure *op_RSHIFT_402 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_404 = LOGAND(op_RSHIFT_402, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_410 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_412 = LOGAND(op_RSHIFT_410, SN(32, 0xffff)); + RzILOpPure *op_MUL_415 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_404), DUP(op_AND_404))), CAST(16, MSB(DUP(op_AND_404)), DUP(op_AND_404))), CAST(32, MSB(CAST(16, MSB(op_AND_412), DUP(op_AND_412))), CAST(16, MSB(DUP(op_AND_412)), DUP(op_AND_412)))); + RzILOpPure *op_LSHIFT_418 = SHIFTL0(CAST(64, MSB(op_MUL_415), DUP(op_MUL_415)), SN(32, 1)); + RzILOpPure *op_ADD_419 = ADD(op_LSHIFT_398, op_LSHIFT_418); + RzILOpPure *op_SUB_420 = SUB(CAST(64, MSB(CAST(32, MSB(op_AND_376), DUP(op_AND_376))), CAST(32, 
MSB(DUP(op_AND_376)), DUP(op_AND_376))), op_ADD_419); + RzILOpPure *cond_496 = ITE(DUP(op_EQ_370), op_SUB_420, VARL("h_tmp263")); + RzILOpPure *op_AND_498 = LOGAND(cond_496, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_502 = SHIFTL0(op_AND_498, SN(32, 0)); + RzILOpPure *op_OR_503 = LOGOR(op_AND_261, op_LSHIFT_502); + RzILOpEffect *op_ASSIGN_504 = WRITE_REG(bundle, Rxx_op, op_OR_503); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((R ...; + RzILOpEffect *seq_505 = SEQN(2, seq_495, op_ASSIGN_504); + + RzILOpEffect *instruction_sequence = SEQN(2, seq_253, seq_505); + return instruction_sequence; +} + +// Rxx += mpy(Rs,Rt) +RzILOpEffect *hex_il_op_m2_dpmpyss_acc_s0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rxx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rxx = Rxx + ((st64) Rs) * ((st64) Rt); + RzILOpPure *op_MUL_5 = MUL(CAST(64, MSB(Rs), DUP(Rs)), CAST(64, MSB(Rt), DUP(Rt))); + RzILOpPure *op_ADD_6 = ADD(READ_REG(pkt, Rxx_op, false), op_MUL_5); + RzILOpEffect *op_ASSIGN_7 = WRITE_REG(bundle, Rxx_op, op_ADD_6); + + RzILOpEffect *instruction_sequence = op_ASSIGN_7; + return instruction_sequence; +} + +// Rxx -= mpy(Rs,Rt) +RzILOpEffect *hex_il_op_m2_dpmpyss_nac_s0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rxx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rxx = Rxx - ((st64) Rs) * ((st64) Rt); + RzILOpPure *op_MUL_5 = MUL(CAST(64, MSB(Rs), DUP(Rs)), CAST(64, MSB(Rt), DUP(Rt))); + RzILOpPure *op_SUB_6 = SUB(READ_REG(pkt, Rxx_op, false), 
op_MUL_5); + RzILOpEffect *op_ASSIGN_7 = WRITE_REG(bundle, Rxx_op, op_SUB_6); + + RzILOpEffect *instruction_sequence = op_ASSIGN_7; + return instruction_sequence; +} + +// Rd = mpy(Rs,Rt):rnd +RzILOpEffect *hex_il_op_m2_dpmpyss_rnd_s0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rd = ((st32) (((st64) Rs) * ((st64) Rt) + 0x80000000 >> 0x20)); + RzILOpPure *op_MUL_5 = MUL(CAST(64, MSB(Rs), DUP(Rs)), CAST(64, MSB(Rt), DUP(Rt))); + RzILOpPure *op_ADD_7 = ADD(op_MUL_5, SN(64, 0x80000000)); + RzILOpPure *op_RSHIFT_9 = SHIFTRA(op_ADD_7, SN(32, 0x20)); + RzILOpEffect *op_ASSIGN_11 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(op_RSHIFT_9), DUP(op_RSHIFT_9))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_11; + return instruction_sequence; +} + +// Rdd = mpy(Rs,Rt) +RzILOpEffect *hex_il_op_m2_dpmpyss_s0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rdd = ((st64) Rs) * ((st64) Rt); + RzILOpPure *op_MUL_5 = MUL(CAST(64, MSB(Rs), DUP(Rs)), CAST(64, MSB(Rt), DUP(Rt))); + RzILOpEffect *op_ASSIGN_6 = WRITE_REG(bundle, Rdd_op, op_MUL_5); + + RzILOpEffect *instruction_sequence = op_ASSIGN_6; + return instruction_sequence; +} + +// Rxx += mpyu(Rs,Rt) +RzILOpEffect *hex_il_op_m2_dpmpyuu_acc_s0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rxx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', 
false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rxx = ((st64) ((ut64) Rxx) + ((ut64) ((ut32) Rs)) * ((ut64) ((ut32) Rt))); + RzILOpPure *op_MUL_7 = MUL(CAST(64, IL_FALSE, CAST(32, IL_FALSE, Rs)), CAST(64, IL_FALSE, CAST(32, IL_FALSE, Rt))); + RzILOpPure *op_ADD_9 = ADD(CAST(64, IL_FALSE, READ_REG(pkt, Rxx_op, false)), op_MUL_7); + RzILOpEffect *op_ASSIGN_11 = WRITE_REG(bundle, Rxx_op, CAST(64, IL_FALSE, op_ADD_9)); + + RzILOpEffect *instruction_sequence = op_ASSIGN_11; + return instruction_sequence; +} + +// Rxx -= mpyu(Rs,Rt) +RzILOpEffect *hex_il_op_m2_dpmpyuu_nac_s0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rxx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rxx = ((st64) ((ut64) Rxx) - ((ut64) ((ut32) Rs)) * ((ut64) ((ut32) Rt))); + RzILOpPure *op_MUL_7 = MUL(CAST(64, IL_FALSE, CAST(32, IL_FALSE, Rs)), CAST(64, IL_FALSE, CAST(32, IL_FALSE, Rt))); + RzILOpPure *op_SUB_9 = SUB(CAST(64, IL_FALSE, READ_REG(pkt, Rxx_op, false)), op_MUL_7); + RzILOpEffect *op_ASSIGN_11 = WRITE_REG(bundle, Rxx_op, CAST(64, IL_FALSE, op_SUB_9)); + + RzILOpEffect *instruction_sequence = op_ASSIGN_11; + return instruction_sequence; +} + +// Rdd = mpyu(Rs,Rt) +RzILOpEffect *hex_il_op_m2_dpmpyuu_s0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rdd = ((st64) ((ut64) ((ut32) Rs)) * ((ut64) ((ut32) Rt))); + RzILOpPure *op_MUL_7 = 
MUL(CAST(64, IL_FALSE, CAST(32, IL_FALSE, Rs)), CAST(64, IL_FALSE, CAST(32, IL_FALSE, Rt))); + RzILOpEffect *op_ASSIGN_9 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_MUL_7)); + + RzILOpEffect *instruction_sequence = op_ASSIGN_9; + return instruction_sequence; +} + +// Rd = mpy(Rs,Rt.h):<<1:rnd:sat +RzILOpEffect *hex_il_op_m2_hmmpyh_rs1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_86 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) Rs) * sextract64(((ut64) ((st16) ((Rt >> 0x10) & 0xffff))), 0x0, 0x10) << 0x1) + ((st64) 0x8000) >> 0x10)), 0x0, 0x20) == ((((st64) Rs) * sextract64(((ut64) ((st16) ((Rt >> 0x10) & 0xffff))), 0x0, 0x10) << 0x1) + ((st64) 0x8000) >> 0x10))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((((st64) Rs) * sextract64(((ut64) ((st16) ((Rt >> 0x10) & 0xffff))), 0x0, 0x10) << 0x1) + ((st64) 0x8000) >> 0x10) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_13 = SHIFTRA(Rt, SN(32, 16)); + RzILOpPure *op_AND_15 = LOGAND(op_RSHIFT_13, SN(32, 0xffff)); + RzILOpPure *op_MUL_22 = MUL(CAST(64, MSB(Rs), DUP(Rs)), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_15), DUP(op_AND_15))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_24 = SHIFTL0(op_MUL_22, SN(32, 1)); + RzILOpPure *op_ADD_27 = ADD(op_LSHIFT_24, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_29 = SHIFTRA(op_ADD_27, SN(32, 16)); + RzILOpPure *op_RSHIFT_42 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_44 = LOGAND(op_RSHIFT_42, SN(32, 0xffff)); + RzILOpPure *op_MUL_51 = MUL(CAST(64, MSB(DUP(Rs)), DUP(Rs)), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_44), DUP(op_AND_44))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_53 = SHIFTL0(op_MUL_51, SN(32, 1)); + RzILOpPure *op_ADD_56 = ADD(op_LSHIFT_53, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_58 = SHIFTRA(op_ADD_56, SN(32, 16)); + RzILOpPure *op_EQ_59 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_RSHIFT_29), SN(32, 0), SN(32, 0x20)), op_RSHIFT_58); + RzILOpPure *op_RSHIFT_94 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_96 = LOGAND(op_RSHIFT_94, SN(32, 0xffff)); + RzILOpPure *op_MUL_103 = MUL(CAST(64, MSB(DUP(Rs)), DUP(Rs)), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_96), DUP(op_AND_96))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_105 = SHIFTL0(op_MUL_103, SN(32, 1)); + RzILOpPure *op_ADD_108 = ADD(op_LSHIFT_105, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_110 = SHIFTRA(op_ADD_108, SN(32, 16)); + RzILOpPure *op_LT_113 = SLT(op_RSHIFT_110, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_118 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_119 = NEG(op_LSHIFT_118); + RzILOpPure *op_LSHIFT_124 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_127 = SUB(op_LSHIFT_124, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + 
RzILOpPure *cond_128 = ITE(op_LT_113, op_NEG_119, op_SUB_127); + RzILOpEffect *gcc_expr_129 = BRANCH(op_EQ_59, EMPTY(), set_usr_field_call_86); + + // h_tmp264 = HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) Rs) * sextract64(((ut64) ((st16) ((Rt >> 0x10) & 0xffff))), 0x0, 0x10) << 0x1) + ((st64) 0x8000) >> 0x10)), 0x0, 0x20) == ((((st64) Rs) * sextract64(((ut64) ((st16) ((Rt >> 0x10) & 0xffff))), 0x0, 0x10) << 0x1) + ((st64) 0x8000) >> 0x10))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((((st64) Rs) * sextract64(((ut64) ((st16) ((Rt >> 0x10) & 0xffff))), 0x0, 0x10) << 0x1) + ((st64) 0x8000) >> 0x10) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_131 = SETL("h_tmp264", cond_128); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) Rs) * sextrac ...; + RzILOpEffect *seq_132 = SEQN(2, gcc_expr_129, op_ASSIGN_hybrid_tmp_131); + + // Rd = ((st32) ((sextract64(((ut64) ((((st64) Rs) * sextract64(((ut64) ((st16) ((Rt >> 0x10) & 0xffff))), 0x0, 0x10) << 0x1) + ((st64) 0x8000) >> 0x10)), 0x0, 0x20) == ((((st64) Rs) * sextract64(((ut64) ((st16) ((Rt >> 0x10) & 0xffff))), 0x0, 0x10) << 0x1) + ((st64) 0x8000) >> 0x10)) ? 
((((st64) Rs) * sextract64(((ut64) ((st16) ((Rt >> 0x10) & 0xffff))), 0x0, 0x10) << 0x1) + ((st64) 0x8000) >> 0x10) : h_tmp264)); + RzILOpPure *op_RSHIFT_67 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_69 = LOGAND(op_RSHIFT_67, SN(32, 0xffff)); + RzILOpPure *op_MUL_76 = MUL(CAST(64, MSB(DUP(Rs)), DUP(Rs)), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_69), DUP(op_AND_69))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_78 = SHIFTL0(op_MUL_76, SN(32, 1)); + RzILOpPure *op_ADD_81 = ADD(op_LSHIFT_78, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_83 = SHIFTRA(op_ADD_81, SN(32, 16)); + RzILOpPure *cond_133 = ITE(DUP(op_EQ_59), op_RSHIFT_83, VARL("h_tmp264")); + RzILOpEffect *op_ASSIGN_135 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(cond_133), DUP(cond_133))); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) Rs) * sex ...; + RzILOpEffect *seq_136 = SEQN(2, seq_132, op_ASSIGN_135); + + RzILOpEffect *instruction_sequence = seq_136; + return instruction_sequence; +} + +// Rd = mpy(Rs,Rt.h):<<1:sat +RzILOpEffect *hex_il_op_m2_hmmpyh_s1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_77 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) Rs) * sextract64(((ut64) ((st16) ((Rt >> 0x10) & 0xffff))), 0x0, 0x10) << 0x1) >> 0x10)), 0x0, 0x20) == ((((st64) Rs) * sextract64(((ut64) ((st16) ((Rt >> 0x10) & 0xffff))), 0x0, 0x10) << 0x1) >> 0x10))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((((st64) Rs) * sextract64(((ut64) ((st16) 
((Rt >> 0x10) & 0xffff))), 0x0, 0x10) << 0x1) >> 0x10) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_13 = SHIFTRA(Rt, SN(32, 16)); + RzILOpPure *op_AND_15 = LOGAND(op_RSHIFT_13, SN(32, 0xffff)); + RzILOpPure *op_MUL_22 = MUL(CAST(64, MSB(Rs), DUP(Rs)), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_15), DUP(op_AND_15))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_24 = SHIFTL0(op_MUL_22, SN(32, 1)); + RzILOpPure *op_RSHIFT_26 = SHIFTRA(op_LSHIFT_24, SN(32, 16)); + RzILOpPure *op_RSHIFT_39 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_41 = LOGAND(op_RSHIFT_39, SN(32, 0xffff)); + RzILOpPure *op_MUL_48 = MUL(CAST(64, MSB(DUP(Rs)), DUP(Rs)), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_41), DUP(op_AND_41))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_50 = SHIFTL0(op_MUL_48, SN(32, 1)); + RzILOpPure *op_RSHIFT_52 = SHIFTRA(op_LSHIFT_50, SN(32, 16)); + RzILOpPure *op_EQ_53 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_RSHIFT_26), SN(32, 0), SN(32, 0x20)), op_RSHIFT_52); + RzILOpPure *op_RSHIFT_85 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_87 = LOGAND(op_RSHIFT_85, SN(32, 0xffff)); + RzILOpPure *op_MUL_94 = MUL(CAST(64, MSB(DUP(Rs)), DUP(Rs)), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_87), DUP(op_AND_87))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_96 = SHIFTL0(op_MUL_94, SN(32, 1)); + RzILOpPure *op_RSHIFT_98 = SHIFTRA(op_LSHIFT_96, SN(32, 16)); + RzILOpPure *op_LT_101 = SLT(op_RSHIFT_98, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_106 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_107 = NEG(op_LSHIFT_106); + RzILOpPure *op_LSHIFT_112 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_115 = SUB(op_LSHIFT_112, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_116 = ITE(op_LT_101, op_NEG_107, op_SUB_115); + RzILOpEffect *gcc_expr_117 = BRANCH(op_EQ_53, EMPTY(), set_usr_field_call_77); + + // h_tmp265 = HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) 
Rs) * sextract64(((ut64) ((st16) ((Rt >> 0x10) & 0xffff))), 0x0, 0x10) << 0x1) >> 0x10)), 0x0, 0x20) == ((((st64) Rs) * sextract64(((ut64) ((st16) ((Rt >> 0x10) & 0xffff))), 0x0, 0x10) << 0x1) >> 0x10))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((((st64) Rs) * sextract64(((ut64) ((st16) ((Rt >> 0x10) & 0xffff))), 0x0, 0x10) << 0x1) >> 0x10) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_119 = SETL("h_tmp265", cond_116); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) Rs) * sextrac ...; + RzILOpEffect *seq_120 = SEQN(2, gcc_expr_117, op_ASSIGN_hybrid_tmp_119); + + // Rd = ((st32) ((sextract64(((ut64) ((((st64) Rs) * sextract64(((ut64) ((st16) ((Rt >> 0x10) & 0xffff))), 0x0, 0x10) << 0x1) >> 0x10)), 0x0, 0x20) == ((((st64) Rs) * sextract64(((ut64) ((st16) ((Rt >> 0x10) & 0xffff))), 0x0, 0x10) << 0x1) >> 0x10)) ? ((((st64) Rs) * sextract64(((ut64) ((st16) ((Rt >> 0x10) & 0xffff))), 0x0, 0x10) << 0x1) >> 0x10) : h_tmp265)); + RzILOpPure *op_RSHIFT_61 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_63 = LOGAND(op_RSHIFT_61, SN(32, 0xffff)); + RzILOpPure *op_MUL_70 = MUL(CAST(64, MSB(DUP(Rs)), DUP(Rs)), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_63), DUP(op_AND_63))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_72 = SHIFTL0(op_MUL_70, SN(32, 1)); + RzILOpPure *op_RSHIFT_74 = SHIFTRA(op_LSHIFT_72, SN(32, 16)); + RzILOpPure *cond_121 = ITE(DUP(op_EQ_53), op_RSHIFT_74, VARL("h_tmp265")); + RzILOpEffect *op_ASSIGN_123 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(cond_121), DUP(cond_121))); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) Rs) * sex ...; + RzILOpEffect *seq_124 = SEQN(2, seq_120, op_ASSIGN_123); + + RzILOpEffect *instruction_sequence = seq_124; + return instruction_sequence; +} + +// Rd = mpy(Rs,Rt.l):<<1:rnd:sat +RzILOpEffect *hex_il_op_m2_hmmpyl_rs1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = 
bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_86 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) Rs) * sextract64(((ut64) ((st16) ((Rt >> 0x0) & 0xffff))), 0x0, 0x10) << 0x1) + ((st64) 0x8000) >> 0x10)), 0x0, 0x20) == ((((st64) Rs) * sextract64(((ut64) ((st16) ((Rt >> 0x0) & 0xffff))), 0x0, 0x10) << 0x1) + ((st64) 0x8000) >> 0x10))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((((st64) Rs) * sextract64(((ut64) ((st16) ((Rt >> 0x0) & 0xffff))), 0x0, 0x10) << 0x1) + ((st64) 0x8000) >> 0x10) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_13 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_15 = LOGAND(op_RSHIFT_13, SN(32, 0xffff)); + RzILOpPure *op_MUL_22 = MUL(CAST(64, MSB(Rs), DUP(Rs)), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_15), DUP(op_AND_15))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_24 = SHIFTL0(op_MUL_22, SN(32, 1)); + RzILOpPure *op_ADD_27 = ADD(op_LSHIFT_24, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_29 = SHIFTRA(op_ADD_27, SN(32, 16)); + RzILOpPure *op_RSHIFT_42 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_44 = LOGAND(op_RSHIFT_42, SN(32, 0xffff)); + RzILOpPure *op_MUL_51 = MUL(CAST(64, MSB(DUP(Rs)), DUP(Rs)), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_44), DUP(op_AND_44))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_53 = SHIFTL0(op_MUL_51, SN(32, 1)); + RzILOpPure *op_ADD_56 = ADD(op_LSHIFT_53, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_58 = SHIFTRA(op_ADD_56, SN(32, 16)); + RzILOpPure 
*op_EQ_59 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_RSHIFT_29), SN(32, 0), SN(32, 0x20)), op_RSHIFT_58); + RzILOpPure *op_RSHIFT_94 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_96 = LOGAND(op_RSHIFT_94, SN(32, 0xffff)); + RzILOpPure *op_MUL_103 = MUL(CAST(64, MSB(DUP(Rs)), DUP(Rs)), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_96), DUP(op_AND_96))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_105 = SHIFTL0(op_MUL_103, SN(32, 1)); + RzILOpPure *op_ADD_108 = ADD(op_LSHIFT_105, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_110 = SHIFTRA(op_ADD_108, SN(32, 16)); + RzILOpPure *op_LT_113 = SLT(op_RSHIFT_110, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_118 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_119 = NEG(op_LSHIFT_118); + RzILOpPure *op_LSHIFT_124 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_127 = SUB(op_LSHIFT_124, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_128 = ITE(op_LT_113, op_NEG_119, op_SUB_127); + RzILOpEffect *gcc_expr_129 = BRANCH(op_EQ_59, EMPTY(), set_usr_field_call_86); + + // h_tmp266 = HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) Rs) * sextract64(((ut64) ((st16) ((Rt >> 0x0) & 0xffff))), 0x0, 0x10) << 0x1) + ((st64) 0x8000) >> 0x10)), 0x0, 0x20) == ((((st64) Rs) * sextract64(((ut64) ((st16) ((Rt >> 0x0) & 0xffff))), 0x0, 0x10) << 0x1) + ((st64) 0x8000) >> 0x10))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((((st64) Rs) * sextract64(((ut64) ((st16) ((Rt >> 0x0) & 0xffff))), 0x0, 0x10) << 0x1) + ((st64) 0x8000) >> 0x10) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_131 = SETL("h_tmp266", cond_128); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) Rs) * sextrac ...; + RzILOpEffect *seq_132 = SEQN(2, gcc_expr_129, op_ASSIGN_hybrid_tmp_131); + + // Rd = ((st32) ((sextract64(((ut64) ((((st64) Rs) * sextract64(((ut64) ((st16) ((Rt >> 0x0) & 0xffff))), 0x0, 0x10) << 0x1) + ((st64) 0x8000) >> 0x10)), 0x0, 0x20) == ((((st64) Rs) * sextract64(((ut64) ((st16) ((Rt >> 0x0) & 0xffff))), 0x0, 0x10) << 0x1) + ((st64) 0x8000) >> 0x10)) ? ((((st64) Rs) * sextract64(((ut64) ((st16) ((Rt >> 0x0) & 0xffff))), 0x0, 0x10) << 0x1) + ((st64) 0x8000) >> 0x10) : h_tmp266)); + RzILOpPure *op_RSHIFT_67 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_69 = LOGAND(op_RSHIFT_67, SN(32, 0xffff)); + RzILOpPure *op_MUL_76 = MUL(CAST(64, MSB(DUP(Rs)), DUP(Rs)), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_69), DUP(op_AND_69))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_78 = SHIFTL0(op_MUL_76, SN(32, 1)); + RzILOpPure *op_ADD_81 = ADD(op_LSHIFT_78, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_83 = SHIFTRA(op_ADD_81, SN(32, 16)); + RzILOpPure *cond_133 = ITE(DUP(op_EQ_59), op_RSHIFT_83, VARL("h_tmp266")); + RzILOpEffect *op_ASSIGN_135 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(cond_133), DUP(cond_133))); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) Rs) * sex ...; + RzILOpEffect *seq_136 = SEQN(2, seq_132, op_ASSIGN_135); + + RzILOpEffect *instruction_sequence = seq_136; + return instruction_sequence; +} + +// Rd = mpy(Rs,Rt.l):<<1:sat +RzILOpEffect *hex_il_op_m2_hmmpyl_s1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + 
+ // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_77 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) Rs) * sextract64(((ut64) ((st16) ((Rt >> 0x0) & 0xffff))), 0x0, 0x10) << 0x1) >> 0x10)), 0x0, 0x20) == ((((st64) Rs) * sextract64(((ut64) ((st16) ((Rt >> 0x0) & 0xffff))), 0x0, 0x10) << 0x1) >> 0x10))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((((st64) Rs) * sextract64(((ut64) ((st16) ((Rt >> 0x0) & 0xffff))), 0x0, 0x10) << 0x1) >> 0x10) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_13 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_15 = LOGAND(op_RSHIFT_13, SN(32, 0xffff)); + RzILOpPure *op_MUL_22 = MUL(CAST(64, MSB(Rs), DUP(Rs)), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_15), DUP(op_AND_15))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_24 = SHIFTL0(op_MUL_22, SN(32, 1)); + RzILOpPure *op_RSHIFT_26 = SHIFTRA(op_LSHIFT_24, SN(32, 16)); + RzILOpPure *op_RSHIFT_39 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_41 = LOGAND(op_RSHIFT_39, SN(32, 0xffff)); + RzILOpPure *op_MUL_48 = MUL(CAST(64, MSB(DUP(Rs)), DUP(Rs)), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_41), DUP(op_AND_41))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_50 = SHIFTL0(op_MUL_48, SN(32, 1)); + RzILOpPure *op_RSHIFT_52 = SHIFTRA(op_LSHIFT_50, SN(32, 16)); + RzILOpPure *op_EQ_53 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_RSHIFT_26), SN(32, 0), SN(32, 0x20)), op_RSHIFT_52); + RzILOpPure *op_RSHIFT_85 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_87 = LOGAND(op_RSHIFT_85, SN(32, 0xffff)); + RzILOpPure *op_MUL_94 = MUL(CAST(64, MSB(DUP(Rs)), DUP(Rs)), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_87), DUP(op_AND_87))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_96 = SHIFTL0(op_MUL_94, SN(32, 1)); + RzILOpPure *op_RSHIFT_98 = SHIFTRA(op_LSHIFT_96, 
SN(32, 16)); + RzILOpPure *op_LT_101 = SLT(op_RSHIFT_98, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_106 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_107 = NEG(op_LSHIFT_106); + RzILOpPure *op_LSHIFT_112 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_115 = SUB(op_LSHIFT_112, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_116 = ITE(op_LT_101, op_NEG_107, op_SUB_115); + RzILOpEffect *gcc_expr_117 = BRANCH(op_EQ_53, EMPTY(), set_usr_field_call_77); + + // h_tmp267 = HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) Rs) * sextract64(((ut64) ((st16) ((Rt >> 0x0) & 0xffff))), 0x0, 0x10) << 0x1) >> 0x10)), 0x0, 0x20) == ((((st64) Rs) * sextract64(((ut64) ((st16) ((Rt >> 0x0) & 0xffff))), 0x0, 0x10) << 0x1) >> 0x10))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((((st64) Rs) * sextract64(((ut64) ((st16) ((Rt >> 0x0) & 0xffff))), 0x0, 0x10) << 0x1) >> 0x10) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_119 = SETL("h_tmp267", cond_116); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) Rs) * sextrac ...; + RzILOpEffect *seq_120 = SEQN(2, gcc_expr_117, op_ASSIGN_hybrid_tmp_119); + + // Rd = ((st32) ((sextract64(((ut64) ((((st64) Rs) * sextract64(((ut64) ((st16) ((Rt >> 0x0) & 0xffff))), 0x0, 0x10) << 0x1) >> 0x10)), 0x0, 0x20) == ((((st64) Rs) * sextract64(((ut64) ((st16) ((Rt >> 0x0) & 0xffff))), 0x0, 0x10) << 0x1) >> 0x10)) ? 
((((st64) Rs) * sextract64(((ut64) ((st16) ((Rt >> 0x0) & 0xffff))), 0x0, 0x10) << 0x1) >> 0x10) : h_tmp267));
	RzILOpPure *op_RSHIFT_61 = SHIFTRA(DUP(Rt), SN(32, 0));
	RzILOpPure *op_AND_63 = LOGAND(op_RSHIFT_61, SN(32, 0xffff));
	RzILOpPure *op_MUL_70 = MUL(CAST(64, MSB(DUP(Rs)), DUP(Rs)), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_63), DUP(op_AND_63))), SN(32, 0), SN(32, 16)));
	RzILOpPure *op_LSHIFT_72 = SHIFTL0(op_MUL_70, SN(32, 1));
	RzILOpPure *op_RSHIFT_74 = SHIFTRA(op_LSHIFT_72, SN(32, 16));
	// If the saturation check (op_EQ_53) held, keep the exact shifted product; otherwise use the saturated value stored in h_tmp267.
	RzILOpPure *cond_121 = ITE(DUP(op_EQ_53), op_RSHIFT_74, VARL("h_tmp267"));
	RzILOpEffect *op_ASSIGN_123 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(cond_121), DUP(cond_121)));

	// seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) Rs) * sex ...;
	RzILOpEffect *seq_124 = SEQN(2, seq_120, op_ASSIGN_123);

	RzILOpEffect *instruction_sequence = seq_124;
	return instruction_sequence;
}

// Rx += mpyi(Rs,Rt)
// Lifts M2_maci: plain 32-bit multiply-accumulate Rx += Rs * Rt (no rounding, no saturation logic emitted).
RzILOpEffect *hex_il_op_m2_maci(HexInsnPktBundle *bundle) {
	const HexInsn *hi = bundle->insn;
	HexPkt *pkt = bundle->pkt;
	// READ
	const HexOp *Rx_op = ISA2REG(hi, 'x', false);

	const HexOp *Rs_op = ISA2REG(hi, 's', false);
	RzILOpPure *Rs = READ_REG(pkt, Rs_op, false);
	const HexOp *Rt_op = ISA2REG(hi, 't', false);
	RzILOpPure *Rt = READ_REG(pkt, Rt_op, false);

	// Rx = Rx + Rs * Rt;
	RzILOpPure *op_MUL_3 = MUL(Rs, Rt);
	RzILOpPure *op_ADD_4 = ADD(READ_REG(pkt, Rx_op, false), op_MUL_3);
	RzILOpEffect *op_ASSIGN_5 = WRITE_REG(bundle, Rx_op, op_ADD_4);

	RzILOpEffect *instruction_sequence = op_ASSIGN_5;
	return instruction_sequence;
}

// Rx -= mpyi(Rs,Ii)
// Lifts M2_macsin: Rx -= Rs * #u, with the immediate bound to IL local "u"; arithmetic is done unsigned (ut32) and the result cast back.
RzILOpEffect *hex_il_op_m2_macsin(HexInsnPktBundle *bundle) {
	const HexInsn *hi = bundle->insn;
	HexPkt *pkt = bundle->pkt;
	// READ
	RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u'));
	const HexOp *Rx_op = ISA2REG(hi, 'x', false);

	const HexOp *Rs_op = ISA2REG(hi, 's', false);
	RzILOpPure *Rs = READ_REG(pkt, Rs_op, false);

	// u = u;
	RzILOpEffect *imm_assign_0 = SETL("u", u);

	// Rx = ((st32) ((ut32) Rx) - ((ut32) Rs) * u);
	RzILOpPure *op_MUL_5 = MUL(CAST(32, IL_FALSE, Rs), VARL("u"));
	RzILOpPure *op_SUB_7 = SUB(CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false)), op_MUL_5);
	RzILOpEffect *op_ASSIGN_9 = WRITE_REG(bundle, Rx_op, CAST(32, IL_FALSE, op_SUB_7));

	RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_0, op_ASSIGN_9);
	return instruction_sequence;
}

// Rx += mpyi(Rs,Ii)
// Lifts M2_macsip: Rx += Rs * #u; identical shape to M2_macsin above with ADD in place of SUB.
RzILOpEffect *hex_il_op_m2_macsip(HexInsnPktBundle *bundle) {
	const HexInsn *hi = bundle->insn;
	HexPkt *pkt = bundle->pkt;
	// READ
	RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u'));
	const HexOp *Rx_op = ISA2REG(hi, 'x', false);

	const HexOp *Rs_op = ISA2REG(hi, 's', false);
	RzILOpPure *Rs = READ_REG(pkt, Rs_op, false);

	// u = u;
	RzILOpEffect *imm_assign_0 = SETL("u", u);

	// Rx = ((st32) ((ut32) Rx) + ((ut32) Rs) * u);
	RzILOpPure *op_MUL_5 = MUL(CAST(32, IL_FALSE, Rs), VARL("u"));
	RzILOpPure *op_ADD_7 = ADD(CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false)), op_MUL_5);
	RzILOpEffect *op_ASSIGN_9 = WRITE_REG(bundle, Rx_op, CAST(32, IL_FALSE, op_ADD_9));

	RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_0, op_ASSIGN_9);
	return instruction_sequence;
}

// Rxx += vmpywoh(Rss,Rtt):rnd:sat
// Lifts M2_mmachs_rs0: per 32-bit word of Rxx, accumulate the rounded product of the
// corresponding word of Rss and the odd halfword of Rtt (Rtt.h[3] for the high word,
// Rtt.h[1] for the low word), with 32-bit saturation. Rounding adds 0x8000 before the
// >>16; when the sum does not fit 32 bits, USR.OVF is set and the word saturates to
// INT32_MIN / INT32_MAX. The generated code computes each sub-expression twice
// (overflow check vs. value) because the hybrid "gcc_expr_if" pattern cannot share pures.
RzILOpEffect *hex_il_op_m2_mmachs_rs0(HexInsnPktBundle *bundle) {
	const HexInsn *hi = bundle->insn;
	HexPkt *pkt = bundle->pkt;
	// READ
	const HexOp *Rxx_op = ISA2REG(hi, 'x', false);

	const HexOp *Rss_op = ISA2REG(hi, 's', false);
	RzILOpPure *Rss = READ_REG(pkt, Rss_op, false);
	const HexOp *Rtt_op = ISA2REG(hi, 't', false);
	RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false);

	// set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1));
	RzILOpEffect *set_usr_field_call_150 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1)));

	// High word: sum = Rxx.w[1] + ((Rss.w[1] * sext16(Rtt.h[3]) << 0) + 0x8000 >> 16).
	// If sext32(sum) != sum (overflow), the branch below sets USR.OVF.
	RzILOpPure *op_RSHIFT_14 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20));
	RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(64, 0xffffffff));
	RzILOpPure *op_RSHIFT_23 = SHIFTRA(Rss, SN(32, 0x20));
	RzILOpPure *op_AND_25 = LOGAND(op_RSHIFT_23, SN(64, 0xffffffff));
	RzILOpPure *op_RSHIFT_37 = SHIFTRA(Rtt, SN(32, 0x30));
	RzILOpPure *op_AND_40 = LOGAND(op_RSHIFT_37, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff)));
	RzILOpPure *op_MUL_47 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_25), DUP(op_AND_25))), CAST(32, MSB(DUP(op_AND_25)), DUP(op_AND_25)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_25)), DUP(op_AND_25))), CAST(32, MSB(DUP(op_AND_25)), DUP(op_AND_25))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_25)), DUP(op_AND_25))), CAST(32, MSB(DUP(op_AND_25)), DUP(op_AND_25)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_25)), DUP(op_AND_25))), CAST(32, MSB(DUP(op_AND_25)), DUP(op_AND_25))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_40), DUP(op_AND_40))), SN(32, 0), SN(32, 16)));
	RzILOpPure *op_LSHIFT_49 = SHIFTL0(op_MUL_47, SN(32, 0));
	RzILOpPure *op_ADD_52 = ADD(op_LSHIFT_49, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000)));
	RzILOpPure *op_RSHIFT_54 = SHIFTRA(op_ADD_52, SN(32, 16));
	RzILOpPure *op_ADD_55 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_16), DUP(op_AND_16))), CAST(32, MSB(DUP(op_AND_16)), DUP(op_AND_16))), op_RSHIFT_54);
	RzILOpPure *op_RSHIFT_64 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20));
	RzILOpPure *op_AND_66 = LOGAND(op_RSHIFT_64, SN(64, 0xffffffff));
	RzILOpPure *op_RSHIFT_72 = SHIFTRA(DUP(Rss), SN(32, 0x20));
	RzILOpPure *op_AND_74 = LOGAND(op_RSHIFT_72, SN(64, 0xffffffff));
	RzILOpPure *op_RSHIFT_85 = SHIFTRA(DUP(Rtt), SN(32, 0x30));
	RzILOpPure *op_AND_88 = LOGAND(op_RSHIFT_85, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff)));
	RzILOpPure *op_MUL_95 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_74), DUP(op_AND_74))), CAST(32, MSB(DUP(op_AND_74)), DUP(op_AND_74)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_74)), DUP(op_AND_74))), CAST(32, MSB(DUP(op_AND_74)), DUP(op_AND_74))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_74)), DUP(op_AND_74))), CAST(32, MSB(DUP(op_AND_74)), DUP(op_AND_74)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_74)), DUP(op_AND_74))), CAST(32, MSB(DUP(op_AND_74)), DUP(op_AND_74))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_88), DUP(op_AND_88))), SN(32, 0), SN(32, 16)));
	RzILOpPure *op_LSHIFT_97 = SHIFTL0(op_MUL_95, SN(32, 0));
	RzILOpPure *op_ADD_100 = ADD(op_LSHIFT_97, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000)));
	RzILOpPure *op_RSHIFT_102 = SHIFTRA(op_ADD_100, SN(32, 16));
	RzILOpPure *op_ADD_103 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_66), DUP(op_AND_66))), CAST(32, MSB(DUP(op_AND_66)), DUP(op_AND_66))), op_RSHIFT_102);
	RzILOpPure *op_EQ_104 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_ADD_55), SN(32, 0), SN(32, 0x20)), op_ADD_103);
	RzILOpPure *op_RSHIFT_154 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20));
	RzILOpPure *op_AND_156 = LOGAND(op_RSHIFT_154, SN(64, 0xffffffff));
	RzILOpPure *op_RSHIFT_162 = SHIFTRA(DUP(Rss), SN(32, 0x20));
	RzILOpPure *op_AND_164 = LOGAND(op_RSHIFT_162, SN(64, 0xffffffff));
	RzILOpPure *op_RSHIFT_175 = SHIFTRA(DUP(Rtt), SN(32, 0x30));
	RzILOpPure *op_AND_178 = LOGAND(op_RSHIFT_175, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff)));
	RzILOpPure *op_MUL_185 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_164), DUP(op_AND_164))), CAST(32, MSB(DUP(op_AND_164)), DUP(op_AND_164)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_164)), DUP(op_AND_164))), CAST(32, MSB(DUP(op_AND_164)), DUP(op_AND_164))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_164)), DUP(op_AND_164))), CAST(32, MSB(DUP(op_AND_164)), DUP(op_AND_164)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_164)), DUP(op_AND_164))), CAST(32, MSB(DUP(op_AND_164)), DUP(op_AND_164))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_178), DUP(op_AND_178))), SN(32, 0), SN(32, 16)));
	RzILOpPure *op_LSHIFT_187 = SHIFTL0(op_MUL_185, SN(32, 0));
	RzILOpPure *op_ADD_190 = ADD(op_LSHIFT_187, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000)));
	RzILOpPure *op_RSHIFT_192 = SHIFTRA(op_ADD_190, SN(32, 16));
	RzILOpPure *op_ADD_193 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_156), DUP(op_AND_156))), CAST(32, MSB(DUP(op_AND_156)), DUP(op_AND_156))), op_RSHIFT_192);
	RzILOpPure *op_LT_196 = SLT(op_ADD_193, CAST(64, MSB(SN(32, 0)), SN(32, 0)));
	RzILOpPure *op_LSHIFT_201 = SHIFTL0(SN(64, 1), SN(32, 31));
	RzILOpPure *op_NEG_202 = NEG(op_LSHIFT_201);
	RzILOpPure *op_LSHIFT_207 = SHIFTL0(SN(64, 1), SN(32, 31));
	RzILOpPure *op_SUB_210 = SUB(op_LSHIFT_207, CAST(64, MSB(SN(32, 1)), SN(32, 1)));
	// Saturation bound for the high word: INT32_MIN (-(1 << 31)) if the sum is negative, else INT32_MAX ((1 << 31) - 1).
	RzILOpPure *cond_211 = ITE(op_LT_196, op_NEG_202, op_SUB_210);
	// On overflow (sum does not fit in 32 bits) set USR.OVF; otherwise no effect.
	RzILOpEffect *gcc_expr_212 = BRANCH(op_EQ_104, EMPTY(), set_usr_field_call_150);

	// h_tmp268 holds the saturated fallback value for the high word.
	RzILOpEffect *op_ASSIGN_hybrid_tmp_214 = SETL("h_tmp268", cond_211);

	// seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx > ...;
	RzILOpEffect *seq_215 = SEQN(2, gcc_expr_212, op_ASSIGN_hybrid_tmp_214);

	// Rxx.w[1] = fits ? sum : h_tmp268 — merged back into the upper 32 bits of Rxx.
	RzILOpPure *op_LSHIFT_5 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0x20));
	RzILOpPure *op_NOT_6 = LOGNOT(op_LSHIFT_5);
	RzILOpPure *op_AND_7 = LOGAND(READ_REG(pkt, Rxx_op, false), op_NOT_6);
	RzILOpPure *op_RSHIFT_108 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20));
	RzILOpPure *op_AND_110 = LOGAND(op_RSHIFT_108, SN(64, 0xffffffff));
	RzILOpPure *op_RSHIFT_116 = SHIFTRA(DUP(Rss), SN(32, 0x20));
	RzILOpPure *op_AND_118 = LOGAND(op_RSHIFT_116, SN(64, 0xffffffff));
	RzILOpPure *op_RSHIFT_129 = SHIFTRA(DUP(Rtt), SN(32, 0x30));
	RzILOpPure *op_AND_132 = LOGAND(op_RSHIFT_129, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff)));
	RzILOpPure *op_MUL_139 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_118), DUP(op_AND_118))), CAST(32, MSB(DUP(op_AND_118)), DUP(op_AND_118)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_118)), DUP(op_AND_118))), CAST(32, MSB(DUP(op_AND_118)), DUP(op_AND_118))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_118)), DUP(op_AND_118))), CAST(32, MSB(DUP(op_AND_118)), DUP(op_AND_118)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_118)), DUP(op_AND_118))), CAST(32, MSB(DUP(op_AND_118)), DUP(op_AND_118))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_132), DUP(op_AND_132))), SN(32, 0), SN(32, 16)));
	RzILOpPure *op_LSHIFT_141 = SHIFTL0(op_MUL_139, SN(32, 0));
	RzILOpPure *op_ADD_144 = ADD(op_LSHIFT_141, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000)));
	RzILOpPure *op_RSHIFT_146 = SHIFTRA(op_ADD_144, SN(32, 16));
	RzILOpPure *op_ADD_147 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_110), DUP(op_AND_110))), CAST(32, MSB(DUP(op_AND_110)), DUP(op_AND_110))), op_RSHIFT_146);
	RzILOpPure *cond_216 = ITE(DUP(op_EQ_104), op_ADD_147, VARL("h_tmp268"));
	RzILOpPure *op_AND_218 = LOGAND(cond_216, SN(64, 0xffffffff));
	RzILOpPure *op_LSHIFT_222 = SHIFTL0(op_AND_218, SN(32, 0x20));
	RzILOpPure *op_OR_223 = LOGOR(op_AND_7, op_LSHIFT_222);
	RzILOpEffect *op_ASSIGN_224 = WRITE_REG(bundle, Rxx_op, op_OR_223);

	// seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((R ...;
	RzILOpEffect *seq_225 = SEQN(2, seq_215, op_ASSIGN_224);

	// set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1));
	RzILOpEffect *set_usr_field_call_374 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1)));

	// Low word: same pattern as above with Rxx.w[0], Rss.w[0] and Rtt.h[1] (shift 0x10).
	RzILOpPure *op_RSHIFT_240 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0));
	RzILOpPure *op_AND_242 = LOGAND(op_RSHIFT_240, SN(64, 0xffffffff));
	RzILOpPure *op_RSHIFT_248 = SHIFTRA(DUP(Rss), SN(32, 0));
	RzILOpPure *op_AND_250 = LOGAND(op_RSHIFT_248, SN(64, 0xffffffff));
	RzILOpPure *op_RSHIFT_261 = SHIFTRA(DUP(Rtt), SN(32, 16));
	RzILOpPure *op_AND_264 = LOGAND(op_RSHIFT_261, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff)));
	RzILOpPure *op_MUL_271 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_250), DUP(op_AND_250))), CAST(32, MSB(DUP(op_AND_250)), DUP(op_AND_250)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_250)), DUP(op_AND_250))), CAST(32, MSB(DUP(op_AND_250)), DUP(op_AND_250))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_250)), DUP(op_AND_250))), CAST(32, MSB(DUP(op_AND_250)), DUP(op_AND_250)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_250)), DUP(op_AND_250))), CAST(32, MSB(DUP(op_AND_250)), DUP(op_AND_250))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_264), DUP(op_AND_264))), SN(32, 0), SN(32, 16)));
	RzILOpPure *op_LSHIFT_273 = SHIFTL0(op_MUL_271, SN(32, 0));
	RzILOpPure *op_ADD_276 = ADD(op_LSHIFT_273, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000)));
	RzILOpPure *op_RSHIFT_278 = SHIFTRA(op_ADD_276, SN(32, 16));
	RzILOpPure *op_ADD_279 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_242), DUP(op_AND_242))), CAST(32, MSB(DUP(op_AND_242)), DUP(op_AND_242))), op_RSHIFT_278);
	RzILOpPure *op_RSHIFT_288 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0));
	RzILOpPure *op_AND_290 = LOGAND(op_RSHIFT_288, SN(64, 0xffffffff));
	RzILOpPure *op_RSHIFT_296 = SHIFTRA(DUP(Rss), SN(32, 0));
	RzILOpPure *op_AND_298 = LOGAND(op_RSHIFT_296, SN(64, 0xffffffff));
	RzILOpPure *op_RSHIFT_309 = SHIFTRA(DUP(Rtt), SN(32, 16));
	RzILOpPure *op_AND_312 = LOGAND(op_RSHIFT_309, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff)));
	RzILOpPure *op_MUL_319 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_298), DUP(op_AND_298))), CAST(32, MSB(DUP(op_AND_298)), DUP(op_AND_298)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_298)), DUP(op_AND_298))), CAST(32, MSB(DUP(op_AND_298)), DUP(op_AND_298))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_298)), DUP(op_AND_298))), CAST(32, MSB(DUP(op_AND_298)), DUP(op_AND_298)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_298)), DUP(op_AND_298))), CAST(32, MSB(DUP(op_AND_298)), DUP(op_AND_298))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_312), DUP(op_AND_312))), SN(32, 0), SN(32, 16)));
	RzILOpPure *op_LSHIFT_321 = SHIFTL0(op_MUL_319, SN(32, 0));
	RzILOpPure *op_ADD_324 = ADD(op_LSHIFT_321, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000)));
	RzILOpPure *op_RSHIFT_326 = SHIFTRA(op_ADD_324, SN(32, 16));
	RzILOpPure *op_ADD_327 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_290), DUP(op_AND_290))), CAST(32, MSB(DUP(op_AND_290)), DUP(op_AND_290))), op_RSHIFT_326);
	RzILOpPure *op_EQ_328 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_ADD_279), SN(32, 0), SN(32, 0x20)), op_ADD_327);
	RzILOpPure *op_RSHIFT_378 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0));
	RzILOpPure *op_AND_380 = LOGAND(op_RSHIFT_378, SN(64, 0xffffffff));
	RzILOpPure *op_RSHIFT_386 = SHIFTRA(DUP(Rss), SN(32, 0));
	RzILOpPure *op_AND_388 = LOGAND(op_RSHIFT_386, SN(64, 0xffffffff));
	RzILOpPure *op_RSHIFT_399 = SHIFTRA(DUP(Rtt), SN(32, 16));
	RzILOpPure *op_AND_402 = LOGAND(op_RSHIFT_399, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff)));
	RzILOpPure *op_MUL_409 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_388), DUP(op_AND_388))), CAST(32, MSB(DUP(op_AND_388)), DUP(op_AND_388)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_388)), DUP(op_AND_388))), CAST(32, MSB(DUP(op_AND_388)), DUP(op_AND_388))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_388)), DUP(op_AND_388))), CAST(32, MSB(DUP(op_AND_388)), DUP(op_AND_388)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_388)), DUP(op_AND_388))), CAST(32, MSB(DUP(op_AND_388)), DUP(op_AND_388))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_402), DUP(op_AND_402))), SN(32, 0), SN(32, 16)));
	RzILOpPure *op_LSHIFT_411 = SHIFTL0(op_MUL_409, SN(32, 0));
	RzILOpPure *op_ADD_414 = ADD(op_LSHIFT_411, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000)));
	RzILOpPure *op_RSHIFT_416 = SHIFTRA(op_ADD_414, SN(32, 16));
	RzILOpPure *op_ADD_417 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_380), DUP(op_AND_380))), CAST(32, MSB(DUP(op_AND_380)), DUP(op_AND_380))), op_RSHIFT_416);
	RzILOpPure *op_LT_420 = SLT(op_ADD_417, CAST(64, MSB(SN(32, 0)), SN(32, 0)));
	RzILOpPure *op_LSHIFT_425 = SHIFTL0(SN(64, 1), SN(32, 31));
	RzILOpPure *op_NEG_426 = NEG(op_LSHIFT_425);
	RzILOpPure *op_LSHIFT_431 = SHIFTL0(SN(64, 1), SN(32, 31));
	RzILOpPure *op_SUB_434 = SUB(op_LSHIFT_431, CAST(64, MSB(SN(32, 1)), SN(32, 1)));
	// Saturation bound for the low word: INT32_MIN if negative, else INT32_MAX.
	RzILOpPure *cond_435 = ITE(op_LT_420, op_NEG_426, op_SUB_434);
	// On overflow of the low-word sum set USR.OVF; otherwise no effect.
	RzILOpEffect *gcc_expr_436 = BRANCH(op_EQ_328, EMPTY(), set_usr_field_call_374);

	// h_tmp269 holds the saturated fallback value for the low word.
	RzILOpEffect *op_ASSIGN_hybrid_tmp_438 = SETL("h_tmp269", cond_435);

	// seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx > ...;
	RzILOpEffect *seq_439 = SEQN(2, gcc_expr_436, op_ASSIGN_hybrid_tmp_438);

	// Rxx.w[0] = fits ? sum : h_tmp269 — merged back into the lower 32 bits of Rxx.
	RzILOpPure *op_LSHIFT_231 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0));
	RzILOpPure *op_NOT_232 = LOGNOT(op_LSHIFT_231);
	RzILOpPure *op_AND_233 = LOGAND(READ_REG(pkt, Rxx_op, false), op_NOT_232);
	RzILOpPure *op_RSHIFT_332 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0));
	RzILOpPure *op_AND_334 = LOGAND(op_RSHIFT_332, SN(64, 0xffffffff));
	RzILOpPure *op_RSHIFT_340 = SHIFTRA(DUP(Rss), SN(32, 0));
	RzILOpPure *op_AND_342 = LOGAND(op_RSHIFT_340, SN(64, 0xffffffff));
	RzILOpPure *op_RSHIFT_353 = SHIFTRA(DUP(Rtt), SN(32, 16));
	RzILOpPure *op_AND_356 = LOGAND(op_RSHIFT_353, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff)));
	RzILOpPure *op_MUL_363 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_342), DUP(op_AND_342))), CAST(32, MSB(DUP(op_AND_342)), DUP(op_AND_342)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_342)), DUP(op_AND_342))), CAST(32, MSB(DUP(op_AND_342)), DUP(op_AND_342))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_342)), DUP(op_AND_342))), CAST(32, MSB(DUP(op_AND_342)), DUP(op_AND_342)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_342)), DUP(op_AND_342))), CAST(32, MSB(DUP(op_AND_342)), DUP(op_AND_342))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_356), DUP(op_AND_356))), SN(32, 0), SN(32, 16)));
	RzILOpPure *op_LSHIFT_365 = SHIFTL0(op_MUL_363, SN(32, 0));
	RzILOpPure *op_ADD_368 = ADD(op_LSHIFT_365, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000)));
	RzILOpPure *op_RSHIFT_370 = SHIFTRA(op_ADD_368, SN(32, 16));
	RzILOpPure *op_ADD_371 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_334), DUP(op_AND_334))), CAST(32, MSB(DUP(op_AND_334)), DUP(op_AND_334))), op_RSHIFT_370);
	RzILOpPure *cond_440 = ITE(DUP(op_EQ_328), op_ADD_371, VARL("h_tmp269"));
	RzILOpPure *op_AND_442 = LOGAND(cond_440, SN(64, 0xffffffff));
	RzILOpPure *op_LSHIFT_446 = SHIFTL0(op_AND_442, SN(32, 0));
	RzILOpPure *op_OR_447 = LOGOR(op_AND_233, op_LSHIFT_446);
	RzILOpEffect *op_ASSIGN_448 = WRITE_REG(bundle, Rxx_op, op_OR_447);

	// seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((R ...;
	RzILOpEffect *seq_449 = SEQN(2, seq_439, op_ASSIGN_448);

	RzILOpEffect *instruction_sequence = SEQN(2, seq_225, seq_449);
	return instruction_sequence;
}

// Rxx += vmpywoh(Rss,Rtt):<<1:rnd:sat
RzILOpEffect *hex_il_op_m2_mmachs_rs1(HexInsnPktBundle *bundle) {
	const HexInsn *hi = bundle->insn;
	HexPkt *pkt = bundle->pkt;
	// READ
	const HexOp *Rxx_op = ISA2REG(hi, 'x', false);

	const HexOp *Rss_op = ISA2REG(hi, 's', false);
	RzILOpPure *Rss = READ_REG(pkt, Rss_op, false);
	const HexOp *Rtt_op = ISA2REG(hi, 't', false);
	RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false);

	// set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1));
	RzILOpEffect *set_usr_field_call_150 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1)));

	// HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff)))
+ ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff)))), 0x0, 0x10) << 0x1) + ((st64) 0x8000) >> 0x10)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff)))), 0x0, 0x10) << 0x1) + ((st64) 0x8000) >> 0x10))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff)))), 0x0, 0x10) << 0x1) + ((st64) 0x8000) >> 0x10) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_23 = SHIFTRA(Rss, SN(32, 0x20)); + RzILOpPure *op_AND_25 = LOGAND(op_RSHIFT_23, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_37 = SHIFTRA(Rtt, SN(32, 0x30)); + RzILOpPure *op_AND_40 = LOGAND(op_RSHIFT_37, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_47 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_25), DUP(op_AND_25))), CAST(32, MSB(DUP(op_AND_25)), DUP(op_AND_25)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_25)), DUP(op_AND_25))), CAST(32, MSB(DUP(op_AND_25)), DUP(op_AND_25))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_25)), DUP(op_AND_25))), CAST(32, MSB(DUP(op_AND_25)), DUP(op_AND_25)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_25)), DUP(op_AND_25))), CAST(32, MSB(DUP(op_AND_25)), DUP(op_AND_25))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_40), DUP(op_AND_40))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_49 = SHIFTL0(op_MUL_47, SN(32, 1)); + RzILOpPure *op_ADD_52 = ADD(op_LSHIFT_49, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure 
*op_RSHIFT_54 = SHIFTRA(op_ADD_52, SN(32, 16)); + RzILOpPure *op_ADD_55 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_16), DUP(op_AND_16))), CAST(32, MSB(DUP(op_AND_16)), DUP(op_AND_16))), op_RSHIFT_54); + RzILOpPure *op_RSHIFT_64 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_66 = LOGAND(op_RSHIFT_64, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_72 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_74 = LOGAND(op_RSHIFT_72, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_85 = SHIFTRA(DUP(Rtt), SN(32, 0x30)); + RzILOpPure *op_AND_88 = LOGAND(op_RSHIFT_85, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_95 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_74), DUP(op_AND_74))), CAST(32, MSB(DUP(op_AND_74)), DUP(op_AND_74)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_74)), DUP(op_AND_74))), CAST(32, MSB(DUP(op_AND_74)), DUP(op_AND_74))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_74)), DUP(op_AND_74))), CAST(32, MSB(DUP(op_AND_74)), DUP(op_AND_74)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_74)), DUP(op_AND_74))), CAST(32, MSB(DUP(op_AND_74)), DUP(op_AND_74))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_88), DUP(op_AND_88))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_97 = SHIFTL0(op_MUL_95, SN(32, 1)); + RzILOpPure *op_ADD_100 = ADD(op_LSHIFT_97, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_102 = SHIFTRA(op_ADD_100, SN(32, 16)); + RzILOpPure *op_ADD_103 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_66), DUP(op_AND_66))), CAST(32, MSB(DUP(op_AND_66)), DUP(op_AND_66))), op_RSHIFT_102); + RzILOpPure *op_EQ_104 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_ADD_55), SN(32, 0), SN(32, 0x20)), op_ADD_103); + RzILOpPure *op_RSHIFT_154 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_156 = LOGAND(op_RSHIFT_154, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_162 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_164 = LOGAND(op_RSHIFT_162, SN(64, 
0xffffffff)); + RzILOpPure *op_RSHIFT_175 = SHIFTRA(DUP(Rtt), SN(32, 0x30)); + RzILOpPure *op_AND_178 = LOGAND(op_RSHIFT_175, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_185 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_164), DUP(op_AND_164))), CAST(32, MSB(DUP(op_AND_164)), DUP(op_AND_164)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_164)), DUP(op_AND_164))), CAST(32, MSB(DUP(op_AND_164)), DUP(op_AND_164))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_164)), DUP(op_AND_164))), CAST(32, MSB(DUP(op_AND_164)), DUP(op_AND_164)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_164)), DUP(op_AND_164))), CAST(32, MSB(DUP(op_AND_164)), DUP(op_AND_164))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_178), DUP(op_AND_178))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_187 = SHIFTL0(op_MUL_185, SN(32, 1)); + RzILOpPure *op_ADD_190 = ADD(op_LSHIFT_187, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_192 = SHIFTRA(op_ADD_190, SN(32, 16)); + RzILOpPure *op_ADD_193 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_156), DUP(op_AND_156))), CAST(32, MSB(DUP(op_AND_156)), DUP(op_AND_156))), op_RSHIFT_192); + RzILOpPure *op_LT_196 = SLT(op_ADD_193, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_201 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_202 = NEG(op_LSHIFT_201); + RzILOpPure *op_LSHIFT_207 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_210 = SUB(op_LSHIFT_207, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_211 = ITE(op_LT_196, op_NEG_202, op_SUB_210); + RzILOpEffect *gcc_expr_212 = BRANCH(op_EQ_104, EMPTY(), set_usr_field_call_150); + + // h_tmp270 = HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff)))), 0x0, 0x10) << 0x1) + ((st64) 0x8000) >> 0x10)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) 
+ ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff)))), 0x0, 0x10) << 0x1) + ((st64) 0x8000) >> 0x10))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff)))), 0x0, 0x10) << 0x1) + ((st64) 0x8000) >> 0x10) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_214 = SETL("h_tmp270", cond_211); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx > ...; + RzILOpEffect *seq_215 = SEQN(2, gcc_expr_212, op_ASSIGN_hybrid_tmp_214); + + // Rxx = ((Rxx & (~(0xffffffff << 0x20))) | ((((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff)))), 0x0, 0x10) << 0x1) + ((st64) 0x8000) >> 0x10)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff)))), 0x0, 0x10) << 0x1) + ((st64) 0x8000) >> 0x10)) ? 
((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff)))), 0x0, 0x10) << 0x1) + ((st64) 0x8000) >> 0x10) : h_tmp270) & 0xffffffff) << 0x20)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0x20)); + RzILOpPure *op_NOT_6 = LOGNOT(op_LSHIFT_5); + RzILOpPure *op_AND_7 = LOGAND(READ_REG(pkt, Rxx_op, false), op_NOT_6); + RzILOpPure *op_RSHIFT_108 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_110 = LOGAND(op_RSHIFT_108, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_116 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_118 = LOGAND(op_RSHIFT_116, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_129 = SHIFTRA(DUP(Rtt), SN(32, 0x30)); + RzILOpPure *op_AND_132 = LOGAND(op_RSHIFT_129, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_139 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_118), DUP(op_AND_118))), CAST(32, MSB(DUP(op_AND_118)), DUP(op_AND_118)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_118)), DUP(op_AND_118))), CAST(32, MSB(DUP(op_AND_118)), DUP(op_AND_118))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_118)), DUP(op_AND_118))), CAST(32, MSB(DUP(op_AND_118)), DUP(op_AND_118)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_118)), DUP(op_AND_118))), CAST(32, MSB(DUP(op_AND_118)), DUP(op_AND_118))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_132), DUP(op_AND_132))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_141 = SHIFTL0(op_MUL_139, SN(32, 1)); + RzILOpPure *op_ADD_144 = ADD(op_LSHIFT_141, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_146 = SHIFTRA(op_ADD_144, SN(32, 16)); + RzILOpPure *op_ADD_147 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_110), DUP(op_AND_110))), CAST(32, MSB(DUP(op_AND_110)), DUP(op_AND_110))), op_RSHIFT_146); + RzILOpPure *cond_216 = ITE(DUP(op_EQ_104), op_ADD_147, VARL("h_tmp270")); + RzILOpPure *op_AND_218 
= LOGAND(cond_216, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_222 = SHIFTL0(op_AND_218, SN(32, 0x20)); + RzILOpPure *op_OR_223 = LOGOR(op_AND_7, op_LSHIFT_222); + RzILOpEffect *op_ASSIGN_224 = WRITE_REG(bundle, Rxx_op, op_OR_223); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((R ...; + RzILOpEffect *seq_225 = SEQN(2, seq_215, op_ASSIGN_224); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_374 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10) << 0x1) + ((st64) 0x8000) >> 0x10)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10) << 0x1) + ((st64) 0x8000) >> 0x10))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10) << 0x1) + ((st64) 0x8000) >> 0x10) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_240 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_242 = LOGAND(op_RSHIFT_240, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_248 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_250 = LOGAND(op_RSHIFT_248, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_261 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_264 = LOGAND(op_RSHIFT_261, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_271 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_250), DUP(op_AND_250))), CAST(32, MSB(DUP(op_AND_250)), DUP(op_AND_250)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_250)), DUP(op_AND_250))), CAST(32, MSB(DUP(op_AND_250)), DUP(op_AND_250))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_250)), DUP(op_AND_250))), CAST(32, MSB(DUP(op_AND_250)), DUP(op_AND_250)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_250)), DUP(op_AND_250))), CAST(32, MSB(DUP(op_AND_250)), DUP(op_AND_250))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_264), DUP(op_AND_264))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_273 = SHIFTL0(op_MUL_271, SN(32, 1)); + RzILOpPure *op_ADD_276 = ADD(op_LSHIFT_273, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_278 = SHIFTRA(op_ADD_276, SN(32, 16)); + RzILOpPure *op_ADD_279 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_242), DUP(op_AND_242))), CAST(32, MSB(DUP(op_AND_242)), DUP(op_AND_242))), op_RSHIFT_278); + RzILOpPure *op_RSHIFT_288 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_290 = LOGAND(op_RSHIFT_288, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_296 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_298 = LOGAND(op_RSHIFT_296, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_309 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_312 = LOGAND(op_RSHIFT_309, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_319 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, 
MSB(CAST(32, MSB(op_AND_298), DUP(op_AND_298))), CAST(32, MSB(DUP(op_AND_298)), DUP(op_AND_298)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_298)), DUP(op_AND_298))), CAST(32, MSB(DUP(op_AND_298)), DUP(op_AND_298))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_298)), DUP(op_AND_298))), CAST(32, MSB(DUP(op_AND_298)), DUP(op_AND_298)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_298)), DUP(op_AND_298))), CAST(32, MSB(DUP(op_AND_298)), DUP(op_AND_298))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_312), DUP(op_AND_312))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_321 = SHIFTL0(op_MUL_319, SN(32, 1)); + RzILOpPure *op_ADD_324 = ADD(op_LSHIFT_321, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_326 = SHIFTRA(op_ADD_324, SN(32, 16)); + RzILOpPure *op_ADD_327 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_290), DUP(op_AND_290))), CAST(32, MSB(DUP(op_AND_290)), DUP(op_AND_290))), op_RSHIFT_326); + RzILOpPure *op_EQ_328 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_ADD_279), SN(32, 0), SN(32, 0x20)), op_ADD_327); + RzILOpPure *op_RSHIFT_378 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_380 = LOGAND(op_RSHIFT_378, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_386 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_388 = LOGAND(op_RSHIFT_386, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_399 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_402 = LOGAND(op_RSHIFT_399, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_409 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_388), DUP(op_AND_388))), CAST(32, MSB(DUP(op_AND_388)), DUP(op_AND_388)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_388)), DUP(op_AND_388))), CAST(32, MSB(DUP(op_AND_388)), DUP(op_AND_388))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_388)), DUP(op_AND_388))), CAST(32, MSB(DUP(op_AND_388)), DUP(op_AND_388)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_388)), DUP(op_AND_388))), CAST(32, MSB(DUP(op_AND_388)), 
DUP(op_AND_388))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_402), DUP(op_AND_402))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_411 = SHIFTL0(op_MUL_409, SN(32, 1)); + RzILOpPure *op_ADD_414 = ADD(op_LSHIFT_411, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_416 = SHIFTRA(op_ADD_414, SN(32, 16)); + RzILOpPure *op_ADD_417 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_380), DUP(op_AND_380))), CAST(32, MSB(DUP(op_AND_380)), DUP(op_AND_380))), op_RSHIFT_416); + RzILOpPure *op_LT_420 = SLT(op_ADD_417, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_425 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_426 = NEG(op_LSHIFT_425); + RzILOpPure *op_LSHIFT_431 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_434 = SUB(op_LSHIFT_431, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_435 = ITE(op_LT_420, op_NEG_426, op_SUB_434); + RzILOpEffect *gcc_expr_436 = BRANCH(op_EQ_328, EMPTY(), set_usr_field_call_374); + + // h_tmp271 = HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10) << 0x1) + ((st64) 0x8000) >> 0x10)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10) << 0x1) + ((st64) 0x8000) >> 0x10))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10) << 0x1) + ((st64) 0x8000) >> 0x10) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_438 = SETL("h_tmp271", cond_435); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx > ...; + RzILOpEffect *seq_439 = SEQN(2, gcc_expr_436, op_ASSIGN_hybrid_tmp_438); + + // Rxx = ((Rxx & (~(0xffffffff << 0x0))) | ((((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10) << 0x1) + ((st64) 0x8000) >> 0x10)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10) << 0x1) + ((st64) 0x8000) >> 0x10)) ? ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10) << 0x1) + ((st64) 0x8000) >> 0x10) : h_tmp271) & 0xffffffff) << 0x0)); + RzILOpPure *op_LSHIFT_231 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0)); + RzILOpPure *op_NOT_232 = LOGNOT(op_LSHIFT_231); + RzILOpPure *op_AND_233 = LOGAND(READ_REG(pkt, Rxx_op, false), op_NOT_232); + RzILOpPure *op_RSHIFT_332 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_334 = LOGAND(op_RSHIFT_332, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_340 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_342 = LOGAND(op_RSHIFT_340, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_353 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_356 = LOGAND(op_RSHIFT_353, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_363 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_342), DUP(op_AND_342))), CAST(32, MSB(DUP(op_AND_342)), DUP(op_AND_342)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_342)), DUP(op_AND_342))), CAST(32, MSB(DUP(op_AND_342)), DUP(op_AND_342))))), 
CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_342)), DUP(op_AND_342))), CAST(32, MSB(DUP(op_AND_342)), DUP(op_AND_342)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_342)), DUP(op_AND_342))), CAST(32, MSB(DUP(op_AND_342)), DUP(op_AND_342))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_356), DUP(op_AND_356))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_365 = SHIFTL0(op_MUL_363, SN(32, 1)); + RzILOpPure *op_ADD_368 = ADD(op_LSHIFT_365, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_370 = SHIFTRA(op_ADD_368, SN(32, 16)); + RzILOpPure *op_ADD_371 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_334), DUP(op_AND_334))), CAST(32, MSB(DUP(op_AND_334)), DUP(op_AND_334))), op_RSHIFT_370); + RzILOpPure *cond_440 = ITE(DUP(op_EQ_328), op_ADD_371, VARL("h_tmp271")); + RzILOpPure *op_AND_442 = LOGAND(cond_440, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_446 = SHIFTL0(op_AND_442, SN(32, 0)); + RzILOpPure *op_OR_447 = LOGOR(op_AND_233, op_LSHIFT_446); + RzILOpEffect *op_ASSIGN_448 = WRITE_REG(bundle, Rxx_op, op_OR_447); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((R ...; + RzILOpEffect *seq_449 = SEQN(2, seq_439, op_ASSIGN_448); + + RzILOpEffect *instruction_sequence = SEQN(2, seq_225, seq_449); + return instruction_sequence; +} + +// Rxx += vmpywoh(Rss,Rtt):sat +RzILOpEffect *hex_il_op_m2_mmachs_s0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rxx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_141 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + 
((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff)))), 0x0, 0x10) << 0x0) >> 0x10)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff)))), 0x0, 0x10) << 0x0) >> 0x10))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff)))), 0x0, 0x10) << 0x0) >> 0x10) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_23 = SHIFTRA(Rss, SN(32, 0x20)); + RzILOpPure *op_AND_25 = LOGAND(op_RSHIFT_23, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_37 = SHIFTRA(Rtt, SN(32, 0x30)); + RzILOpPure *op_AND_40 = LOGAND(op_RSHIFT_37, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_47 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_25), DUP(op_AND_25))), CAST(32, MSB(DUP(op_AND_25)), DUP(op_AND_25)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_25)), DUP(op_AND_25))), CAST(32, MSB(DUP(op_AND_25)), DUP(op_AND_25))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_25)), DUP(op_AND_25))), CAST(32, MSB(DUP(op_AND_25)), DUP(op_AND_25)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_25)), DUP(op_AND_25))), CAST(32, MSB(DUP(op_AND_25)), DUP(op_AND_25))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_40), DUP(op_AND_40))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_49 = SHIFTL0(op_MUL_47, SN(32, 0)); + RzILOpPure *op_RSHIFT_51 = SHIFTRA(op_LSHIFT_49, SN(32, 16)); + RzILOpPure *op_ADD_52 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_16), DUP(op_AND_16))), CAST(32, 
MSB(DUP(op_AND_16)), DUP(op_AND_16))), op_RSHIFT_51); + RzILOpPure *op_RSHIFT_61 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_63 = LOGAND(op_RSHIFT_61, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_69 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_71 = LOGAND(op_RSHIFT_69, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_82 = SHIFTRA(DUP(Rtt), SN(32, 0x30)); + RzILOpPure *op_AND_85 = LOGAND(op_RSHIFT_82, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_92 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_71), DUP(op_AND_71))), CAST(32, MSB(DUP(op_AND_71)), DUP(op_AND_71)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_71)), DUP(op_AND_71))), CAST(32, MSB(DUP(op_AND_71)), DUP(op_AND_71))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_71)), DUP(op_AND_71))), CAST(32, MSB(DUP(op_AND_71)), DUP(op_AND_71)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_71)), DUP(op_AND_71))), CAST(32, MSB(DUP(op_AND_71)), DUP(op_AND_71))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_85), DUP(op_AND_85))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_94 = SHIFTL0(op_MUL_92, SN(32, 0)); + RzILOpPure *op_RSHIFT_96 = SHIFTRA(op_LSHIFT_94, SN(32, 16)); + RzILOpPure *op_ADD_97 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_63), DUP(op_AND_63))), CAST(32, MSB(DUP(op_AND_63)), DUP(op_AND_63))), op_RSHIFT_96); + RzILOpPure *op_EQ_98 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_ADD_52), SN(32, 0), SN(32, 0x20)), op_ADD_97); + RzILOpPure *op_RSHIFT_145 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_147 = LOGAND(op_RSHIFT_145, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_153 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_155 = LOGAND(op_RSHIFT_153, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_166 = SHIFTRA(DUP(Rtt), SN(32, 0x30)); + RzILOpPure *op_AND_169 = LOGAND(op_RSHIFT_166, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_176 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, 
MSB(CAST(32, MSB(op_AND_155), DUP(op_AND_155))), CAST(32, MSB(DUP(op_AND_155)), DUP(op_AND_155)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_155)), DUP(op_AND_155))), CAST(32, MSB(DUP(op_AND_155)), DUP(op_AND_155))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_155)), DUP(op_AND_155))), CAST(32, MSB(DUP(op_AND_155)), DUP(op_AND_155)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_155)), DUP(op_AND_155))), CAST(32, MSB(DUP(op_AND_155)), DUP(op_AND_155))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_169), DUP(op_AND_169))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_178 = SHIFTL0(op_MUL_176, SN(32, 0)); + RzILOpPure *op_RSHIFT_180 = SHIFTRA(op_LSHIFT_178, SN(32, 16)); + RzILOpPure *op_ADD_181 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_147), DUP(op_AND_147))), CAST(32, MSB(DUP(op_AND_147)), DUP(op_AND_147))), op_RSHIFT_180); + RzILOpPure *op_LT_184 = SLT(op_ADD_181, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_189 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_190 = NEG(op_LSHIFT_189); + RzILOpPure *op_LSHIFT_195 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_198 = SUB(op_LSHIFT_195, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_199 = ITE(op_LT_184, op_NEG_190, op_SUB_198); + RzILOpEffect *gcc_expr_200 = BRANCH(op_EQ_98, EMPTY(), set_usr_field_call_141); + + // h_tmp272 = HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff)))), 0x0, 0x10) << 0x0) >> 0x10)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff)))), 0x0, 0x10) << 0x0) >> 0x10))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 
0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff)))), 0x0, 0x10) << 0x0) >> 0x10) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_202 = SETL("h_tmp272", cond_199); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx > ...; + RzILOpEffect *seq_203 = SEQN(2, gcc_expr_200, op_ASSIGN_hybrid_tmp_202); + + // Rxx = ((Rxx & (~(0xffffffff << 0x20))) | ((((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff)))), 0x0, 0x10) << 0x0) >> 0x10)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff)))), 0x0, 0x10) << 0x0) >> 0x10)) ? ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff)))), 0x0, 0x10) << 0x0) >> 0x10) : h_tmp272) & 0xffffffff) << 0x20)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0x20)); + RzILOpPure *op_NOT_6 = LOGNOT(op_LSHIFT_5); + RzILOpPure *op_AND_7 = LOGAND(READ_REG(pkt, Rxx_op, false), op_NOT_6); + RzILOpPure *op_RSHIFT_102 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_104 = LOGAND(op_RSHIFT_102, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_110 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_112 = LOGAND(op_RSHIFT_110, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_123 = SHIFTRA(DUP(Rtt), SN(32, 0x30)); + RzILOpPure *op_AND_126 = LOGAND(op_RSHIFT_123, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_133 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_112), DUP(op_AND_112))), CAST(32, MSB(DUP(op_AND_112)), DUP(op_AND_112)))), CAST(64, MSB(CAST(32, 
MSB(DUP(op_AND_112)), DUP(op_AND_112))), CAST(32, MSB(DUP(op_AND_112)), DUP(op_AND_112))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_112)), DUP(op_AND_112))), CAST(32, MSB(DUP(op_AND_112)), DUP(op_AND_112)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_112)), DUP(op_AND_112))), CAST(32, MSB(DUP(op_AND_112)), DUP(op_AND_112))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_126), DUP(op_AND_126))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_135 = SHIFTL0(op_MUL_133, SN(32, 0)); + RzILOpPure *op_RSHIFT_137 = SHIFTRA(op_LSHIFT_135, SN(32, 16)); + RzILOpPure *op_ADD_138 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_104), DUP(op_AND_104))), CAST(32, MSB(DUP(op_AND_104)), DUP(op_AND_104))), op_RSHIFT_137); + RzILOpPure *cond_204 = ITE(DUP(op_EQ_98), op_ADD_138, VARL("h_tmp272")); + RzILOpPure *op_AND_206 = LOGAND(cond_204, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_210 = SHIFTL0(op_AND_206, SN(32, 0x20)); + RzILOpPure *op_OR_211 = LOGOR(op_AND_7, op_LSHIFT_210); + RzILOpEffect *op_ASSIGN_212 = WRITE_REG(bundle, Rxx_op, op_OR_211); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((R ...; + RzILOpEffect *seq_213 = SEQN(2, seq_203, op_ASSIGN_212); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_353 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10) << 0x0) >> 0x10)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10) << 0x0) >> 0x10))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((((st64) ((st32) 
((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10) << 0x0) >> 0x10) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_228 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_230 = LOGAND(op_RSHIFT_228, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_236 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_238 = LOGAND(op_RSHIFT_236, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_249 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_252 = LOGAND(op_RSHIFT_249, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_259 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_238), DUP(op_AND_238))), CAST(32, MSB(DUP(op_AND_238)), DUP(op_AND_238)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_238)), DUP(op_AND_238))), CAST(32, MSB(DUP(op_AND_238)), DUP(op_AND_238))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_238)), DUP(op_AND_238))), CAST(32, MSB(DUP(op_AND_238)), DUP(op_AND_238)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_238)), DUP(op_AND_238))), CAST(32, MSB(DUP(op_AND_238)), DUP(op_AND_238))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_252), DUP(op_AND_252))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_261 = SHIFTL0(op_MUL_259, SN(32, 0)); + RzILOpPure *op_RSHIFT_263 = SHIFTRA(op_LSHIFT_261, SN(32, 16)); + RzILOpPure *op_ADD_264 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_230), DUP(op_AND_230))), CAST(32, MSB(DUP(op_AND_230)), DUP(op_AND_230))), op_RSHIFT_263); + RzILOpPure *op_RSHIFT_273 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_275 = LOGAND(op_RSHIFT_273, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_281 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_283 = LOGAND(op_RSHIFT_281, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_294 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_297 = LOGAND(op_RSHIFT_294, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + 
RzILOpPure *op_MUL_304 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_283), DUP(op_AND_283))), CAST(32, MSB(DUP(op_AND_283)), DUP(op_AND_283)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_283)), DUP(op_AND_283))), CAST(32, MSB(DUP(op_AND_283)), DUP(op_AND_283))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_283)), DUP(op_AND_283))), CAST(32, MSB(DUP(op_AND_283)), DUP(op_AND_283)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_283)), DUP(op_AND_283))), CAST(32, MSB(DUP(op_AND_283)), DUP(op_AND_283))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_297), DUP(op_AND_297))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_306 = SHIFTL0(op_MUL_304, SN(32, 0)); + RzILOpPure *op_RSHIFT_308 = SHIFTRA(op_LSHIFT_306, SN(32, 16)); + RzILOpPure *op_ADD_309 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_275), DUP(op_AND_275))), CAST(32, MSB(DUP(op_AND_275)), DUP(op_AND_275))), op_RSHIFT_308); + RzILOpPure *op_EQ_310 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_ADD_264), SN(32, 0), SN(32, 0x20)), op_ADD_309); + RzILOpPure *op_RSHIFT_357 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_359 = LOGAND(op_RSHIFT_357, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_365 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_367 = LOGAND(op_RSHIFT_365, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_378 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_381 = LOGAND(op_RSHIFT_378, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_388 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_367), DUP(op_AND_367))), CAST(32, MSB(DUP(op_AND_367)), DUP(op_AND_367)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_367)), DUP(op_AND_367))), CAST(32, MSB(DUP(op_AND_367)), DUP(op_AND_367))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_367)), DUP(op_AND_367))), CAST(32, MSB(DUP(op_AND_367)), DUP(op_AND_367)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_367)), DUP(op_AND_367))), CAST(32, MSB(DUP(op_AND_367)), DUP(op_AND_367))))), SEXTRACT64(CAST(64, 
IL_FALSE, CAST(16, MSB(op_AND_381), DUP(op_AND_381))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_390 = SHIFTL0(op_MUL_388, SN(32, 0)); + RzILOpPure *op_RSHIFT_392 = SHIFTRA(op_LSHIFT_390, SN(32, 16)); + RzILOpPure *op_ADD_393 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_359), DUP(op_AND_359))), CAST(32, MSB(DUP(op_AND_359)), DUP(op_AND_359))), op_RSHIFT_392); + RzILOpPure *op_LT_396 = SLT(op_ADD_393, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_401 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_402 = NEG(op_LSHIFT_401); + RzILOpPure *op_LSHIFT_407 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_410 = SUB(op_LSHIFT_407, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_411 = ITE(op_LT_396, op_NEG_402, op_SUB_410); + RzILOpEffect *gcc_expr_412 = BRANCH(op_EQ_310, EMPTY(), set_usr_field_call_353); + + // h_tmp273 = HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10) << 0x0) >> 0x10)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10) << 0x0) >> 0x10))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10) << 0x0) >> 0x10) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_414 = SETL("h_tmp273", cond_411); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx > ...; + RzILOpEffect *seq_415 = SEQN(2, gcc_expr_412, op_ASSIGN_hybrid_tmp_414); + + // Rxx = ((Rxx & (~(0xffffffff << 0x0))) | ((((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10) << 0x0) >> 0x10)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10) << 0x0) >> 0x10)) ? ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10) << 0x0) >> 0x10) : h_tmp273) & 0xffffffff) << 0x0)); + RzILOpPure *op_LSHIFT_219 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0)); + RzILOpPure *op_NOT_220 = LOGNOT(op_LSHIFT_219); + RzILOpPure *op_AND_221 = LOGAND(READ_REG(pkt, Rxx_op, false), op_NOT_220); + RzILOpPure *op_RSHIFT_314 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_316 = LOGAND(op_RSHIFT_314, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_322 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_324 = LOGAND(op_RSHIFT_322, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_335 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_338 = LOGAND(op_RSHIFT_335, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_345 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_324), DUP(op_AND_324))), CAST(32, MSB(DUP(op_AND_324)), DUP(op_AND_324)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_324)), DUP(op_AND_324))), CAST(32, MSB(DUP(op_AND_324)), DUP(op_AND_324))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_324)), 
DUP(op_AND_324))), CAST(32, MSB(DUP(op_AND_324)), DUP(op_AND_324)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_324)), DUP(op_AND_324))), CAST(32, MSB(DUP(op_AND_324)), DUP(op_AND_324))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_338), DUP(op_AND_338))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_347 = SHIFTL0(op_MUL_345, SN(32, 0)); + RzILOpPure *op_RSHIFT_349 = SHIFTRA(op_LSHIFT_347, SN(32, 16)); + RzILOpPure *op_ADD_350 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_316), DUP(op_AND_316))), CAST(32, MSB(DUP(op_AND_316)), DUP(op_AND_316))), op_RSHIFT_349); + RzILOpPure *cond_416 = ITE(DUP(op_EQ_310), op_ADD_350, VARL("h_tmp273")); + RzILOpPure *op_AND_418 = LOGAND(cond_416, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_422 = SHIFTL0(op_AND_418, SN(32, 0)); + RzILOpPure *op_OR_423 = LOGOR(op_AND_221, op_LSHIFT_422); + RzILOpEffect *op_ASSIGN_424 = WRITE_REG(bundle, Rxx_op, op_OR_423); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((R ...; + RzILOpEffect *seq_425 = SEQN(2, seq_415, op_ASSIGN_424); + + RzILOpEffect *instruction_sequence = SEQN(2, seq_213, seq_425); + return instruction_sequence; +} + +// Rxx += vmpywoh(Rss,Rtt):<<1:sat +RzILOpEffect *hex_il_op_m2_mmachs_s1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rxx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_141 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff)))), 0x0, 0x10) << 
0x1) >> 0x10)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff)))), 0x0, 0x10) << 0x1) >> 0x10))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff)))), 0x0, 0x10) << 0x1) >> 0x10) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_23 = SHIFTRA(Rss, SN(32, 0x20)); + RzILOpPure *op_AND_25 = LOGAND(op_RSHIFT_23, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_37 = SHIFTRA(Rtt, SN(32, 0x30)); + RzILOpPure *op_AND_40 = LOGAND(op_RSHIFT_37, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_47 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_25), DUP(op_AND_25))), CAST(32, MSB(DUP(op_AND_25)), DUP(op_AND_25)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_25)), DUP(op_AND_25))), CAST(32, MSB(DUP(op_AND_25)), DUP(op_AND_25))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_25)), DUP(op_AND_25))), CAST(32, MSB(DUP(op_AND_25)), DUP(op_AND_25)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_25)), DUP(op_AND_25))), CAST(32, MSB(DUP(op_AND_25)), DUP(op_AND_25))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_40), DUP(op_AND_40))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_49 = SHIFTL0(op_MUL_47, SN(32, 1)); + RzILOpPure *op_RSHIFT_51 = SHIFTRA(op_LSHIFT_49, SN(32, 16)); + RzILOpPure *op_ADD_52 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_16), DUP(op_AND_16))), CAST(32, MSB(DUP(op_AND_16)), DUP(op_AND_16))), op_RSHIFT_51); + RzILOpPure *op_RSHIFT_61 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure 
*op_AND_63 = LOGAND(op_RSHIFT_61, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_69 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_71 = LOGAND(op_RSHIFT_69, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_82 = SHIFTRA(DUP(Rtt), SN(32, 0x30)); + RzILOpPure *op_AND_85 = LOGAND(op_RSHIFT_82, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_92 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_71), DUP(op_AND_71))), CAST(32, MSB(DUP(op_AND_71)), DUP(op_AND_71)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_71)), DUP(op_AND_71))), CAST(32, MSB(DUP(op_AND_71)), DUP(op_AND_71))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_71)), DUP(op_AND_71))), CAST(32, MSB(DUP(op_AND_71)), DUP(op_AND_71)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_71)), DUP(op_AND_71))), CAST(32, MSB(DUP(op_AND_71)), DUP(op_AND_71))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_85), DUP(op_AND_85))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_94 = SHIFTL0(op_MUL_92, SN(32, 1)); + RzILOpPure *op_RSHIFT_96 = SHIFTRA(op_LSHIFT_94, SN(32, 16)); + RzILOpPure *op_ADD_97 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_63), DUP(op_AND_63))), CAST(32, MSB(DUP(op_AND_63)), DUP(op_AND_63))), op_RSHIFT_96); + RzILOpPure *op_EQ_98 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_ADD_52), SN(32, 0), SN(32, 0x20)), op_ADD_97); + RzILOpPure *op_RSHIFT_145 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_147 = LOGAND(op_RSHIFT_145, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_153 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_155 = LOGAND(op_RSHIFT_153, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_166 = SHIFTRA(DUP(Rtt), SN(32, 0x30)); + RzILOpPure *op_AND_169 = LOGAND(op_RSHIFT_166, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_176 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_155), DUP(op_AND_155))), CAST(32, MSB(DUP(op_AND_155)), DUP(op_AND_155)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_155)), 
DUP(op_AND_155))), CAST(32, MSB(DUP(op_AND_155)), DUP(op_AND_155))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_155)), DUP(op_AND_155))), CAST(32, MSB(DUP(op_AND_155)), DUP(op_AND_155)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_155)), DUP(op_AND_155))), CAST(32, MSB(DUP(op_AND_155)), DUP(op_AND_155))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_169), DUP(op_AND_169))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_178 = SHIFTL0(op_MUL_176, SN(32, 1)); + RzILOpPure *op_RSHIFT_180 = SHIFTRA(op_LSHIFT_178, SN(32, 16)); + RzILOpPure *op_ADD_181 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_147), DUP(op_AND_147))), CAST(32, MSB(DUP(op_AND_147)), DUP(op_AND_147))), op_RSHIFT_180); + RzILOpPure *op_LT_184 = SLT(op_ADD_181, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_189 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_190 = NEG(op_LSHIFT_189); + RzILOpPure *op_LSHIFT_195 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_198 = SUB(op_LSHIFT_195, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_199 = ITE(op_LT_184, op_NEG_190, op_SUB_198); + RzILOpEffect *gcc_expr_200 = BRANCH(op_EQ_98, EMPTY(), set_usr_field_call_141); + + // h_tmp274 = HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff)))), 0x0, 0x10) << 0x1) >> 0x10)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff)))), 0x0, 0x10) << 0x1) >> 0x10))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff)))), 0x0, 0x10) << 0x1) >> 0x10) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_202 = SETL("h_tmp274", cond_199); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx > ...; + RzILOpEffect *seq_203 = SEQN(2, gcc_expr_200, op_ASSIGN_hybrid_tmp_202); + + // Rxx = ((Rxx & (~(0xffffffff << 0x20))) | ((((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff)))), 0x0, 0x10) << 0x1) >> 0x10)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff)))), 0x0, 0x10) << 0x1) >> 0x10)) ? ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff)))), 0x0, 0x10) << 0x1) >> 0x10) : h_tmp274) & 0xffffffff) << 0x20)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0x20)); + RzILOpPure *op_NOT_6 = LOGNOT(op_LSHIFT_5); + RzILOpPure *op_AND_7 = LOGAND(READ_REG(pkt, Rxx_op, false), op_NOT_6); + RzILOpPure *op_RSHIFT_102 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_104 = LOGAND(op_RSHIFT_102, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_110 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_112 = LOGAND(op_RSHIFT_110, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_123 = SHIFTRA(DUP(Rtt), SN(32, 0x30)); + RzILOpPure *op_AND_126 = LOGAND(op_RSHIFT_123, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_133 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_112), DUP(op_AND_112))), CAST(32, MSB(DUP(op_AND_112)), DUP(op_AND_112)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_112)), DUP(op_AND_112))), CAST(32, MSB(DUP(op_AND_112)), DUP(op_AND_112))))), CAST(32, MSB(CAST(64, MSB(CAST(32, 
MSB(DUP(op_AND_112)), DUP(op_AND_112))), CAST(32, MSB(DUP(op_AND_112)), DUP(op_AND_112)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_112)), DUP(op_AND_112))), CAST(32, MSB(DUP(op_AND_112)), DUP(op_AND_112))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_126), DUP(op_AND_126))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_135 = SHIFTL0(op_MUL_133, SN(32, 1)); + RzILOpPure *op_RSHIFT_137 = SHIFTRA(op_LSHIFT_135, SN(32, 16)); + RzILOpPure *op_ADD_138 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_104), DUP(op_AND_104))), CAST(32, MSB(DUP(op_AND_104)), DUP(op_AND_104))), op_RSHIFT_137); + RzILOpPure *cond_204 = ITE(DUP(op_EQ_98), op_ADD_138, VARL("h_tmp274")); + RzILOpPure *op_AND_206 = LOGAND(cond_204, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_210 = SHIFTL0(op_AND_206, SN(32, 0x20)); + RzILOpPure *op_OR_211 = LOGOR(op_AND_7, op_LSHIFT_210); + RzILOpEffect *op_ASSIGN_212 = WRITE_REG(bundle, Rxx_op, op_OR_211); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((R ...; + RzILOpEffect *seq_213 = SEQN(2, seq_203, op_ASSIGN_212); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_353 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10) << 0x1) >> 0x10)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10) << 0x1) >> 0x10))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10) << 0x1) 
>> 0x10) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_228 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_230 = LOGAND(op_RSHIFT_228, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_236 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_238 = LOGAND(op_RSHIFT_236, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_249 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_252 = LOGAND(op_RSHIFT_249, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_259 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_238), DUP(op_AND_238))), CAST(32, MSB(DUP(op_AND_238)), DUP(op_AND_238)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_238)), DUP(op_AND_238))), CAST(32, MSB(DUP(op_AND_238)), DUP(op_AND_238))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_238)), DUP(op_AND_238))), CAST(32, MSB(DUP(op_AND_238)), DUP(op_AND_238)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_238)), DUP(op_AND_238))), CAST(32, MSB(DUP(op_AND_238)), DUP(op_AND_238))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_252), DUP(op_AND_252))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_261 = SHIFTL0(op_MUL_259, SN(32, 1)); + RzILOpPure *op_RSHIFT_263 = SHIFTRA(op_LSHIFT_261, SN(32, 16)); + RzILOpPure *op_ADD_264 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_230), DUP(op_AND_230))), CAST(32, MSB(DUP(op_AND_230)), DUP(op_AND_230))), op_RSHIFT_263); + RzILOpPure *op_RSHIFT_273 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_275 = LOGAND(op_RSHIFT_273, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_281 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_283 = LOGAND(op_RSHIFT_281, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_294 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_297 = LOGAND(op_RSHIFT_294, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_304 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_283), DUP(op_AND_283))), CAST(32, 
MSB(DUP(op_AND_283)), DUP(op_AND_283)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_283)), DUP(op_AND_283))), CAST(32, MSB(DUP(op_AND_283)), DUP(op_AND_283))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_283)), DUP(op_AND_283))), CAST(32, MSB(DUP(op_AND_283)), DUP(op_AND_283)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_283)), DUP(op_AND_283))), CAST(32, MSB(DUP(op_AND_283)), DUP(op_AND_283))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_297), DUP(op_AND_297))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_306 = SHIFTL0(op_MUL_304, SN(32, 1)); + RzILOpPure *op_RSHIFT_308 = SHIFTRA(op_LSHIFT_306, SN(32, 16)); + RzILOpPure *op_ADD_309 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_275), DUP(op_AND_275))), CAST(32, MSB(DUP(op_AND_275)), DUP(op_AND_275))), op_RSHIFT_308); + RzILOpPure *op_EQ_310 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_ADD_264), SN(32, 0), SN(32, 0x20)), op_ADD_309); + RzILOpPure *op_RSHIFT_357 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_359 = LOGAND(op_RSHIFT_357, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_365 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_367 = LOGAND(op_RSHIFT_365, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_378 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_381 = LOGAND(op_RSHIFT_378, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_388 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_367), DUP(op_AND_367))), CAST(32, MSB(DUP(op_AND_367)), DUP(op_AND_367)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_367)), DUP(op_AND_367))), CAST(32, MSB(DUP(op_AND_367)), DUP(op_AND_367))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_367)), DUP(op_AND_367))), CAST(32, MSB(DUP(op_AND_367)), DUP(op_AND_367)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_367)), DUP(op_AND_367))), CAST(32, MSB(DUP(op_AND_367)), DUP(op_AND_367))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_381), DUP(op_AND_381))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_390 = 
SHIFTL0(op_MUL_388, SN(32, 1)); + RzILOpPure *op_RSHIFT_392 = SHIFTRA(op_LSHIFT_390, SN(32, 16)); + RzILOpPure *op_ADD_393 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_359), DUP(op_AND_359))), CAST(32, MSB(DUP(op_AND_359)), DUP(op_AND_359))), op_RSHIFT_392); + RzILOpPure *op_LT_396 = SLT(op_ADD_393, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_401 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_402 = NEG(op_LSHIFT_401); + RzILOpPure *op_LSHIFT_407 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_410 = SUB(op_LSHIFT_407, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_411 = ITE(op_LT_396, op_NEG_402, op_SUB_410); + RzILOpEffect *gcc_expr_412 = BRANCH(op_EQ_310, EMPTY(), set_usr_field_call_353); + + // h_tmp275 = HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10) << 0x1) >> 0x10)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10) << 0x1) >> 0x10))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10) << 0x1) >> 0x10) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_414 = SETL("h_tmp275", cond_411); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx > ...; + RzILOpEffect *seq_415 = SEQN(2, gcc_expr_412, op_ASSIGN_hybrid_tmp_414); + + // Rxx = ((Rxx & (~(0xffffffff << 0x0))) | ((((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10) << 0x1) >> 0x10)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10) << 0x1) >> 0x10)) ? ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10) << 0x1) >> 0x10) : h_tmp275) & 0xffffffff) << 0x0)); + RzILOpPure *op_LSHIFT_219 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0)); + RzILOpPure *op_NOT_220 = LOGNOT(op_LSHIFT_219); + RzILOpPure *op_AND_221 = LOGAND(READ_REG(pkt, Rxx_op, false), op_NOT_220); + RzILOpPure *op_RSHIFT_314 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_316 = LOGAND(op_RSHIFT_314, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_322 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_324 = LOGAND(op_RSHIFT_322, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_335 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_338 = LOGAND(op_RSHIFT_335, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_345 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_324), DUP(op_AND_324))), CAST(32, MSB(DUP(op_AND_324)), DUP(op_AND_324)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_324)), DUP(op_AND_324))), CAST(32, MSB(DUP(op_AND_324)), DUP(op_AND_324))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_324)), 
DUP(op_AND_324))), CAST(32, MSB(DUP(op_AND_324)), DUP(op_AND_324)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_324)), DUP(op_AND_324))), CAST(32, MSB(DUP(op_AND_324)), DUP(op_AND_324))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_338), DUP(op_AND_338))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_347 = SHIFTL0(op_MUL_345, SN(32, 1)); + RzILOpPure *op_RSHIFT_349 = SHIFTRA(op_LSHIFT_347, SN(32, 16)); + RzILOpPure *op_ADD_350 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_316), DUP(op_AND_316))), CAST(32, MSB(DUP(op_AND_316)), DUP(op_AND_316))), op_RSHIFT_349); + RzILOpPure *cond_416 = ITE(DUP(op_EQ_310), op_ADD_350, VARL("h_tmp275")); + RzILOpPure *op_AND_418 = LOGAND(cond_416, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_422 = SHIFTL0(op_AND_418, SN(32, 0)); + RzILOpPure *op_OR_423 = LOGOR(op_AND_221, op_LSHIFT_422); + RzILOpEffect *op_ASSIGN_424 = WRITE_REG(bundle, Rxx_op, op_OR_423); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((R ...; + RzILOpEffect *seq_425 = SEQN(2, seq_415, op_ASSIGN_424); + + RzILOpEffect *instruction_sequence = SEQN(2, seq_213, seq_425); + return instruction_sequence; +} + +// Rxx += vmpyweh(Rss,Rtt):rnd:sat +RzILOpEffect *hex_il_op_m2_mmacls_rs0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rxx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_150 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10) << 
0x0) + ((st64) 0x8000) >> 0x10)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10) << 0x0) + ((st64) 0x8000) >> 0x10))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10) << 0x0) + ((st64) 0x8000) >> 0x10) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_23 = SHIFTRA(Rss, SN(32, 0x20)); + RzILOpPure *op_AND_25 = LOGAND(op_RSHIFT_23, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_37 = SHIFTRA(Rtt, SN(32, 0x20)); + RzILOpPure *op_AND_40 = LOGAND(op_RSHIFT_37, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_47 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_25), DUP(op_AND_25))), CAST(32, MSB(DUP(op_AND_25)), DUP(op_AND_25)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_25)), DUP(op_AND_25))), CAST(32, MSB(DUP(op_AND_25)), DUP(op_AND_25))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_25)), DUP(op_AND_25))), CAST(32, MSB(DUP(op_AND_25)), DUP(op_AND_25)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_25)), DUP(op_AND_25))), CAST(32, MSB(DUP(op_AND_25)), DUP(op_AND_25))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_40), DUP(op_AND_40))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_49 = SHIFTL0(op_MUL_47, SN(32, 0)); + RzILOpPure *op_ADD_52 = ADD(op_LSHIFT_49, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_54 = SHIFTRA(op_ADD_52, SN(32, 16)); + RzILOpPure *op_ADD_55 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_16), DUP(op_AND_16))), CAST(32, 
MSB(DUP(op_AND_16)), DUP(op_AND_16))), op_RSHIFT_54); + RzILOpPure *op_RSHIFT_64 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_66 = LOGAND(op_RSHIFT_64, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_72 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_74 = LOGAND(op_RSHIFT_72, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_85 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_88 = LOGAND(op_RSHIFT_85, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_95 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_74), DUP(op_AND_74))), CAST(32, MSB(DUP(op_AND_74)), DUP(op_AND_74)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_74)), DUP(op_AND_74))), CAST(32, MSB(DUP(op_AND_74)), DUP(op_AND_74))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_74)), DUP(op_AND_74))), CAST(32, MSB(DUP(op_AND_74)), DUP(op_AND_74)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_74)), DUP(op_AND_74))), CAST(32, MSB(DUP(op_AND_74)), DUP(op_AND_74))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_88), DUP(op_AND_88))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_97 = SHIFTL0(op_MUL_95, SN(32, 0)); + RzILOpPure *op_ADD_100 = ADD(op_LSHIFT_97, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_102 = SHIFTRA(op_ADD_100, SN(32, 16)); + RzILOpPure *op_ADD_103 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_66), DUP(op_AND_66))), CAST(32, MSB(DUP(op_AND_66)), DUP(op_AND_66))), op_RSHIFT_102); + RzILOpPure *op_EQ_104 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_ADD_55), SN(32, 0), SN(32, 0x20)), op_ADD_103); + RzILOpPure *op_RSHIFT_154 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_156 = LOGAND(op_RSHIFT_154, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_162 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_164 = LOGAND(op_RSHIFT_162, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_175 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_178 = LOGAND(op_RSHIFT_175, CAST(64, MSB(SN(32, 0xffff)), 
SN(32, 0xffff))); + RzILOpPure *op_MUL_185 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_164), DUP(op_AND_164))), CAST(32, MSB(DUP(op_AND_164)), DUP(op_AND_164)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_164)), DUP(op_AND_164))), CAST(32, MSB(DUP(op_AND_164)), DUP(op_AND_164))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_164)), DUP(op_AND_164))), CAST(32, MSB(DUP(op_AND_164)), DUP(op_AND_164)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_164)), DUP(op_AND_164))), CAST(32, MSB(DUP(op_AND_164)), DUP(op_AND_164))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_178), DUP(op_AND_178))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_187 = SHIFTL0(op_MUL_185, SN(32, 0)); + RzILOpPure *op_ADD_190 = ADD(op_LSHIFT_187, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_192 = SHIFTRA(op_ADD_190, SN(32, 16)); + RzILOpPure *op_ADD_193 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_156), DUP(op_AND_156))), CAST(32, MSB(DUP(op_AND_156)), DUP(op_AND_156))), op_RSHIFT_192); + RzILOpPure *op_LT_196 = SLT(op_ADD_193, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_201 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_202 = NEG(op_LSHIFT_201); + RzILOpPure *op_LSHIFT_207 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_210 = SUB(op_LSHIFT_207, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_211 = ITE(op_LT_196, op_NEG_202, op_SUB_210); + RzILOpEffect *gcc_expr_212 = BRANCH(op_EQ_104, EMPTY(), set_usr_field_call_150); + + // h_tmp276 = HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10) << 0x0) + ((st64) 0x8000) >> 0x10)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10) << 0x0) + 
((st64) 0x8000) >> 0x10))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10) << 0x0) + ((st64) 0x8000) >> 0x10) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_214 = SETL("h_tmp276", cond_211); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx > ...; + RzILOpEffect *seq_215 = SEQN(2, gcc_expr_212, op_ASSIGN_hybrid_tmp_214); + + // Rxx = ((Rxx & (~(0xffffffff << 0x20))) | ((((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10) << 0x0) + ((st64) 0x8000) >> 0x10)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10) << 0x0) + ((st64) 0x8000) >> 0x10)) ? 
((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10) << 0x0) + ((st64) 0x8000) >> 0x10) : h_tmp276) & 0xffffffff) << 0x20)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0x20)); + RzILOpPure *op_NOT_6 = LOGNOT(op_LSHIFT_5); + RzILOpPure *op_AND_7 = LOGAND(READ_REG(pkt, Rxx_op, false), op_NOT_6); + RzILOpPure *op_RSHIFT_108 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_110 = LOGAND(op_RSHIFT_108, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_116 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_118 = LOGAND(op_RSHIFT_116, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_129 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_132 = LOGAND(op_RSHIFT_129, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_139 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_118), DUP(op_AND_118))), CAST(32, MSB(DUP(op_AND_118)), DUP(op_AND_118)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_118)), DUP(op_AND_118))), CAST(32, MSB(DUP(op_AND_118)), DUP(op_AND_118))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_118)), DUP(op_AND_118))), CAST(32, MSB(DUP(op_AND_118)), DUP(op_AND_118)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_118)), DUP(op_AND_118))), CAST(32, MSB(DUP(op_AND_118)), DUP(op_AND_118))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_132), DUP(op_AND_132))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_141 = SHIFTL0(op_MUL_139, SN(32, 0)); + RzILOpPure *op_ADD_144 = ADD(op_LSHIFT_141, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_146 = SHIFTRA(op_ADD_144, SN(32, 16)); + RzILOpPure *op_ADD_147 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_110), DUP(op_AND_110))), CAST(32, MSB(DUP(op_AND_110)), DUP(op_AND_110))), op_RSHIFT_146); + RzILOpPure *cond_216 = ITE(DUP(op_EQ_104), op_ADD_147, VARL("h_tmp276")); + RzILOpPure *op_AND_218 
= LOGAND(cond_216, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_222 = SHIFTL0(op_AND_218, SN(32, 0x20)); + RzILOpPure *op_OR_223 = LOGOR(op_AND_7, op_LSHIFT_222); + RzILOpEffect *op_ASSIGN_224 = WRITE_REG(bundle, Rxx_op, op_OR_223); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((R ...; + RzILOpEffect *seq_225 = SEQN(2, seq_215, op_ASSIGN_224); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_374 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10) << 0x0) + ((st64) 0x8000) >> 0x10)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10) << 0x0) + ((st64) 0x8000) >> 0x10))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10) << 0x0) + ((st64) 0x8000) >> 0x10) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_240 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_242 = LOGAND(op_RSHIFT_240, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_248 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_250 = LOGAND(op_RSHIFT_248, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_261 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_264 = LOGAND(op_RSHIFT_261, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_271 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_250), DUP(op_AND_250))), CAST(32, MSB(DUP(op_AND_250)), DUP(op_AND_250)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_250)), DUP(op_AND_250))), CAST(32, MSB(DUP(op_AND_250)), DUP(op_AND_250))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_250)), DUP(op_AND_250))), CAST(32, MSB(DUP(op_AND_250)), DUP(op_AND_250)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_250)), DUP(op_AND_250))), CAST(32, MSB(DUP(op_AND_250)), DUP(op_AND_250))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_264), DUP(op_AND_264))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_273 = SHIFTL0(op_MUL_271, SN(32, 0)); + RzILOpPure *op_ADD_276 = ADD(op_LSHIFT_273, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_278 = SHIFTRA(op_ADD_276, SN(32, 16)); + RzILOpPure *op_ADD_279 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_242), DUP(op_AND_242))), CAST(32, MSB(DUP(op_AND_242)), DUP(op_AND_242))), op_RSHIFT_278); + RzILOpPure *op_RSHIFT_288 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_290 = LOGAND(op_RSHIFT_288, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_296 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_298 = LOGAND(op_RSHIFT_296, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_309 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_312 = LOGAND(op_RSHIFT_309, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_319 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, 
MSB(CAST(32, MSB(op_AND_298), DUP(op_AND_298))), CAST(32, MSB(DUP(op_AND_298)), DUP(op_AND_298)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_298)), DUP(op_AND_298))), CAST(32, MSB(DUP(op_AND_298)), DUP(op_AND_298))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_298)), DUP(op_AND_298))), CAST(32, MSB(DUP(op_AND_298)), DUP(op_AND_298)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_298)), DUP(op_AND_298))), CAST(32, MSB(DUP(op_AND_298)), DUP(op_AND_298))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_312), DUP(op_AND_312))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_321 = SHIFTL0(op_MUL_319, SN(32, 0)); + RzILOpPure *op_ADD_324 = ADD(op_LSHIFT_321, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_326 = SHIFTRA(op_ADD_324, SN(32, 16)); + RzILOpPure *op_ADD_327 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_290), DUP(op_AND_290))), CAST(32, MSB(DUP(op_AND_290)), DUP(op_AND_290))), op_RSHIFT_326); + RzILOpPure *op_EQ_328 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_ADD_279), SN(32, 0), SN(32, 0x20)), op_ADD_327); + RzILOpPure *op_RSHIFT_378 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_380 = LOGAND(op_RSHIFT_378, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_386 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_388 = LOGAND(op_RSHIFT_386, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_399 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_402 = LOGAND(op_RSHIFT_399, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_409 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_388), DUP(op_AND_388))), CAST(32, MSB(DUP(op_AND_388)), DUP(op_AND_388)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_388)), DUP(op_AND_388))), CAST(32, MSB(DUP(op_AND_388)), DUP(op_AND_388))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_388)), DUP(op_AND_388))), CAST(32, MSB(DUP(op_AND_388)), DUP(op_AND_388)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_388)), DUP(op_AND_388))), CAST(32, MSB(DUP(op_AND_388)), 
DUP(op_AND_388))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_402), DUP(op_AND_402))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_411 = SHIFTL0(op_MUL_409, SN(32, 0)); + RzILOpPure *op_ADD_414 = ADD(op_LSHIFT_411, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_416 = SHIFTRA(op_ADD_414, SN(32, 16)); + RzILOpPure *op_ADD_417 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_380), DUP(op_AND_380))), CAST(32, MSB(DUP(op_AND_380)), DUP(op_AND_380))), op_RSHIFT_416); + RzILOpPure *op_LT_420 = SLT(op_ADD_417, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_425 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_426 = NEG(op_LSHIFT_425); + RzILOpPure *op_LSHIFT_431 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_434 = SUB(op_LSHIFT_431, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_435 = ITE(op_LT_420, op_NEG_426, op_SUB_434); + RzILOpEffect *gcc_expr_436 = BRANCH(op_EQ_328, EMPTY(), set_usr_field_call_374); + + // h_tmp277 = HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10) << 0x0) + ((st64) 0x8000) >> 0x10)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10) << 0x0) + ((st64) 0x8000) >> 0x10))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10) << 0x0) + ((st64) 0x8000) >> 0x10) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_438 = SETL("h_tmp277", cond_435); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx > ...; + RzILOpEffect *seq_439 = SEQN(2, gcc_expr_436, op_ASSIGN_hybrid_tmp_438); + + // Rxx = ((Rxx & (~(0xffffffff << 0x0))) | ((((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10) << 0x0) + ((st64) 0x8000) >> 0x10)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10) << 0x0) + ((st64) 0x8000) >> 0x10)) ? ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10) << 0x0) + ((st64) 0x8000) >> 0x10) : h_tmp277) & 0xffffffff) << 0x0)); + RzILOpPure *op_LSHIFT_231 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0)); + RzILOpPure *op_NOT_232 = LOGNOT(op_LSHIFT_231); + RzILOpPure *op_AND_233 = LOGAND(READ_REG(pkt, Rxx_op, false), op_NOT_232); + RzILOpPure *op_RSHIFT_332 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_334 = LOGAND(op_RSHIFT_332, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_340 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_342 = LOGAND(op_RSHIFT_340, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_353 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_356 = LOGAND(op_RSHIFT_353, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_363 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_342), DUP(op_AND_342))), CAST(32, MSB(DUP(op_AND_342)), DUP(op_AND_342)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_342)), DUP(op_AND_342))), CAST(32, MSB(DUP(op_AND_342)), DUP(op_AND_342))))), CAST(32, 
MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_342)), DUP(op_AND_342))), CAST(32, MSB(DUP(op_AND_342)), DUP(op_AND_342)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_342)), DUP(op_AND_342))), CAST(32, MSB(DUP(op_AND_342)), DUP(op_AND_342))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_356), DUP(op_AND_356))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_365 = SHIFTL0(op_MUL_363, SN(32, 0)); + RzILOpPure *op_ADD_368 = ADD(op_LSHIFT_365, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_370 = SHIFTRA(op_ADD_368, SN(32, 16)); + RzILOpPure *op_ADD_371 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_334), DUP(op_AND_334))), CAST(32, MSB(DUP(op_AND_334)), DUP(op_AND_334))), op_RSHIFT_370); + RzILOpPure *cond_440 = ITE(DUP(op_EQ_328), op_ADD_371, VARL("h_tmp277")); + RzILOpPure *op_AND_442 = LOGAND(cond_440, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_446 = SHIFTL0(op_AND_442, SN(32, 0)); + RzILOpPure *op_OR_447 = LOGOR(op_AND_233, op_LSHIFT_446); + RzILOpEffect *op_ASSIGN_448 = WRITE_REG(bundle, Rxx_op, op_OR_447); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((R ...; + RzILOpEffect *seq_449 = SEQN(2, seq_439, op_ASSIGN_448); + + RzILOpEffect *instruction_sequence = SEQN(2, seq_225, seq_449); + return instruction_sequence; +} + +// Rxx += vmpyweh(Rss,Rtt):<<1:rnd:sat +RzILOpEffect *hex_il_op_m2_mmacls_rs1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rxx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_150 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + 
((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10) << 0x1) + ((st64) 0x8000) >> 0x10)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10) << 0x1) + ((st64) 0x8000) >> 0x10))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10) << 0x1) + ((st64) 0x8000) >> 0x10) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_23 = SHIFTRA(Rss, SN(32, 0x20)); + RzILOpPure *op_AND_25 = LOGAND(op_RSHIFT_23, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_37 = SHIFTRA(Rtt, SN(32, 0x20)); + RzILOpPure *op_AND_40 = LOGAND(op_RSHIFT_37, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_47 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_25), DUP(op_AND_25))), CAST(32, MSB(DUP(op_AND_25)), DUP(op_AND_25)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_25)), DUP(op_AND_25))), CAST(32, MSB(DUP(op_AND_25)), DUP(op_AND_25))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_25)), DUP(op_AND_25))), CAST(32, MSB(DUP(op_AND_25)), DUP(op_AND_25)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_25)), DUP(op_AND_25))), CAST(32, MSB(DUP(op_AND_25)), DUP(op_AND_25))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_40), DUP(op_AND_40))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_49 = SHIFTL0(op_MUL_47, SN(32, 1)); + RzILOpPure *op_ADD_52 = ADD(op_LSHIFT_49, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure 
*op_RSHIFT_54 = SHIFTRA(op_ADD_52, SN(32, 16)); + RzILOpPure *op_ADD_55 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_16), DUP(op_AND_16))), CAST(32, MSB(DUP(op_AND_16)), DUP(op_AND_16))), op_RSHIFT_54); + RzILOpPure *op_RSHIFT_64 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_66 = LOGAND(op_RSHIFT_64, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_72 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_74 = LOGAND(op_RSHIFT_72, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_85 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_88 = LOGAND(op_RSHIFT_85, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_95 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_74), DUP(op_AND_74))), CAST(32, MSB(DUP(op_AND_74)), DUP(op_AND_74)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_74)), DUP(op_AND_74))), CAST(32, MSB(DUP(op_AND_74)), DUP(op_AND_74))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_74)), DUP(op_AND_74))), CAST(32, MSB(DUP(op_AND_74)), DUP(op_AND_74)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_74)), DUP(op_AND_74))), CAST(32, MSB(DUP(op_AND_74)), DUP(op_AND_74))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_88), DUP(op_AND_88))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_97 = SHIFTL0(op_MUL_95, SN(32, 1)); + RzILOpPure *op_ADD_100 = ADD(op_LSHIFT_97, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_102 = SHIFTRA(op_ADD_100, SN(32, 16)); + RzILOpPure *op_ADD_103 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_66), DUP(op_AND_66))), CAST(32, MSB(DUP(op_AND_66)), DUP(op_AND_66))), op_RSHIFT_102); + RzILOpPure *op_EQ_104 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_ADD_55), SN(32, 0), SN(32, 0x20)), op_ADD_103); + RzILOpPure *op_RSHIFT_154 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_156 = LOGAND(op_RSHIFT_154, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_162 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_164 = LOGAND(op_RSHIFT_162, SN(64, 
0xffffffff)); + RzILOpPure *op_RSHIFT_175 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_178 = LOGAND(op_RSHIFT_175, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_185 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_164), DUP(op_AND_164))), CAST(32, MSB(DUP(op_AND_164)), DUP(op_AND_164)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_164)), DUP(op_AND_164))), CAST(32, MSB(DUP(op_AND_164)), DUP(op_AND_164))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_164)), DUP(op_AND_164))), CAST(32, MSB(DUP(op_AND_164)), DUP(op_AND_164)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_164)), DUP(op_AND_164))), CAST(32, MSB(DUP(op_AND_164)), DUP(op_AND_164))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_178), DUP(op_AND_178))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_187 = SHIFTL0(op_MUL_185, SN(32, 1)); + RzILOpPure *op_ADD_190 = ADD(op_LSHIFT_187, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_192 = SHIFTRA(op_ADD_190, SN(32, 16)); + RzILOpPure *op_ADD_193 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_156), DUP(op_AND_156))), CAST(32, MSB(DUP(op_AND_156)), DUP(op_AND_156))), op_RSHIFT_192); + RzILOpPure *op_LT_196 = SLT(op_ADD_193, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_201 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_202 = NEG(op_LSHIFT_201); + RzILOpPure *op_LSHIFT_207 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_210 = SUB(op_LSHIFT_207, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_211 = ITE(op_LT_196, op_NEG_202, op_SUB_210); + RzILOpEffect *gcc_expr_212 = BRANCH(op_EQ_104, EMPTY(), set_usr_field_call_150); + + // h_tmp278 = HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10) << 0x1) + ((st64) 0x8000) >> 0x10)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) 
+ ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10) << 0x1) + ((st64) 0x8000) >> 0x10))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10) << 0x1) + ((st64) 0x8000) >> 0x10) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_214 = SETL("h_tmp278", cond_211); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx > ...; + RzILOpEffect *seq_215 = SEQN(2, gcc_expr_212, op_ASSIGN_hybrid_tmp_214); + + // Rxx = ((Rxx & (~(0xffffffff << 0x20))) | ((((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10) << 0x1) + ((st64) 0x8000) >> 0x10)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10) << 0x1) + ((st64) 0x8000) >> 0x10)) ? 
((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10) << 0x1) + ((st64) 0x8000) >> 0x10) : h_tmp278) & 0xffffffff) << 0x20)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0x20)); + RzILOpPure *op_NOT_6 = LOGNOT(op_LSHIFT_5); + RzILOpPure *op_AND_7 = LOGAND(READ_REG(pkt, Rxx_op, false), op_NOT_6); + RzILOpPure *op_RSHIFT_108 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_110 = LOGAND(op_RSHIFT_108, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_116 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_118 = LOGAND(op_RSHIFT_116, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_129 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_132 = LOGAND(op_RSHIFT_129, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_139 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_118), DUP(op_AND_118))), CAST(32, MSB(DUP(op_AND_118)), DUP(op_AND_118)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_118)), DUP(op_AND_118))), CAST(32, MSB(DUP(op_AND_118)), DUP(op_AND_118))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_118)), DUP(op_AND_118))), CAST(32, MSB(DUP(op_AND_118)), DUP(op_AND_118)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_118)), DUP(op_AND_118))), CAST(32, MSB(DUP(op_AND_118)), DUP(op_AND_118))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_132), DUP(op_AND_132))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_141 = SHIFTL0(op_MUL_139, SN(32, 1)); + RzILOpPure *op_ADD_144 = ADD(op_LSHIFT_141, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_146 = SHIFTRA(op_ADD_144, SN(32, 16)); + RzILOpPure *op_ADD_147 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_110), DUP(op_AND_110))), CAST(32, MSB(DUP(op_AND_110)), DUP(op_AND_110))), op_RSHIFT_146); + RzILOpPure *cond_216 = ITE(DUP(op_EQ_104), op_ADD_147, VARL("h_tmp278")); + RzILOpPure *op_AND_218 
= LOGAND(cond_216, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_222 = SHIFTL0(op_AND_218, SN(32, 0x20)); + RzILOpPure *op_OR_223 = LOGOR(op_AND_7, op_LSHIFT_222); + RzILOpEffect *op_ASSIGN_224 = WRITE_REG(bundle, Rxx_op, op_OR_223); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((R ...; + RzILOpEffect *seq_225 = SEQN(2, seq_215, op_ASSIGN_224); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_374 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10) << 0x1) + ((st64) 0x8000) >> 0x10)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10) << 0x1) + ((st64) 0x8000) >> 0x10))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10) << 0x1) + ((st64) 0x8000) >> 0x10) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_240 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_242 = LOGAND(op_RSHIFT_240, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_248 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_250 = LOGAND(op_RSHIFT_248, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_261 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_264 = LOGAND(op_RSHIFT_261, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_271 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_250), DUP(op_AND_250))), CAST(32, MSB(DUP(op_AND_250)), DUP(op_AND_250)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_250)), DUP(op_AND_250))), CAST(32, MSB(DUP(op_AND_250)), DUP(op_AND_250))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_250)), DUP(op_AND_250))), CAST(32, MSB(DUP(op_AND_250)), DUP(op_AND_250)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_250)), DUP(op_AND_250))), CAST(32, MSB(DUP(op_AND_250)), DUP(op_AND_250))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_264), DUP(op_AND_264))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_273 = SHIFTL0(op_MUL_271, SN(32, 1)); + RzILOpPure *op_ADD_276 = ADD(op_LSHIFT_273, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_278 = SHIFTRA(op_ADD_276, SN(32, 16)); + RzILOpPure *op_ADD_279 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_242), DUP(op_AND_242))), CAST(32, MSB(DUP(op_AND_242)), DUP(op_AND_242))), op_RSHIFT_278); + RzILOpPure *op_RSHIFT_288 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_290 = LOGAND(op_RSHIFT_288, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_296 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_298 = LOGAND(op_RSHIFT_296, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_309 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_312 = LOGAND(op_RSHIFT_309, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_319 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, 
MSB(CAST(32, MSB(op_AND_298), DUP(op_AND_298))), CAST(32, MSB(DUP(op_AND_298)), DUP(op_AND_298)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_298)), DUP(op_AND_298))), CAST(32, MSB(DUP(op_AND_298)), DUP(op_AND_298))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_298)), DUP(op_AND_298))), CAST(32, MSB(DUP(op_AND_298)), DUP(op_AND_298)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_298)), DUP(op_AND_298))), CAST(32, MSB(DUP(op_AND_298)), DUP(op_AND_298))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_312), DUP(op_AND_312))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_321 = SHIFTL0(op_MUL_319, SN(32, 1)); + RzILOpPure *op_ADD_324 = ADD(op_LSHIFT_321, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_326 = SHIFTRA(op_ADD_324, SN(32, 16)); + RzILOpPure *op_ADD_327 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_290), DUP(op_AND_290))), CAST(32, MSB(DUP(op_AND_290)), DUP(op_AND_290))), op_RSHIFT_326); + RzILOpPure *op_EQ_328 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_ADD_279), SN(32, 0), SN(32, 0x20)), op_ADD_327); + RzILOpPure *op_RSHIFT_378 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_380 = LOGAND(op_RSHIFT_378, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_386 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_388 = LOGAND(op_RSHIFT_386, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_399 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_402 = LOGAND(op_RSHIFT_399, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_409 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_388), DUP(op_AND_388))), CAST(32, MSB(DUP(op_AND_388)), DUP(op_AND_388)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_388)), DUP(op_AND_388))), CAST(32, MSB(DUP(op_AND_388)), DUP(op_AND_388))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_388)), DUP(op_AND_388))), CAST(32, MSB(DUP(op_AND_388)), DUP(op_AND_388)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_388)), DUP(op_AND_388))), CAST(32, MSB(DUP(op_AND_388)), 
DUP(op_AND_388))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_402), DUP(op_AND_402))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_411 = SHIFTL0(op_MUL_409, SN(32, 1)); + RzILOpPure *op_ADD_414 = ADD(op_LSHIFT_411, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_416 = SHIFTRA(op_ADD_414, SN(32, 16)); + RzILOpPure *op_ADD_417 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_380), DUP(op_AND_380))), CAST(32, MSB(DUP(op_AND_380)), DUP(op_AND_380))), op_RSHIFT_416); + RzILOpPure *op_LT_420 = SLT(op_ADD_417, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_425 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_426 = NEG(op_LSHIFT_425); + RzILOpPure *op_LSHIFT_431 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_434 = SUB(op_LSHIFT_431, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_435 = ITE(op_LT_420, op_NEG_426, op_SUB_434); + RzILOpEffect *gcc_expr_436 = BRANCH(op_EQ_328, EMPTY(), set_usr_field_call_374); + + // h_tmp279 = HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10) << 0x1) + ((st64) 0x8000) >> 0x10)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10) << 0x1) + ((st64) 0x8000) >> 0x10))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10) << 0x1) + ((st64) 0x8000) >> 0x10) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_438 = SETL("h_tmp279", cond_435); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx > ...; + RzILOpEffect *seq_439 = SEQN(2, gcc_expr_436, op_ASSIGN_hybrid_tmp_438); + + // Rxx = ((Rxx & (~(0xffffffff << 0x0))) | ((((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10) << 0x1) + ((st64) 0x8000) >> 0x10)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10) << 0x1) + ((st64) 0x8000) >> 0x10)) ? ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10) << 0x1) + ((st64) 0x8000) >> 0x10) : h_tmp279) & 0xffffffff) << 0x0)); + RzILOpPure *op_LSHIFT_231 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0)); + RzILOpPure *op_NOT_232 = LOGNOT(op_LSHIFT_231); + RzILOpPure *op_AND_233 = LOGAND(READ_REG(pkt, Rxx_op, false), op_NOT_232); + RzILOpPure *op_RSHIFT_332 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_334 = LOGAND(op_RSHIFT_332, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_340 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_342 = LOGAND(op_RSHIFT_340, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_353 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_356 = LOGAND(op_RSHIFT_353, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_363 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_342), DUP(op_AND_342))), CAST(32, MSB(DUP(op_AND_342)), DUP(op_AND_342)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_342)), DUP(op_AND_342))), CAST(32, MSB(DUP(op_AND_342)), DUP(op_AND_342))))), CAST(32, 
MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_342)), DUP(op_AND_342))), CAST(32, MSB(DUP(op_AND_342)), DUP(op_AND_342)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_342)), DUP(op_AND_342))), CAST(32, MSB(DUP(op_AND_342)), DUP(op_AND_342))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_356), DUP(op_AND_356))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_365 = SHIFTL0(op_MUL_363, SN(32, 1)); + RzILOpPure *op_ADD_368 = ADD(op_LSHIFT_365, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_370 = SHIFTRA(op_ADD_368, SN(32, 16)); + RzILOpPure *op_ADD_371 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_334), DUP(op_AND_334))), CAST(32, MSB(DUP(op_AND_334)), DUP(op_AND_334))), op_RSHIFT_370); + RzILOpPure *cond_440 = ITE(DUP(op_EQ_328), op_ADD_371, VARL("h_tmp279")); + RzILOpPure *op_AND_442 = LOGAND(cond_440, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_446 = SHIFTL0(op_AND_442, SN(32, 0)); + RzILOpPure *op_OR_447 = LOGOR(op_AND_233, op_LSHIFT_446); + RzILOpEffect *op_ASSIGN_448 = WRITE_REG(bundle, Rxx_op, op_OR_447); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((R ...; + RzILOpEffect *seq_449 = SEQN(2, seq_439, op_ASSIGN_448); + + RzILOpEffect *instruction_sequence = SEQN(2, seq_225, seq_449); + return instruction_sequence; +} + +// Rxx += vmpyweh(Rss,Rtt):sat +RzILOpEffect *hex_il_op_m2_mmacls_s0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rxx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_141 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + ((((st64) 
((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10) << 0x0) >> 0x10)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10) << 0x0) >> 0x10))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10) << 0x0) >> 0x10) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_23 = SHIFTRA(Rss, SN(32, 0x20)); + RzILOpPure *op_AND_25 = LOGAND(op_RSHIFT_23, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_37 = SHIFTRA(Rtt, SN(32, 0x20)); + RzILOpPure *op_AND_40 = LOGAND(op_RSHIFT_37, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_47 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_25), DUP(op_AND_25))), CAST(32, MSB(DUP(op_AND_25)), DUP(op_AND_25)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_25)), DUP(op_AND_25))), CAST(32, MSB(DUP(op_AND_25)), DUP(op_AND_25))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_25)), DUP(op_AND_25))), CAST(32, MSB(DUP(op_AND_25)), DUP(op_AND_25)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_25)), DUP(op_AND_25))), CAST(32, MSB(DUP(op_AND_25)), DUP(op_AND_25))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_40), DUP(op_AND_40))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_49 = SHIFTL0(op_MUL_47, SN(32, 0)); + RzILOpPure *op_RSHIFT_51 = SHIFTRA(op_LSHIFT_49, SN(32, 16)); + RzILOpPure *op_ADD_52 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_16), DUP(op_AND_16))), CAST(32, 
MSB(DUP(op_AND_16)), DUP(op_AND_16))), op_RSHIFT_51); + RzILOpPure *op_RSHIFT_61 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_63 = LOGAND(op_RSHIFT_61, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_69 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_71 = LOGAND(op_RSHIFT_69, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_82 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_85 = LOGAND(op_RSHIFT_82, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_92 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_71), DUP(op_AND_71))), CAST(32, MSB(DUP(op_AND_71)), DUP(op_AND_71)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_71)), DUP(op_AND_71))), CAST(32, MSB(DUP(op_AND_71)), DUP(op_AND_71))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_71)), DUP(op_AND_71))), CAST(32, MSB(DUP(op_AND_71)), DUP(op_AND_71)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_71)), DUP(op_AND_71))), CAST(32, MSB(DUP(op_AND_71)), DUP(op_AND_71))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_85), DUP(op_AND_85))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_94 = SHIFTL0(op_MUL_92, SN(32, 0)); + RzILOpPure *op_RSHIFT_96 = SHIFTRA(op_LSHIFT_94, SN(32, 16)); + RzILOpPure *op_ADD_97 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_63), DUP(op_AND_63))), CAST(32, MSB(DUP(op_AND_63)), DUP(op_AND_63))), op_RSHIFT_96); + RzILOpPure *op_EQ_98 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_ADD_52), SN(32, 0), SN(32, 0x20)), op_ADD_97); + RzILOpPure *op_RSHIFT_145 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_147 = LOGAND(op_RSHIFT_145, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_153 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_155 = LOGAND(op_RSHIFT_153, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_166 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_169 = LOGAND(op_RSHIFT_166, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_176 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, 
MSB(CAST(32, MSB(op_AND_155), DUP(op_AND_155))), CAST(32, MSB(DUP(op_AND_155)), DUP(op_AND_155)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_155)), DUP(op_AND_155))), CAST(32, MSB(DUP(op_AND_155)), DUP(op_AND_155))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_155)), DUP(op_AND_155))), CAST(32, MSB(DUP(op_AND_155)), DUP(op_AND_155)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_155)), DUP(op_AND_155))), CAST(32, MSB(DUP(op_AND_155)), DUP(op_AND_155))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_169), DUP(op_AND_169))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_178 = SHIFTL0(op_MUL_176, SN(32, 0)); + RzILOpPure *op_RSHIFT_180 = SHIFTRA(op_LSHIFT_178, SN(32, 16)); + RzILOpPure *op_ADD_181 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_147), DUP(op_AND_147))), CAST(32, MSB(DUP(op_AND_147)), DUP(op_AND_147))), op_RSHIFT_180); + RzILOpPure *op_LT_184 = SLT(op_ADD_181, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_189 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_190 = NEG(op_LSHIFT_189); + RzILOpPure *op_LSHIFT_195 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_198 = SUB(op_LSHIFT_195, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_199 = ITE(op_LT_184, op_NEG_190, op_SUB_198); + RzILOpEffect *gcc_expr_200 = BRANCH(op_EQ_98, EMPTY(), set_usr_field_call_141); + + // h_tmp280 = HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10) << 0x0) >> 0x10)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10) << 0x0) >> 0x10))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 
0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10) << 0x0) >> 0x10) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_202 = SETL("h_tmp280", cond_199); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx > ...; + RzILOpEffect *seq_203 = SEQN(2, gcc_expr_200, op_ASSIGN_hybrid_tmp_202); + + // Rxx = ((Rxx & (~(0xffffffff << 0x20))) | ((((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10) << 0x0) >> 0x10)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10) << 0x0) >> 0x10)) ? ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10) << 0x0) >> 0x10) : h_tmp280) & 0xffffffff) << 0x20)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0x20)); + RzILOpPure *op_NOT_6 = LOGNOT(op_LSHIFT_5); + RzILOpPure *op_AND_7 = LOGAND(READ_REG(pkt, Rxx_op, false), op_NOT_6); + RzILOpPure *op_RSHIFT_102 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_104 = LOGAND(op_RSHIFT_102, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_110 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_112 = LOGAND(op_RSHIFT_110, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_123 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_126 = LOGAND(op_RSHIFT_123, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_133 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_112), DUP(op_AND_112))), CAST(32, MSB(DUP(op_AND_112)), DUP(op_AND_112)))), CAST(64, MSB(CAST(32, 
MSB(DUP(op_AND_112)), DUP(op_AND_112))), CAST(32, MSB(DUP(op_AND_112)), DUP(op_AND_112))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_112)), DUP(op_AND_112))), CAST(32, MSB(DUP(op_AND_112)), DUP(op_AND_112)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_112)), DUP(op_AND_112))), CAST(32, MSB(DUP(op_AND_112)), DUP(op_AND_112))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_126), DUP(op_AND_126))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_135 = SHIFTL0(op_MUL_133, SN(32, 0)); + RzILOpPure *op_RSHIFT_137 = SHIFTRA(op_LSHIFT_135, SN(32, 16)); + RzILOpPure *op_ADD_138 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_104), DUP(op_AND_104))), CAST(32, MSB(DUP(op_AND_104)), DUP(op_AND_104))), op_RSHIFT_137); + RzILOpPure *cond_204 = ITE(DUP(op_EQ_98), op_ADD_138, VARL("h_tmp280")); + RzILOpPure *op_AND_206 = LOGAND(cond_204, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_210 = SHIFTL0(op_AND_206, SN(32, 0x20)); + RzILOpPure *op_OR_211 = LOGOR(op_AND_7, op_LSHIFT_210); + RzILOpEffect *op_ASSIGN_212 = WRITE_REG(bundle, Rxx_op, op_OR_211); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((R ...; + RzILOpEffect *seq_213 = SEQN(2, seq_203, op_ASSIGN_212); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_353 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10) << 0x0) >> 0x10)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10) << 0x0) >> 0x10))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((((st64) ((st32) ((st64) 
((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10) << 0x0) >> 0x10) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_228 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_230 = LOGAND(op_RSHIFT_228, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_236 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_238 = LOGAND(op_RSHIFT_236, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_249 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_252 = LOGAND(op_RSHIFT_249, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_259 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_238), DUP(op_AND_238))), CAST(32, MSB(DUP(op_AND_238)), DUP(op_AND_238)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_238)), DUP(op_AND_238))), CAST(32, MSB(DUP(op_AND_238)), DUP(op_AND_238))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_238)), DUP(op_AND_238))), CAST(32, MSB(DUP(op_AND_238)), DUP(op_AND_238)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_238)), DUP(op_AND_238))), CAST(32, MSB(DUP(op_AND_238)), DUP(op_AND_238))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_252), DUP(op_AND_252))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_261 = SHIFTL0(op_MUL_259, SN(32, 0)); + RzILOpPure *op_RSHIFT_263 = SHIFTRA(op_LSHIFT_261, SN(32, 16)); + RzILOpPure *op_ADD_264 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_230), DUP(op_AND_230))), CAST(32, MSB(DUP(op_AND_230)), DUP(op_AND_230))), op_RSHIFT_263); + RzILOpPure *op_RSHIFT_273 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_275 = LOGAND(op_RSHIFT_273, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_281 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_283 = LOGAND(op_RSHIFT_281, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_294 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_297 = LOGAND(op_RSHIFT_294, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure 
*op_MUL_304 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_283), DUP(op_AND_283))), CAST(32, MSB(DUP(op_AND_283)), DUP(op_AND_283)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_283)), DUP(op_AND_283))), CAST(32, MSB(DUP(op_AND_283)), DUP(op_AND_283))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_283)), DUP(op_AND_283))), CAST(32, MSB(DUP(op_AND_283)), DUP(op_AND_283)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_283)), DUP(op_AND_283))), CAST(32, MSB(DUP(op_AND_283)), DUP(op_AND_283))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_297), DUP(op_AND_297))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_306 = SHIFTL0(op_MUL_304, SN(32, 0)); + RzILOpPure *op_RSHIFT_308 = SHIFTRA(op_LSHIFT_306, SN(32, 16)); + RzILOpPure *op_ADD_309 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_275), DUP(op_AND_275))), CAST(32, MSB(DUP(op_AND_275)), DUP(op_AND_275))), op_RSHIFT_308); + RzILOpPure *op_EQ_310 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_ADD_264), SN(32, 0), SN(32, 0x20)), op_ADD_309); + RzILOpPure *op_RSHIFT_357 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_359 = LOGAND(op_RSHIFT_357, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_365 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_367 = LOGAND(op_RSHIFT_365, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_378 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_381 = LOGAND(op_RSHIFT_378, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_388 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_367), DUP(op_AND_367))), CAST(32, MSB(DUP(op_AND_367)), DUP(op_AND_367)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_367)), DUP(op_AND_367))), CAST(32, MSB(DUP(op_AND_367)), DUP(op_AND_367))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_367)), DUP(op_AND_367))), CAST(32, MSB(DUP(op_AND_367)), DUP(op_AND_367)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_367)), DUP(op_AND_367))), CAST(32, MSB(DUP(op_AND_367)), DUP(op_AND_367))))), SEXTRACT64(CAST(64, IL_FALSE, 
CAST(16, MSB(op_AND_381), DUP(op_AND_381))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_390 = SHIFTL0(op_MUL_388, SN(32, 0)); + RzILOpPure *op_RSHIFT_392 = SHIFTRA(op_LSHIFT_390, SN(32, 16)); + RzILOpPure *op_ADD_393 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_359), DUP(op_AND_359))), CAST(32, MSB(DUP(op_AND_359)), DUP(op_AND_359))), op_RSHIFT_392); + RzILOpPure *op_LT_396 = SLT(op_ADD_393, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_401 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_402 = NEG(op_LSHIFT_401); + RzILOpPure *op_LSHIFT_407 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_410 = SUB(op_LSHIFT_407, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_411 = ITE(op_LT_396, op_NEG_402, op_SUB_410); + RzILOpEffect *gcc_expr_412 = BRANCH(op_EQ_310, EMPTY(), set_usr_field_call_353); + + // h_tmp281 = HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10) << 0x0) >> 0x10)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10) << 0x0) >> 0x10))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10) << 0x0) >> 0x10) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_414 = SETL("h_tmp281", cond_411); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx > ...; + RzILOpEffect *seq_415 = SEQN(2, gcc_expr_412, op_ASSIGN_hybrid_tmp_414); + + // Rxx = ((Rxx & (~(0xffffffff << 0x0))) | ((((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10) << 0x0) >> 0x10)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10) << 0x0) >> 0x10)) ? ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10) << 0x0) >> 0x10) : h_tmp281) & 0xffffffff) << 0x0)); + RzILOpPure *op_LSHIFT_219 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0)); + RzILOpPure *op_NOT_220 = LOGNOT(op_LSHIFT_219); + RzILOpPure *op_AND_221 = LOGAND(READ_REG(pkt, Rxx_op, false), op_NOT_220); + RzILOpPure *op_RSHIFT_314 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_316 = LOGAND(op_RSHIFT_314, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_322 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_324 = LOGAND(op_RSHIFT_322, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_335 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_338 = LOGAND(op_RSHIFT_335, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_345 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_324), DUP(op_AND_324))), CAST(32, MSB(DUP(op_AND_324)), DUP(op_AND_324)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_324)), DUP(op_AND_324))), CAST(32, MSB(DUP(op_AND_324)), DUP(op_AND_324))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_324)), 
DUP(op_AND_324))), CAST(32, MSB(DUP(op_AND_324)), DUP(op_AND_324)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_324)), DUP(op_AND_324))), CAST(32, MSB(DUP(op_AND_324)), DUP(op_AND_324))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_338), DUP(op_AND_338))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_347 = SHIFTL0(op_MUL_345, SN(32, 0)); + RzILOpPure *op_RSHIFT_349 = SHIFTRA(op_LSHIFT_347, SN(32, 16)); + RzILOpPure *op_ADD_350 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_316), DUP(op_AND_316))), CAST(32, MSB(DUP(op_AND_316)), DUP(op_AND_316))), op_RSHIFT_349); + RzILOpPure *cond_416 = ITE(DUP(op_EQ_310), op_ADD_350, VARL("h_tmp281")); + RzILOpPure *op_AND_418 = LOGAND(cond_416, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_422 = SHIFTL0(op_AND_418, SN(32, 0)); + RzILOpPure *op_OR_423 = LOGOR(op_AND_221, op_LSHIFT_422); + RzILOpEffect *op_ASSIGN_424 = WRITE_REG(bundle, Rxx_op, op_OR_423); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((R ...; + RzILOpEffect *seq_425 = SEQN(2, seq_415, op_ASSIGN_424); + + RzILOpEffect *instruction_sequence = SEQN(2, seq_213, seq_425); + return instruction_sequence; +} + +// Rxx += vmpyweh(Rss,Rtt):<<1:sat +RzILOpEffect *hex_il_op_m2_mmacls_s1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rxx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_141 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10) << 
0x1) >> 0x10)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10) << 0x1) >> 0x10))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10) << 0x1) >> 0x10) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_23 = SHIFTRA(Rss, SN(32, 0x20)); + RzILOpPure *op_AND_25 = LOGAND(op_RSHIFT_23, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_37 = SHIFTRA(Rtt, SN(32, 0x20)); + RzILOpPure *op_AND_40 = LOGAND(op_RSHIFT_37, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_47 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_25), DUP(op_AND_25))), CAST(32, MSB(DUP(op_AND_25)), DUP(op_AND_25)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_25)), DUP(op_AND_25))), CAST(32, MSB(DUP(op_AND_25)), DUP(op_AND_25))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_25)), DUP(op_AND_25))), CAST(32, MSB(DUP(op_AND_25)), DUP(op_AND_25)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_25)), DUP(op_AND_25))), CAST(32, MSB(DUP(op_AND_25)), DUP(op_AND_25))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_40), DUP(op_AND_40))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_49 = SHIFTL0(op_MUL_47, SN(32, 1)); + RzILOpPure *op_RSHIFT_51 = SHIFTRA(op_LSHIFT_49, SN(32, 16)); + RzILOpPure *op_ADD_52 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_16), DUP(op_AND_16))), CAST(32, MSB(DUP(op_AND_16)), DUP(op_AND_16))), op_RSHIFT_51); + RzILOpPure *op_RSHIFT_61 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure 
*op_AND_63 = LOGAND(op_RSHIFT_61, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_69 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_71 = LOGAND(op_RSHIFT_69, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_82 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_85 = LOGAND(op_RSHIFT_82, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_92 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_71), DUP(op_AND_71))), CAST(32, MSB(DUP(op_AND_71)), DUP(op_AND_71)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_71)), DUP(op_AND_71))), CAST(32, MSB(DUP(op_AND_71)), DUP(op_AND_71))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_71)), DUP(op_AND_71))), CAST(32, MSB(DUP(op_AND_71)), DUP(op_AND_71)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_71)), DUP(op_AND_71))), CAST(32, MSB(DUP(op_AND_71)), DUP(op_AND_71))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_85), DUP(op_AND_85))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_94 = SHIFTL0(op_MUL_92, SN(32, 1)); + RzILOpPure *op_RSHIFT_96 = SHIFTRA(op_LSHIFT_94, SN(32, 16)); + RzILOpPure *op_ADD_97 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_63), DUP(op_AND_63))), CAST(32, MSB(DUP(op_AND_63)), DUP(op_AND_63))), op_RSHIFT_96); + RzILOpPure *op_EQ_98 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_ADD_52), SN(32, 0), SN(32, 0x20)), op_ADD_97); + RzILOpPure *op_RSHIFT_145 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_147 = LOGAND(op_RSHIFT_145, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_153 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_155 = LOGAND(op_RSHIFT_153, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_166 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_169 = LOGAND(op_RSHIFT_166, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_176 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_155), DUP(op_AND_155))), CAST(32, MSB(DUP(op_AND_155)), DUP(op_AND_155)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_155)), 
DUP(op_AND_155))), CAST(32, MSB(DUP(op_AND_155)), DUP(op_AND_155))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_155)), DUP(op_AND_155))), CAST(32, MSB(DUP(op_AND_155)), DUP(op_AND_155)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_155)), DUP(op_AND_155))), CAST(32, MSB(DUP(op_AND_155)), DUP(op_AND_155))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_169), DUP(op_AND_169))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_178 = SHIFTL0(op_MUL_176, SN(32, 1)); + RzILOpPure *op_RSHIFT_180 = SHIFTRA(op_LSHIFT_178, SN(32, 16)); + RzILOpPure *op_ADD_181 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_147), DUP(op_AND_147))), CAST(32, MSB(DUP(op_AND_147)), DUP(op_AND_147))), op_RSHIFT_180); + RzILOpPure *op_LT_184 = SLT(op_ADD_181, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_189 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_190 = NEG(op_LSHIFT_189); + RzILOpPure *op_LSHIFT_195 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_198 = SUB(op_LSHIFT_195, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_199 = ITE(op_LT_184, op_NEG_190, op_SUB_198); + RzILOpEffect *gcc_expr_200 = BRANCH(op_EQ_98, EMPTY(), set_usr_field_call_141); + + // h_tmp282 = HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10) << 0x1) >> 0x10)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10) << 0x1) >> 0x10))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10) << 0x1) >> 0x10) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_202 = SETL("h_tmp282", cond_199); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx > ...; + RzILOpEffect *seq_203 = SEQN(2, gcc_expr_200, op_ASSIGN_hybrid_tmp_202); + + // Rxx = ((Rxx & (~(0xffffffff << 0x20))) | ((((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10) << 0x1) >> 0x10)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10) << 0x1) >> 0x10)) ? ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10) << 0x1) >> 0x10) : h_tmp282) & 0xffffffff) << 0x20)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0x20)); + RzILOpPure *op_NOT_6 = LOGNOT(op_LSHIFT_5); + RzILOpPure *op_AND_7 = LOGAND(READ_REG(pkt, Rxx_op, false), op_NOT_6); + RzILOpPure *op_RSHIFT_102 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_104 = LOGAND(op_RSHIFT_102, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_110 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_112 = LOGAND(op_RSHIFT_110, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_123 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_126 = LOGAND(op_RSHIFT_123, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_133 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_112), DUP(op_AND_112))), CAST(32, MSB(DUP(op_AND_112)), DUP(op_AND_112)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_112)), DUP(op_AND_112))), CAST(32, MSB(DUP(op_AND_112)), DUP(op_AND_112))))), CAST(32, MSB(CAST(64, MSB(CAST(32, 
MSB(DUP(op_AND_112)), DUP(op_AND_112))), CAST(32, MSB(DUP(op_AND_112)), DUP(op_AND_112)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_112)), DUP(op_AND_112))), CAST(32, MSB(DUP(op_AND_112)), DUP(op_AND_112))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_126), DUP(op_AND_126))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_135 = SHIFTL0(op_MUL_133, SN(32, 1)); + RzILOpPure *op_RSHIFT_137 = SHIFTRA(op_LSHIFT_135, SN(32, 16)); + RzILOpPure *op_ADD_138 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_104), DUP(op_AND_104))), CAST(32, MSB(DUP(op_AND_104)), DUP(op_AND_104))), op_RSHIFT_137); + RzILOpPure *cond_204 = ITE(DUP(op_EQ_98), op_ADD_138, VARL("h_tmp282")); + RzILOpPure *op_AND_206 = LOGAND(cond_204, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_210 = SHIFTL0(op_AND_206, SN(32, 0x20)); + RzILOpPure *op_OR_211 = LOGOR(op_AND_7, op_LSHIFT_210); + RzILOpEffect *op_ASSIGN_212 = WRITE_REG(bundle, Rxx_op, op_OR_211); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((R ...; + RzILOpEffect *seq_213 = SEQN(2, seq_203, op_ASSIGN_212); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_353 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10) << 0x1) >> 0x10)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10) << 0x1) >> 0x10))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10) << 0x1) >> 
0x10) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_228 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_230 = LOGAND(op_RSHIFT_228, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_236 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_238 = LOGAND(op_RSHIFT_236, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_249 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_252 = LOGAND(op_RSHIFT_249, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_259 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_238), DUP(op_AND_238))), CAST(32, MSB(DUP(op_AND_238)), DUP(op_AND_238)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_238)), DUP(op_AND_238))), CAST(32, MSB(DUP(op_AND_238)), DUP(op_AND_238))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_238)), DUP(op_AND_238))), CAST(32, MSB(DUP(op_AND_238)), DUP(op_AND_238)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_238)), DUP(op_AND_238))), CAST(32, MSB(DUP(op_AND_238)), DUP(op_AND_238))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_252), DUP(op_AND_252))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_261 = SHIFTL0(op_MUL_259, SN(32, 1)); + RzILOpPure *op_RSHIFT_263 = SHIFTRA(op_LSHIFT_261, SN(32, 16)); + RzILOpPure *op_ADD_264 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_230), DUP(op_AND_230))), CAST(32, MSB(DUP(op_AND_230)), DUP(op_AND_230))), op_RSHIFT_263); + RzILOpPure *op_RSHIFT_273 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_275 = LOGAND(op_RSHIFT_273, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_281 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_283 = LOGAND(op_RSHIFT_281, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_294 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_297 = LOGAND(op_RSHIFT_294, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_304 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_283), DUP(op_AND_283))), CAST(32, 
MSB(DUP(op_AND_283)), DUP(op_AND_283)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_283)), DUP(op_AND_283))), CAST(32, MSB(DUP(op_AND_283)), DUP(op_AND_283))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_283)), DUP(op_AND_283))), CAST(32, MSB(DUP(op_AND_283)), DUP(op_AND_283)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_283)), DUP(op_AND_283))), CAST(32, MSB(DUP(op_AND_283)), DUP(op_AND_283))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_297), DUP(op_AND_297))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_306 = SHIFTL0(op_MUL_304, SN(32, 1)); + RzILOpPure *op_RSHIFT_308 = SHIFTRA(op_LSHIFT_306, SN(32, 16)); + RzILOpPure *op_ADD_309 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_275), DUP(op_AND_275))), CAST(32, MSB(DUP(op_AND_275)), DUP(op_AND_275))), op_RSHIFT_308); + RzILOpPure *op_EQ_310 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_ADD_264), SN(32, 0), SN(32, 0x20)), op_ADD_309); + RzILOpPure *op_RSHIFT_357 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_359 = LOGAND(op_RSHIFT_357, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_365 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_367 = LOGAND(op_RSHIFT_365, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_378 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_381 = LOGAND(op_RSHIFT_378, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_388 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_367), DUP(op_AND_367))), CAST(32, MSB(DUP(op_AND_367)), DUP(op_AND_367)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_367)), DUP(op_AND_367))), CAST(32, MSB(DUP(op_AND_367)), DUP(op_AND_367))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_367)), DUP(op_AND_367))), CAST(32, MSB(DUP(op_AND_367)), DUP(op_AND_367)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_367)), DUP(op_AND_367))), CAST(32, MSB(DUP(op_AND_367)), DUP(op_AND_367))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_381), DUP(op_AND_381))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_390 = 
SHIFTL0(op_MUL_388, SN(32, 1)); + RzILOpPure *op_RSHIFT_392 = SHIFTRA(op_LSHIFT_390, SN(32, 16)); + RzILOpPure *op_ADD_393 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_359), DUP(op_AND_359))), CAST(32, MSB(DUP(op_AND_359)), DUP(op_AND_359))), op_RSHIFT_392); + RzILOpPure *op_LT_396 = SLT(op_ADD_393, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_401 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_402 = NEG(op_LSHIFT_401); + RzILOpPure *op_LSHIFT_407 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_410 = SUB(op_LSHIFT_407, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_411 = ITE(op_LT_396, op_NEG_402, op_SUB_410); + RzILOpEffect *gcc_expr_412 = BRANCH(op_EQ_310, EMPTY(), set_usr_field_call_353); + + // h_tmp283 = HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10) << 0x1) >> 0x10)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10) << 0x1) >> 0x10))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10) << 0x1) >> 0x10) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_414 = SETL("h_tmp283", cond_411); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx > ...; + RzILOpEffect *seq_415 = SEQN(2, gcc_expr_412, op_ASSIGN_hybrid_tmp_414); + + // Rxx = ((Rxx & (~(0xffffffff << 0x0))) | ((((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10) << 0x1) >> 0x10)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10) << 0x1) >> 0x10)) ? ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10) << 0x1) >> 0x10) : h_tmp283) & 0xffffffff) << 0x0)); + RzILOpPure *op_LSHIFT_219 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0)); + RzILOpPure *op_NOT_220 = LOGNOT(op_LSHIFT_219); + RzILOpPure *op_AND_221 = LOGAND(READ_REG(pkt, Rxx_op, false), op_NOT_220); + RzILOpPure *op_RSHIFT_314 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_316 = LOGAND(op_RSHIFT_314, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_322 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_324 = LOGAND(op_RSHIFT_322, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_335 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_338 = LOGAND(op_RSHIFT_335, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_345 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_324), DUP(op_AND_324))), CAST(32, MSB(DUP(op_AND_324)), DUP(op_AND_324)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_324)), DUP(op_AND_324))), CAST(32, MSB(DUP(op_AND_324)), DUP(op_AND_324))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_324)), 
DUP(op_AND_324))), CAST(32, MSB(DUP(op_AND_324)), DUP(op_AND_324)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_324)), DUP(op_AND_324))), CAST(32, MSB(DUP(op_AND_324)), DUP(op_AND_324))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_338), DUP(op_AND_338))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_347 = SHIFTL0(op_MUL_345, SN(32, 1)); + RzILOpPure *op_RSHIFT_349 = SHIFTRA(op_LSHIFT_347, SN(32, 16)); + RzILOpPure *op_ADD_350 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_316), DUP(op_AND_316))), CAST(32, MSB(DUP(op_AND_316)), DUP(op_AND_316))), op_RSHIFT_349); + RzILOpPure *cond_416 = ITE(DUP(op_EQ_310), op_ADD_350, VARL("h_tmp283")); + RzILOpPure *op_AND_418 = LOGAND(cond_416, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_422 = SHIFTL0(op_AND_418, SN(32, 0)); + RzILOpPure *op_OR_423 = LOGOR(op_AND_221, op_LSHIFT_422); + RzILOpEffect *op_ASSIGN_424 = WRITE_REG(bundle, Rxx_op, op_OR_423); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((R ...; + RzILOpEffect *seq_425 = SEQN(2, seq_415, op_ASSIGN_424); + + RzILOpEffect *instruction_sequence = SEQN(2, seq_213, seq_425); + return instruction_sequence; +} + +// Rxx += vmpywouh(Rss,Rtt):rnd:sat +RzILOpEffect *hex_il_op_m2_mmacuhs_rs0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rxx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_156 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x30) & ((st64) 
0xffff)))), 0x0, 0x10)) << 0x0) + ((st64) 0x8000) >> 0x10)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x30) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) + ((st64) 0x8000) >> 0x10))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x30) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) + ((st64) 0x8000) >> 0x10) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_23 = SHIFTRA(Rss, SN(32, 0x20)); + RzILOpPure *op_AND_25 = LOGAND(op_RSHIFT_23, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_37 = SHIFTRA(Rtt, SN(32, 0x30)); + RzILOpPure *op_AND_40 = LOGAND(op_RSHIFT_37, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_48 = MUL(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_25), DUP(op_AND_25))), CAST(32, MSB(DUP(op_AND_25)), DUP(op_AND_25)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_25)), DUP(op_AND_25))), CAST(32, MSB(DUP(op_AND_25)), DUP(op_AND_25))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_25)), DUP(op_AND_25))), CAST(32, MSB(DUP(op_AND_25)), DUP(op_AND_25)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_25)), DUP(op_AND_25))), CAST(32, MSB(DUP(op_AND_25)), DUP(op_AND_25)))))), EXTRACT64(CAST(64, IL_FALSE, CAST(16, IL_FALSE, op_AND_40)), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_51 = SHIFTL0(CAST(64, IL_FALSE, op_MUL_48), SN(32, 0)); + RzILOpPure *op_ADD_54 = ADD(op_LSHIFT_51, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_56 = SHIFTRA(op_ADD_54, SN(32, 16)); + RzILOpPure 
*op_ADD_57 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_16), DUP(op_AND_16))), CAST(32, MSB(DUP(op_AND_16)), DUP(op_AND_16))), op_RSHIFT_56); + RzILOpPure *op_RSHIFT_66 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_68 = LOGAND(op_RSHIFT_66, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_74 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_76 = LOGAND(op_RSHIFT_74, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_87 = SHIFTRA(DUP(Rtt), SN(32, 0x30)); + RzILOpPure *op_AND_90 = LOGAND(op_RSHIFT_87, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_98 = MUL(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_76), DUP(op_AND_76))), CAST(32, MSB(DUP(op_AND_76)), DUP(op_AND_76)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_76)), DUP(op_AND_76))), CAST(32, MSB(DUP(op_AND_76)), DUP(op_AND_76))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_76)), DUP(op_AND_76))), CAST(32, MSB(DUP(op_AND_76)), DUP(op_AND_76)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_76)), DUP(op_AND_76))), CAST(32, MSB(DUP(op_AND_76)), DUP(op_AND_76)))))), EXTRACT64(CAST(64, IL_FALSE, CAST(16, IL_FALSE, op_AND_90)), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_101 = SHIFTL0(CAST(64, IL_FALSE, op_MUL_98), SN(32, 0)); + RzILOpPure *op_ADD_104 = ADD(op_LSHIFT_101, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_106 = SHIFTRA(op_ADD_104, SN(32, 16)); + RzILOpPure *op_ADD_107 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_68), DUP(op_AND_68))), CAST(32, MSB(DUP(op_AND_68)), DUP(op_AND_68))), op_RSHIFT_106); + RzILOpPure *op_EQ_108 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_ADD_57), SN(32, 0), SN(32, 0x20)), op_ADD_107); + RzILOpPure *op_RSHIFT_160 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_162 = LOGAND(op_RSHIFT_160, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_168 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_170 = LOGAND(op_RSHIFT_168, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_181 = 
SHIFTRA(DUP(Rtt), SN(32, 0x30)); + RzILOpPure *op_AND_184 = LOGAND(op_RSHIFT_181, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_192 = MUL(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_170), DUP(op_AND_170))), CAST(32, MSB(DUP(op_AND_170)), DUP(op_AND_170)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_170)), DUP(op_AND_170))), CAST(32, MSB(DUP(op_AND_170)), DUP(op_AND_170))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_170)), DUP(op_AND_170))), CAST(32, MSB(DUP(op_AND_170)), DUP(op_AND_170)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_170)), DUP(op_AND_170))), CAST(32, MSB(DUP(op_AND_170)), DUP(op_AND_170)))))), EXTRACT64(CAST(64, IL_FALSE, CAST(16, IL_FALSE, op_AND_184)), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_195 = SHIFTL0(CAST(64, IL_FALSE, op_MUL_192), SN(32, 0)); + RzILOpPure *op_ADD_198 = ADD(op_LSHIFT_195, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_200 = SHIFTRA(op_ADD_198, SN(32, 16)); + RzILOpPure *op_ADD_201 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_162), DUP(op_AND_162))), CAST(32, MSB(DUP(op_AND_162)), DUP(op_AND_162))), op_RSHIFT_200); + RzILOpPure *op_LT_204 = SLT(op_ADD_201, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_209 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_210 = NEG(op_LSHIFT_209); + RzILOpPure *op_LSHIFT_215 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_218 = SUB(op_LSHIFT_215, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_219 = ITE(op_LT_204, op_NEG_210, op_SUB_218); + RzILOpEffect *gcc_expr_220 = BRANCH(op_EQ_108, EMPTY(), set_usr_field_call_156); + + // h_tmp284 = HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x30) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) + ((st64) 0x8000) >> 0x10)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) 
+ ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x30) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) + ((st64) 0x8000) >> 0x10))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x30) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) + ((st64) 0x8000) >> 0x10) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_222 = SETL("h_tmp284", cond_219); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx > ...; + RzILOpEffect *seq_223 = SEQN(2, gcc_expr_220, op_ASSIGN_hybrid_tmp_222); + + // Rxx = ((Rxx & (~(0xffffffff << 0x20))) | ((((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x30) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) + ((st64) 0x8000) >> 0x10)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x30) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) + ((st64) 0x8000) >> 0x10)) ? 
((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x30) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) + ((st64) 0x8000) >> 0x10) : h_tmp284) & 0xffffffff) << 0x20)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0x20)); + RzILOpPure *op_NOT_6 = LOGNOT(op_LSHIFT_5); + RzILOpPure *op_AND_7 = LOGAND(READ_REG(pkt, Rxx_op, false), op_NOT_6); + RzILOpPure *op_RSHIFT_112 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_114 = LOGAND(op_RSHIFT_112, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_120 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_122 = LOGAND(op_RSHIFT_120, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_133 = SHIFTRA(DUP(Rtt), SN(32, 0x30)); + RzILOpPure *op_AND_136 = LOGAND(op_RSHIFT_133, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_144 = MUL(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_122), DUP(op_AND_122))), CAST(32, MSB(DUP(op_AND_122)), DUP(op_AND_122)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_122)), DUP(op_AND_122))), CAST(32, MSB(DUP(op_AND_122)), DUP(op_AND_122))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_122)), DUP(op_AND_122))), CAST(32, MSB(DUP(op_AND_122)), DUP(op_AND_122)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_122)), DUP(op_AND_122))), CAST(32, MSB(DUP(op_AND_122)), DUP(op_AND_122)))))), EXTRACT64(CAST(64, IL_FALSE, CAST(16, IL_FALSE, op_AND_136)), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_147 = SHIFTL0(CAST(64, IL_FALSE, op_MUL_144), SN(32, 0)); + RzILOpPure *op_ADD_150 = ADD(op_LSHIFT_147, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_152 = SHIFTRA(op_ADD_150, SN(32, 16)); + RzILOpPure *op_ADD_153 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_114), DUP(op_AND_114))), CAST(32, MSB(DUP(op_AND_114)), DUP(op_AND_114))), op_RSHIFT_152); + RzILOpPure *cond_224 = ITE(DUP(op_EQ_108), op_ADD_153, 
VARL("h_tmp284")); + RzILOpPure *op_AND_226 = LOGAND(cond_224, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_230 = SHIFTL0(op_AND_226, SN(32, 0x20)); + RzILOpPure *op_OR_231 = LOGOR(op_AND_7, op_LSHIFT_230); + RzILOpEffect *op_ASSIGN_232 = WRITE_REG(bundle, Rxx_op, op_OR_231); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((R ...; + RzILOpEffect *seq_233 = SEQN(2, seq_223, op_ASSIGN_232); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_388 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) + ((st64) 0x8000) >> 0x10)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) + ((st64) 0x8000) >> 0x10))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) + ((st64) 0x8000) >> 0x10) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_248 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_250 = LOGAND(op_RSHIFT_248, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_256 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_258 = LOGAND(op_RSHIFT_256, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_269 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_272 = LOGAND(op_RSHIFT_269, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_280 = MUL(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_258), DUP(op_AND_258))), CAST(32, MSB(DUP(op_AND_258)), DUP(op_AND_258)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_258)), DUP(op_AND_258))), CAST(32, MSB(DUP(op_AND_258)), DUP(op_AND_258))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_258)), DUP(op_AND_258))), CAST(32, MSB(DUP(op_AND_258)), DUP(op_AND_258)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_258)), DUP(op_AND_258))), CAST(32, MSB(DUP(op_AND_258)), DUP(op_AND_258)))))), EXTRACT64(CAST(64, IL_FALSE, CAST(16, IL_FALSE, op_AND_272)), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_283 = SHIFTL0(CAST(64, IL_FALSE, op_MUL_280), SN(32, 0)); + RzILOpPure *op_ADD_286 = ADD(op_LSHIFT_283, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_288 = SHIFTRA(op_ADD_286, SN(32, 16)); + RzILOpPure *op_ADD_289 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_250), DUP(op_AND_250))), CAST(32, MSB(DUP(op_AND_250)), DUP(op_AND_250))), op_RSHIFT_288); + RzILOpPure *op_RSHIFT_298 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_300 = LOGAND(op_RSHIFT_298, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_306 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_308 = LOGAND(op_RSHIFT_306, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_319 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_322 = LOGAND(op_RSHIFT_319, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_330 = MUL(CAST(64, 
IL_FALSE, CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_308), DUP(op_AND_308))), CAST(32, MSB(DUP(op_AND_308)), DUP(op_AND_308)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_308)), DUP(op_AND_308))), CAST(32, MSB(DUP(op_AND_308)), DUP(op_AND_308))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_308)), DUP(op_AND_308))), CAST(32, MSB(DUP(op_AND_308)), DUP(op_AND_308)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_308)), DUP(op_AND_308))), CAST(32, MSB(DUP(op_AND_308)), DUP(op_AND_308)))))), EXTRACT64(CAST(64, IL_FALSE, CAST(16, IL_FALSE, op_AND_322)), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_333 = SHIFTL0(CAST(64, IL_FALSE, op_MUL_330), SN(32, 0)); + RzILOpPure *op_ADD_336 = ADD(op_LSHIFT_333, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_338 = SHIFTRA(op_ADD_336, SN(32, 16)); + RzILOpPure *op_ADD_339 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_300), DUP(op_AND_300))), CAST(32, MSB(DUP(op_AND_300)), DUP(op_AND_300))), op_RSHIFT_338); + RzILOpPure *op_EQ_340 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_ADD_289), SN(32, 0), SN(32, 0x20)), op_ADD_339); + RzILOpPure *op_RSHIFT_392 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_394 = LOGAND(op_RSHIFT_392, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_400 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_402 = LOGAND(op_RSHIFT_400, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_413 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_416 = LOGAND(op_RSHIFT_413, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_424 = MUL(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_402), DUP(op_AND_402))), CAST(32, MSB(DUP(op_AND_402)), DUP(op_AND_402)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_402)), DUP(op_AND_402))), CAST(32, MSB(DUP(op_AND_402)), DUP(op_AND_402))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_402)), DUP(op_AND_402))), CAST(32, MSB(DUP(op_AND_402)), DUP(op_AND_402)))), CAST(64, MSB(CAST(32, 
MSB(DUP(op_AND_402)), DUP(op_AND_402))), CAST(32, MSB(DUP(op_AND_402)), DUP(op_AND_402)))))), EXTRACT64(CAST(64, IL_FALSE, CAST(16, IL_FALSE, op_AND_416)), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_427 = SHIFTL0(CAST(64, IL_FALSE, op_MUL_424), SN(32, 0)); + RzILOpPure *op_ADD_430 = ADD(op_LSHIFT_427, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_432 = SHIFTRA(op_ADD_430, SN(32, 16)); + RzILOpPure *op_ADD_433 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_394), DUP(op_AND_394))), CAST(32, MSB(DUP(op_AND_394)), DUP(op_AND_394))), op_RSHIFT_432); + RzILOpPure *op_LT_436 = SLT(op_ADD_433, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_441 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_442 = NEG(op_LSHIFT_441); + RzILOpPure *op_LSHIFT_447 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_450 = SUB(op_LSHIFT_447, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_451 = ITE(op_LT_436, op_NEG_442, op_SUB_450); + RzILOpEffect *gcc_expr_452 = BRANCH(op_EQ_340, EMPTY(), set_usr_field_call_388); + + // h_tmp285 = HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) + ((st64) 0x8000) >> 0x10)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) + ((st64) 0x8000) >> 0x10))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) + ((st64) 0x8000) >> 0x10) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_454 = SETL("h_tmp285", cond_451); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx > ...; + RzILOpEffect *seq_455 = SEQN(2, gcc_expr_452, op_ASSIGN_hybrid_tmp_454); + + // Rxx = ((Rxx & (~(0xffffffff << 0x0))) | ((((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) + ((st64) 0x8000) >> 0x10)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) + ((st64) 0x8000) >> 0x10)) ? ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) + ((st64) 0x8000) >> 0x10) : h_tmp285) & 0xffffffff) << 0x0)); + RzILOpPure *op_LSHIFT_239 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0)); + RzILOpPure *op_NOT_240 = LOGNOT(op_LSHIFT_239); + RzILOpPure *op_AND_241 = LOGAND(READ_REG(pkt, Rxx_op, false), op_NOT_240); + RzILOpPure *op_RSHIFT_344 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_346 = LOGAND(op_RSHIFT_344, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_352 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_354 = LOGAND(op_RSHIFT_352, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_365 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_368 = LOGAND(op_RSHIFT_365, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_376 = MUL(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_354), DUP(op_AND_354))), CAST(32, MSB(DUP(op_AND_354)), DUP(op_AND_354)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_354)), 
DUP(op_AND_354))), CAST(32, MSB(DUP(op_AND_354)), DUP(op_AND_354))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_354)), DUP(op_AND_354))), CAST(32, MSB(DUP(op_AND_354)), DUP(op_AND_354)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_354)), DUP(op_AND_354))), CAST(32, MSB(DUP(op_AND_354)), DUP(op_AND_354)))))), EXTRACT64(CAST(64, IL_FALSE, CAST(16, IL_FALSE, op_AND_368)), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_379 = SHIFTL0(CAST(64, IL_FALSE, op_MUL_376), SN(32, 0)); + RzILOpPure *op_ADD_382 = ADD(op_LSHIFT_379, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_384 = SHIFTRA(op_ADD_382, SN(32, 16)); + RzILOpPure *op_ADD_385 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_346), DUP(op_AND_346))), CAST(32, MSB(DUP(op_AND_346)), DUP(op_AND_346))), op_RSHIFT_384); + RzILOpPure *cond_456 = ITE(DUP(op_EQ_340), op_ADD_385, VARL("h_tmp285")); + RzILOpPure *op_AND_458 = LOGAND(cond_456, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_462 = SHIFTL0(op_AND_458, SN(32, 0)); + RzILOpPure *op_OR_463 = LOGOR(op_AND_241, op_LSHIFT_462); + RzILOpEffect *op_ASSIGN_464 = WRITE_REG(bundle, Rxx_op, op_OR_463); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((R ...; + RzILOpEffect *seq_465 = SEQN(2, seq_455, op_ASSIGN_464); + + RzILOpEffect *instruction_sequence = SEQN(2, seq_233, seq_465); + return instruction_sequence; +} + +// Rxx += vmpywouh(Rss,Rtt):<<1:rnd:sat +RzILOpEffect *hex_il_op_m2_mmacuhs_rs1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rxx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_156 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // 
HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x30) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) + ((st64) 0x8000) >> 0x10)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x30) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) + ((st64) 0x8000) >> 0x10))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x30) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) + ((st64) 0x8000) >> 0x10) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_23 = SHIFTRA(Rss, SN(32, 0x20)); + RzILOpPure *op_AND_25 = LOGAND(op_RSHIFT_23, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_37 = SHIFTRA(Rtt, SN(32, 0x30)); + RzILOpPure *op_AND_40 = LOGAND(op_RSHIFT_37, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_48 = MUL(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_25), DUP(op_AND_25))), CAST(32, MSB(DUP(op_AND_25)), DUP(op_AND_25)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_25)), DUP(op_AND_25))), CAST(32, MSB(DUP(op_AND_25)), DUP(op_AND_25))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_25)), DUP(op_AND_25))), CAST(32, MSB(DUP(op_AND_25)), DUP(op_AND_25)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_25)), DUP(op_AND_25))), CAST(32, MSB(DUP(op_AND_25)), DUP(op_AND_25)))))), EXTRACT64(CAST(64, IL_FALSE, CAST(16, IL_FALSE, op_AND_40)), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_51 
= SHIFTL0(CAST(64, IL_FALSE, op_MUL_48), SN(32, 1)); + RzILOpPure *op_ADD_54 = ADD(op_LSHIFT_51, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_56 = SHIFTRA(op_ADD_54, SN(32, 16)); + RzILOpPure *op_ADD_57 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_16), DUP(op_AND_16))), CAST(32, MSB(DUP(op_AND_16)), DUP(op_AND_16))), op_RSHIFT_56); + RzILOpPure *op_RSHIFT_66 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_68 = LOGAND(op_RSHIFT_66, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_74 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_76 = LOGAND(op_RSHIFT_74, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_87 = SHIFTRA(DUP(Rtt), SN(32, 0x30)); + RzILOpPure *op_AND_90 = LOGAND(op_RSHIFT_87, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_98 = MUL(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_76), DUP(op_AND_76))), CAST(32, MSB(DUP(op_AND_76)), DUP(op_AND_76)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_76)), DUP(op_AND_76))), CAST(32, MSB(DUP(op_AND_76)), DUP(op_AND_76))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_76)), DUP(op_AND_76))), CAST(32, MSB(DUP(op_AND_76)), DUP(op_AND_76)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_76)), DUP(op_AND_76))), CAST(32, MSB(DUP(op_AND_76)), DUP(op_AND_76)))))), EXTRACT64(CAST(64, IL_FALSE, CAST(16, IL_FALSE, op_AND_90)), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_101 = SHIFTL0(CAST(64, IL_FALSE, op_MUL_98), SN(32, 1)); + RzILOpPure *op_ADD_104 = ADD(op_LSHIFT_101, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_106 = SHIFTRA(op_ADD_104, SN(32, 16)); + RzILOpPure *op_ADD_107 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_68), DUP(op_AND_68))), CAST(32, MSB(DUP(op_AND_68)), DUP(op_AND_68))), op_RSHIFT_106); + RzILOpPure *op_EQ_108 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_ADD_57), SN(32, 0), SN(32, 0x20)), op_ADD_107); + RzILOpPure *op_RSHIFT_160 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure 
*op_AND_162 = LOGAND(op_RSHIFT_160, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_168 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_170 = LOGAND(op_RSHIFT_168, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_181 = SHIFTRA(DUP(Rtt), SN(32, 0x30)); + RzILOpPure *op_AND_184 = LOGAND(op_RSHIFT_181, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_192 = MUL(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_170), DUP(op_AND_170))), CAST(32, MSB(DUP(op_AND_170)), DUP(op_AND_170)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_170)), DUP(op_AND_170))), CAST(32, MSB(DUP(op_AND_170)), DUP(op_AND_170))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_170)), DUP(op_AND_170))), CAST(32, MSB(DUP(op_AND_170)), DUP(op_AND_170)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_170)), DUP(op_AND_170))), CAST(32, MSB(DUP(op_AND_170)), DUP(op_AND_170)))))), EXTRACT64(CAST(64, IL_FALSE, CAST(16, IL_FALSE, op_AND_184)), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_195 = SHIFTL0(CAST(64, IL_FALSE, op_MUL_192), SN(32, 1)); + RzILOpPure *op_ADD_198 = ADD(op_LSHIFT_195, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_200 = SHIFTRA(op_ADD_198, SN(32, 16)); + RzILOpPure *op_ADD_201 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_162), DUP(op_AND_162))), CAST(32, MSB(DUP(op_AND_162)), DUP(op_AND_162))), op_RSHIFT_200); + RzILOpPure *op_LT_204 = SLT(op_ADD_201, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_209 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_210 = NEG(op_LSHIFT_209); + RzILOpPure *op_LSHIFT_215 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_218 = SUB(op_LSHIFT_215, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_219 = ITE(op_LT_204, op_NEG_210, op_SUB_218); + RzILOpEffect *gcc_expr_220 = BRANCH(op_EQ_108, EMPTY(), set_usr_field_call_156); + + // h_tmp286 = HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) 
((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x30) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) + ((st64) 0x8000) >> 0x10)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x30) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) + ((st64) 0x8000) >> 0x10))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x30) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) + ((st64) 0x8000) >> 0x10) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_222 = SETL("h_tmp286", cond_219); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx > ...; + RzILOpEffect *seq_223 = SEQN(2, gcc_expr_220, op_ASSIGN_hybrid_tmp_222); + + // Rxx = ((Rxx & (~(0xffffffff << 0x20))) | ((((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x30) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) + ((st64) 0x8000) >> 0x10)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x30) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) + ((st64) 0x8000) >> 0x10)) ? 
((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x30) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) + ((st64) 0x8000) >> 0x10) : h_tmp286) & 0xffffffff) << 0x20)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0x20)); + RzILOpPure *op_NOT_6 = LOGNOT(op_LSHIFT_5); + RzILOpPure *op_AND_7 = LOGAND(READ_REG(pkt, Rxx_op, false), op_NOT_6); + RzILOpPure *op_RSHIFT_112 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_114 = LOGAND(op_RSHIFT_112, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_120 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_122 = LOGAND(op_RSHIFT_120, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_133 = SHIFTRA(DUP(Rtt), SN(32, 0x30)); + RzILOpPure *op_AND_136 = LOGAND(op_RSHIFT_133, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_144 = MUL(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_122), DUP(op_AND_122))), CAST(32, MSB(DUP(op_AND_122)), DUP(op_AND_122)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_122)), DUP(op_AND_122))), CAST(32, MSB(DUP(op_AND_122)), DUP(op_AND_122))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_122)), DUP(op_AND_122))), CAST(32, MSB(DUP(op_AND_122)), DUP(op_AND_122)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_122)), DUP(op_AND_122))), CAST(32, MSB(DUP(op_AND_122)), DUP(op_AND_122)))))), EXTRACT64(CAST(64, IL_FALSE, CAST(16, IL_FALSE, op_AND_136)), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_147 = SHIFTL0(CAST(64, IL_FALSE, op_MUL_144), SN(32, 1)); + RzILOpPure *op_ADD_150 = ADD(op_LSHIFT_147, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_152 = SHIFTRA(op_ADD_150, SN(32, 16)); + RzILOpPure *op_ADD_153 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_114), DUP(op_AND_114))), CAST(32, MSB(DUP(op_AND_114)), DUP(op_AND_114))), op_RSHIFT_152); + RzILOpPure *cond_224 = ITE(DUP(op_EQ_108), op_ADD_153, 
VARL("h_tmp286")); + RzILOpPure *op_AND_226 = LOGAND(cond_224, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_230 = SHIFTL0(op_AND_226, SN(32, 0x20)); + RzILOpPure *op_OR_231 = LOGOR(op_AND_7, op_LSHIFT_230); + RzILOpEffect *op_ASSIGN_232 = WRITE_REG(bundle, Rxx_op, op_OR_231); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((R ...; + RzILOpEffect *seq_233 = SEQN(2, seq_223, op_ASSIGN_232); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_388 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) + ((st64) 0x8000) >> 0x10)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) + ((st64) 0x8000) >> 0x10))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) + ((st64) 0x8000) >> 0x10) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_248 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_250 = LOGAND(op_RSHIFT_248, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_256 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_258 = LOGAND(op_RSHIFT_256, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_269 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_272 = LOGAND(op_RSHIFT_269, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_280 = MUL(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_258), DUP(op_AND_258))), CAST(32, MSB(DUP(op_AND_258)), DUP(op_AND_258)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_258)), DUP(op_AND_258))), CAST(32, MSB(DUP(op_AND_258)), DUP(op_AND_258))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_258)), DUP(op_AND_258))), CAST(32, MSB(DUP(op_AND_258)), DUP(op_AND_258)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_258)), DUP(op_AND_258))), CAST(32, MSB(DUP(op_AND_258)), DUP(op_AND_258)))))), EXTRACT64(CAST(64, IL_FALSE, CAST(16, IL_FALSE, op_AND_272)), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_283 = SHIFTL0(CAST(64, IL_FALSE, op_MUL_280), SN(32, 1)); + RzILOpPure *op_ADD_286 = ADD(op_LSHIFT_283, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_288 = SHIFTRA(op_ADD_286, SN(32, 16)); + RzILOpPure *op_ADD_289 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_250), DUP(op_AND_250))), CAST(32, MSB(DUP(op_AND_250)), DUP(op_AND_250))), op_RSHIFT_288); + RzILOpPure *op_RSHIFT_298 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_300 = LOGAND(op_RSHIFT_298, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_306 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_308 = LOGAND(op_RSHIFT_306, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_319 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_322 = LOGAND(op_RSHIFT_319, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_330 = MUL(CAST(64, 
IL_FALSE, CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_308), DUP(op_AND_308))), CAST(32, MSB(DUP(op_AND_308)), DUP(op_AND_308)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_308)), DUP(op_AND_308))), CAST(32, MSB(DUP(op_AND_308)), DUP(op_AND_308))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_308)), DUP(op_AND_308))), CAST(32, MSB(DUP(op_AND_308)), DUP(op_AND_308)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_308)), DUP(op_AND_308))), CAST(32, MSB(DUP(op_AND_308)), DUP(op_AND_308)))))), EXTRACT64(CAST(64, IL_FALSE, CAST(16, IL_FALSE, op_AND_322)), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_333 = SHIFTL0(CAST(64, IL_FALSE, op_MUL_330), SN(32, 1)); + RzILOpPure *op_ADD_336 = ADD(op_LSHIFT_333, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_338 = SHIFTRA(op_ADD_336, SN(32, 16)); + RzILOpPure *op_ADD_339 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_300), DUP(op_AND_300))), CAST(32, MSB(DUP(op_AND_300)), DUP(op_AND_300))), op_RSHIFT_338); + RzILOpPure *op_EQ_340 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_ADD_289), SN(32, 0), SN(32, 0x20)), op_ADD_339); + RzILOpPure *op_RSHIFT_392 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_394 = LOGAND(op_RSHIFT_392, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_400 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_402 = LOGAND(op_RSHIFT_400, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_413 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_416 = LOGAND(op_RSHIFT_413, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_424 = MUL(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_402), DUP(op_AND_402))), CAST(32, MSB(DUP(op_AND_402)), DUP(op_AND_402)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_402)), DUP(op_AND_402))), CAST(32, MSB(DUP(op_AND_402)), DUP(op_AND_402))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_402)), DUP(op_AND_402))), CAST(32, MSB(DUP(op_AND_402)), DUP(op_AND_402)))), CAST(64, MSB(CAST(32, 
MSB(DUP(op_AND_402)), DUP(op_AND_402))), CAST(32, MSB(DUP(op_AND_402)), DUP(op_AND_402)))))), EXTRACT64(CAST(64, IL_FALSE, CAST(16, IL_FALSE, op_AND_416)), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_427 = SHIFTL0(CAST(64, IL_FALSE, op_MUL_424), SN(32, 1)); + RzILOpPure *op_ADD_430 = ADD(op_LSHIFT_427, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_432 = SHIFTRA(op_ADD_430, SN(32, 16)); + RzILOpPure *op_ADD_433 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_394), DUP(op_AND_394))), CAST(32, MSB(DUP(op_AND_394)), DUP(op_AND_394))), op_RSHIFT_432); + RzILOpPure *op_LT_436 = SLT(op_ADD_433, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_441 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_442 = NEG(op_LSHIFT_441); + RzILOpPure *op_LSHIFT_447 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_450 = SUB(op_LSHIFT_447, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_451 = ITE(op_LT_436, op_NEG_442, op_SUB_450); + RzILOpEffect *gcc_expr_452 = BRANCH(op_EQ_340, EMPTY(), set_usr_field_call_388); + + // h_tmp287 = HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) + ((st64) 0x8000) >> 0x10)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) + ((st64) 0x8000) >> 0x10))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) + ((st64) 0x8000) >> 0x10) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_454 = SETL("h_tmp287", cond_451); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx > ...; + RzILOpEffect *seq_455 = SEQN(2, gcc_expr_452, op_ASSIGN_hybrid_tmp_454); + + // Rxx = ((Rxx & (~(0xffffffff << 0x0))) | ((((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) + ((st64) 0x8000) >> 0x10)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) + ((st64) 0x8000) >> 0x10)) ? ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) + ((st64) 0x8000) >> 0x10) : h_tmp287) & 0xffffffff) << 0x0)); + RzILOpPure *op_LSHIFT_239 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0)); + RzILOpPure *op_NOT_240 = LOGNOT(op_LSHIFT_239); + RzILOpPure *op_AND_241 = LOGAND(READ_REG(pkt, Rxx_op, false), op_NOT_240); + RzILOpPure *op_RSHIFT_344 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_346 = LOGAND(op_RSHIFT_344, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_352 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_354 = LOGAND(op_RSHIFT_352, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_365 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_368 = LOGAND(op_RSHIFT_365, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_376 = MUL(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_354), DUP(op_AND_354))), CAST(32, MSB(DUP(op_AND_354)), DUP(op_AND_354)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_354)), 
DUP(op_AND_354))), CAST(32, MSB(DUP(op_AND_354)), DUP(op_AND_354))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_354)), DUP(op_AND_354))), CAST(32, MSB(DUP(op_AND_354)), DUP(op_AND_354)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_354)), DUP(op_AND_354))), CAST(32, MSB(DUP(op_AND_354)), DUP(op_AND_354)))))), EXTRACT64(CAST(64, IL_FALSE, CAST(16, IL_FALSE, op_AND_368)), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_379 = SHIFTL0(CAST(64, IL_FALSE, op_MUL_376), SN(32, 1)); + RzILOpPure *op_ADD_382 = ADD(op_LSHIFT_379, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_384 = SHIFTRA(op_ADD_382, SN(32, 16)); + RzILOpPure *op_ADD_385 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_346), DUP(op_AND_346))), CAST(32, MSB(DUP(op_AND_346)), DUP(op_AND_346))), op_RSHIFT_384); + RzILOpPure *cond_456 = ITE(DUP(op_EQ_340), op_ADD_385, VARL("h_tmp287")); + RzILOpPure *op_AND_458 = LOGAND(cond_456, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_462 = SHIFTL0(op_AND_458, SN(32, 0)); + RzILOpPure *op_OR_463 = LOGOR(op_AND_241, op_LSHIFT_462); + RzILOpEffect *op_ASSIGN_464 = WRITE_REG(bundle, Rxx_op, op_OR_463); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((R ...; + RzILOpEffect *seq_465 = SEQN(2, seq_455, op_ASSIGN_464); + + RzILOpEffect *instruction_sequence = SEQN(2, seq_233, seq_465); + return instruction_sequence; +} + +// Rxx += vmpywouh(Rss,Rtt):sat +RzILOpEffect *hex_il_op_m2_mmacuhs_s0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rxx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_147 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // 
HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x30) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) >> 0x10)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x30) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) >> 0x10))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x30) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) >> 0x10) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_23 = SHIFTRA(Rss, SN(32, 0x20)); + RzILOpPure *op_AND_25 = LOGAND(op_RSHIFT_23, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_37 = SHIFTRA(Rtt, SN(32, 0x30)); + RzILOpPure *op_AND_40 = LOGAND(op_RSHIFT_37, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_48 = MUL(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_25), DUP(op_AND_25))), CAST(32, MSB(DUP(op_AND_25)), DUP(op_AND_25)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_25)), DUP(op_AND_25))), CAST(32, MSB(DUP(op_AND_25)), DUP(op_AND_25))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_25)), DUP(op_AND_25))), CAST(32, MSB(DUP(op_AND_25)), DUP(op_AND_25)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_25)), DUP(op_AND_25))), CAST(32, MSB(DUP(op_AND_25)), DUP(op_AND_25)))))), EXTRACT64(CAST(64, IL_FALSE, CAST(16, IL_FALSE, op_AND_40)), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_51 = SHIFTL0(CAST(64, IL_FALSE, op_MUL_48), SN(32, 0)); + 
RzILOpPure *op_RSHIFT_53 = SHIFTRA(op_LSHIFT_51, SN(32, 16)); + RzILOpPure *op_ADD_54 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_16), DUP(op_AND_16))), CAST(32, MSB(DUP(op_AND_16)), DUP(op_AND_16))), op_RSHIFT_53); + RzILOpPure *op_RSHIFT_63 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_65 = LOGAND(op_RSHIFT_63, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_71 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_73 = LOGAND(op_RSHIFT_71, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_84 = SHIFTRA(DUP(Rtt), SN(32, 0x30)); + RzILOpPure *op_AND_87 = LOGAND(op_RSHIFT_84, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_95 = MUL(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_73), DUP(op_AND_73))), CAST(32, MSB(DUP(op_AND_73)), DUP(op_AND_73)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_73)), DUP(op_AND_73))), CAST(32, MSB(DUP(op_AND_73)), DUP(op_AND_73))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_73)), DUP(op_AND_73))), CAST(32, MSB(DUP(op_AND_73)), DUP(op_AND_73)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_73)), DUP(op_AND_73))), CAST(32, MSB(DUP(op_AND_73)), DUP(op_AND_73)))))), EXTRACT64(CAST(64, IL_FALSE, CAST(16, IL_FALSE, op_AND_87)), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_98 = SHIFTL0(CAST(64, IL_FALSE, op_MUL_95), SN(32, 0)); + RzILOpPure *op_RSHIFT_100 = SHIFTRA(op_LSHIFT_98, SN(32, 16)); + RzILOpPure *op_ADD_101 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_65), DUP(op_AND_65))), CAST(32, MSB(DUP(op_AND_65)), DUP(op_AND_65))), op_RSHIFT_100); + RzILOpPure *op_EQ_102 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_ADD_54), SN(32, 0), SN(32, 0x20)), op_ADD_101); + RzILOpPure *op_RSHIFT_151 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_153 = LOGAND(op_RSHIFT_151, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_159 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_161 = LOGAND(op_RSHIFT_159, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_172 = SHIFTRA(DUP(Rtt), 
SN(32, 0x30)); + RzILOpPure *op_AND_175 = LOGAND(op_RSHIFT_172, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_183 = MUL(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_161), DUP(op_AND_161))), CAST(32, MSB(DUP(op_AND_161)), DUP(op_AND_161)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_161)), DUP(op_AND_161))), CAST(32, MSB(DUP(op_AND_161)), DUP(op_AND_161))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_161)), DUP(op_AND_161))), CAST(32, MSB(DUP(op_AND_161)), DUP(op_AND_161)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_161)), DUP(op_AND_161))), CAST(32, MSB(DUP(op_AND_161)), DUP(op_AND_161)))))), EXTRACT64(CAST(64, IL_FALSE, CAST(16, IL_FALSE, op_AND_175)), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_186 = SHIFTL0(CAST(64, IL_FALSE, op_MUL_183), SN(32, 0)); + RzILOpPure *op_RSHIFT_188 = SHIFTRA(op_LSHIFT_186, SN(32, 16)); + RzILOpPure *op_ADD_189 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_153), DUP(op_AND_153))), CAST(32, MSB(DUP(op_AND_153)), DUP(op_AND_153))), op_RSHIFT_188); + RzILOpPure *op_LT_192 = SLT(op_ADD_189, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_197 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_198 = NEG(op_LSHIFT_197); + RzILOpPure *op_LSHIFT_203 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_206 = SUB(op_LSHIFT_203, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_207 = ITE(op_LT_192, op_NEG_198, op_SUB_206); + RzILOpEffect *gcc_expr_208 = BRANCH(op_EQ_102, EMPTY(), set_usr_field_call_147); + + // h_tmp288 = HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x30) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) >> 0x10)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 
0x30) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) >> 0x10))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x30) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) >> 0x10) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_210 = SETL("h_tmp288", cond_207); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx > ...; + RzILOpEffect *seq_211 = SEQN(2, gcc_expr_208, op_ASSIGN_hybrid_tmp_210); + + // Rxx = ((Rxx & (~(0xffffffff << 0x20))) | ((((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x30) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) >> 0x10)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x30) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) >> 0x10)) ? 
((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x30) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) >> 0x10) : h_tmp288) & 0xffffffff) << 0x20)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0x20)); + RzILOpPure *op_NOT_6 = LOGNOT(op_LSHIFT_5); + RzILOpPure *op_AND_7 = LOGAND(READ_REG(pkt, Rxx_op, false), op_NOT_6); + RzILOpPure *op_RSHIFT_106 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_108 = LOGAND(op_RSHIFT_106, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_114 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_116 = LOGAND(op_RSHIFT_114, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_127 = SHIFTRA(DUP(Rtt), SN(32, 0x30)); + RzILOpPure *op_AND_130 = LOGAND(op_RSHIFT_127, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_138 = MUL(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_116), DUP(op_AND_116))), CAST(32, MSB(DUP(op_AND_116)), DUP(op_AND_116)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_116)), DUP(op_AND_116))), CAST(32, MSB(DUP(op_AND_116)), DUP(op_AND_116))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_116)), DUP(op_AND_116))), CAST(32, MSB(DUP(op_AND_116)), DUP(op_AND_116)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_116)), DUP(op_AND_116))), CAST(32, MSB(DUP(op_AND_116)), DUP(op_AND_116)))))), EXTRACT64(CAST(64, IL_FALSE, CAST(16, IL_FALSE, op_AND_130)), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_141 = SHIFTL0(CAST(64, IL_FALSE, op_MUL_138), SN(32, 0)); + RzILOpPure *op_RSHIFT_143 = SHIFTRA(op_LSHIFT_141, SN(32, 16)); + RzILOpPure *op_ADD_144 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_108), DUP(op_AND_108))), CAST(32, MSB(DUP(op_AND_108)), DUP(op_AND_108))), op_RSHIFT_143); + RzILOpPure *cond_212 = ITE(DUP(op_EQ_102), op_ADD_144, VARL("h_tmp288")); + RzILOpPure *op_AND_214 = LOGAND(cond_212, SN(64, 0xffffffff)); + RzILOpPure 
*op_LSHIFT_218 = SHIFTL0(op_AND_214, SN(32, 0x20)); + RzILOpPure *op_OR_219 = LOGOR(op_AND_7, op_LSHIFT_218); + RzILOpEffect *op_ASSIGN_220 = WRITE_REG(bundle, Rxx_op, op_OR_219); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((R ...; + RzILOpEffect *seq_221 = SEQN(2, seq_211, op_ASSIGN_220); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_367 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) >> 0x10)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) >> 0x10))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) >> 0x10) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_236 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_238 = LOGAND(op_RSHIFT_236, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_244 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_246 = LOGAND(op_RSHIFT_244, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_257 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_260 = LOGAND(op_RSHIFT_257, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_268 = MUL(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_246), DUP(op_AND_246))), CAST(32, MSB(DUP(op_AND_246)), DUP(op_AND_246)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_246)), DUP(op_AND_246))), CAST(32, MSB(DUP(op_AND_246)), DUP(op_AND_246))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_246)), DUP(op_AND_246))), CAST(32, MSB(DUP(op_AND_246)), DUP(op_AND_246)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_246)), DUP(op_AND_246))), CAST(32, MSB(DUP(op_AND_246)), DUP(op_AND_246)))))), EXTRACT64(CAST(64, IL_FALSE, CAST(16, IL_FALSE, op_AND_260)), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_271 = SHIFTL0(CAST(64, IL_FALSE, op_MUL_268), SN(32, 0)); + RzILOpPure *op_RSHIFT_273 = SHIFTRA(op_LSHIFT_271, SN(32, 16)); + RzILOpPure *op_ADD_274 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_238), DUP(op_AND_238))), CAST(32, MSB(DUP(op_AND_238)), DUP(op_AND_238))), op_RSHIFT_273); + RzILOpPure *op_RSHIFT_283 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_285 = LOGAND(op_RSHIFT_283, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_291 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_293 = LOGAND(op_RSHIFT_291, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_304 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_307 = LOGAND(op_RSHIFT_304, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_315 = MUL(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_293), DUP(op_AND_293))), 
CAST(32, MSB(DUP(op_AND_293)), DUP(op_AND_293)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_293)), DUP(op_AND_293))), CAST(32, MSB(DUP(op_AND_293)), DUP(op_AND_293))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_293)), DUP(op_AND_293))), CAST(32, MSB(DUP(op_AND_293)), DUP(op_AND_293)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_293)), DUP(op_AND_293))), CAST(32, MSB(DUP(op_AND_293)), DUP(op_AND_293)))))), EXTRACT64(CAST(64, IL_FALSE, CAST(16, IL_FALSE, op_AND_307)), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_318 = SHIFTL0(CAST(64, IL_FALSE, op_MUL_315), SN(32, 0)); + RzILOpPure *op_RSHIFT_320 = SHIFTRA(op_LSHIFT_318, SN(32, 16)); + RzILOpPure *op_ADD_321 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_285), DUP(op_AND_285))), CAST(32, MSB(DUP(op_AND_285)), DUP(op_AND_285))), op_RSHIFT_320); + RzILOpPure *op_EQ_322 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_ADD_274), SN(32, 0), SN(32, 0x20)), op_ADD_321); + RzILOpPure *op_RSHIFT_371 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_373 = LOGAND(op_RSHIFT_371, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_379 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_381 = LOGAND(op_RSHIFT_379, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_392 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_395 = LOGAND(op_RSHIFT_392, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_403 = MUL(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_381), DUP(op_AND_381))), CAST(32, MSB(DUP(op_AND_381)), DUP(op_AND_381)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_381)), DUP(op_AND_381))), CAST(32, MSB(DUP(op_AND_381)), DUP(op_AND_381))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_381)), DUP(op_AND_381))), CAST(32, MSB(DUP(op_AND_381)), DUP(op_AND_381)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_381)), DUP(op_AND_381))), CAST(32, MSB(DUP(op_AND_381)), DUP(op_AND_381)))))), EXTRACT64(CAST(64, IL_FALSE, CAST(16, IL_FALSE, op_AND_395)), SN(32, 0), SN(32, 16))); + RzILOpPure 
*op_LSHIFT_406 = SHIFTL0(CAST(64, IL_FALSE, op_MUL_403), SN(32, 0)); + RzILOpPure *op_RSHIFT_408 = SHIFTRA(op_LSHIFT_406, SN(32, 16)); + RzILOpPure *op_ADD_409 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_373), DUP(op_AND_373))), CAST(32, MSB(DUP(op_AND_373)), DUP(op_AND_373))), op_RSHIFT_408); + RzILOpPure *op_LT_412 = SLT(op_ADD_409, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_417 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_418 = NEG(op_LSHIFT_417); + RzILOpPure *op_LSHIFT_423 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_426 = SUB(op_LSHIFT_423, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_427 = ITE(op_LT_412, op_NEG_418, op_SUB_426); + RzILOpEffect *gcc_expr_428 = BRANCH(op_EQ_322, EMPTY(), set_usr_field_call_367); + + // h_tmp289 = HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) >> 0x10)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) >> 0x10))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) >> 0x10) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_430 = SETL("h_tmp289", cond_427); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx > ...; + RzILOpEffect *seq_431 = SEQN(2, gcc_expr_428, op_ASSIGN_hybrid_tmp_430); + + // Rxx = ((Rxx & (~(0xffffffff << 0x0))) | ((((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) >> 0x10)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) >> 0x10)) ? ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) >> 0x10) : h_tmp289) & 0xffffffff) << 0x0)); + RzILOpPure *op_LSHIFT_227 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0)); + RzILOpPure *op_NOT_228 = LOGNOT(op_LSHIFT_227); + RzILOpPure *op_AND_229 = LOGAND(READ_REG(pkt, Rxx_op, false), op_NOT_228); + RzILOpPure *op_RSHIFT_326 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_328 = LOGAND(op_RSHIFT_326, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_334 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_336 = LOGAND(op_RSHIFT_334, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_347 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_350 = LOGAND(op_RSHIFT_347, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_358 = MUL(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_336), DUP(op_AND_336))), CAST(32, MSB(DUP(op_AND_336)), DUP(op_AND_336)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_336)), DUP(op_AND_336))), CAST(32, MSB(DUP(op_AND_336)), 
DUP(op_AND_336))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_336)), DUP(op_AND_336))), CAST(32, MSB(DUP(op_AND_336)), DUP(op_AND_336)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_336)), DUP(op_AND_336))), CAST(32, MSB(DUP(op_AND_336)), DUP(op_AND_336)))))), EXTRACT64(CAST(64, IL_FALSE, CAST(16, IL_FALSE, op_AND_350)), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_361 = SHIFTL0(CAST(64, IL_FALSE, op_MUL_358), SN(32, 0)); + RzILOpPure *op_RSHIFT_363 = SHIFTRA(op_LSHIFT_361, SN(32, 16)); + RzILOpPure *op_ADD_364 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_328), DUP(op_AND_328))), CAST(32, MSB(DUP(op_AND_328)), DUP(op_AND_328))), op_RSHIFT_363); + RzILOpPure *cond_432 = ITE(DUP(op_EQ_322), op_ADD_364, VARL("h_tmp289")); + RzILOpPure *op_AND_434 = LOGAND(cond_432, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_438 = SHIFTL0(op_AND_434, SN(32, 0)); + RzILOpPure *op_OR_439 = LOGOR(op_AND_229, op_LSHIFT_438); + RzILOpEffect *op_ASSIGN_440 = WRITE_REG(bundle, Rxx_op, op_OR_439); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((R ...; + RzILOpEffect *seq_441 = SEQN(2, seq_431, op_ASSIGN_440); + + RzILOpEffect *instruction_sequence = SEQN(2, seq_221, seq_441); + return instruction_sequence; +} + +// Rxx += vmpywouh(Rss,Rtt):<<1:sat +RzILOpEffect *hex_il_op_m2_mmacuhs_s1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rxx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_147 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 
0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x30) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) >> 0x10)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x30) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) >> 0x10))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x30) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) >> 0x10) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_23 = SHIFTRA(Rss, SN(32, 0x20)); + RzILOpPure *op_AND_25 = LOGAND(op_RSHIFT_23, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_37 = SHIFTRA(Rtt, SN(32, 0x30)); + RzILOpPure *op_AND_40 = LOGAND(op_RSHIFT_37, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_48 = MUL(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_25), DUP(op_AND_25))), CAST(32, MSB(DUP(op_AND_25)), DUP(op_AND_25)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_25)), DUP(op_AND_25))), CAST(32, MSB(DUP(op_AND_25)), DUP(op_AND_25))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_25)), DUP(op_AND_25))), CAST(32, MSB(DUP(op_AND_25)), DUP(op_AND_25)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_25)), DUP(op_AND_25))), CAST(32, MSB(DUP(op_AND_25)), DUP(op_AND_25)))))), EXTRACT64(CAST(64, IL_FALSE, CAST(16, IL_FALSE, op_AND_40)), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_51 = SHIFTL0(CAST(64, IL_FALSE, op_MUL_48), SN(32, 1)); + RzILOpPure *op_RSHIFT_53 = SHIFTRA(op_LSHIFT_51, SN(32, 16)); + RzILOpPure *op_ADD_54 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_16), 
DUP(op_AND_16))), CAST(32, MSB(DUP(op_AND_16)), DUP(op_AND_16))), op_RSHIFT_53); + RzILOpPure *op_RSHIFT_63 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_65 = LOGAND(op_RSHIFT_63, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_71 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_73 = LOGAND(op_RSHIFT_71, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_84 = SHIFTRA(DUP(Rtt), SN(32, 0x30)); + RzILOpPure *op_AND_87 = LOGAND(op_RSHIFT_84, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_95 = MUL(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_73), DUP(op_AND_73))), CAST(32, MSB(DUP(op_AND_73)), DUP(op_AND_73)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_73)), DUP(op_AND_73))), CAST(32, MSB(DUP(op_AND_73)), DUP(op_AND_73))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_73)), DUP(op_AND_73))), CAST(32, MSB(DUP(op_AND_73)), DUP(op_AND_73)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_73)), DUP(op_AND_73))), CAST(32, MSB(DUP(op_AND_73)), DUP(op_AND_73)))))), EXTRACT64(CAST(64, IL_FALSE, CAST(16, IL_FALSE, op_AND_87)), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_98 = SHIFTL0(CAST(64, IL_FALSE, op_MUL_95), SN(32, 1)); + RzILOpPure *op_RSHIFT_100 = SHIFTRA(op_LSHIFT_98, SN(32, 16)); + RzILOpPure *op_ADD_101 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_65), DUP(op_AND_65))), CAST(32, MSB(DUP(op_AND_65)), DUP(op_AND_65))), op_RSHIFT_100); + RzILOpPure *op_EQ_102 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_ADD_54), SN(32, 0), SN(32, 0x20)), op_ADD_101); + RzILOpPure *op_RSHIFT_151 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_153 = LOGAND(op_RSHIFT_151, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_159 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_161 = LOGAND(op_RSHIFT_159, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_172 = SHIFTRA(DUP(Rtt), SN(32, 0x30)); + RzILOpPure *op_AND_175 = LOGAND(op_RSHIFT_172, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure 
*op_MUL_183 = MUL(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_161), DUP(op_AND_161))), CAST(32, MSB(DUP(op_AND_161)), DUP(op_AND_161)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_161)), DUP(op_AND_161))), CAST(32, MSB(DUP(op_AND_161)), DUP(op_AND_161))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_161)), DUP(op_AND_161))), CAST(32, MSB(DUP(op_AND_161)), DUP(op_AND_161)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_161)), DUP(op_AND_161))), CAST(32, MSB(DUP(op_AND_161)), DUP(op_AND_161)))))), EXTRACT64(CAST(64, IL_FALSE, CAST(16, IL_FALSE, op_AND_175)), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_186 = SHIFTL0(CAST(64, IL_FALSE, op_MUL_183), SN(32, 1)); + RzILOpPure *op_RSHIFT_188 = SHIFTRA(op_LSHIFT_186, SN(32, 16)); + RzILOpPure *op_ADD_189 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_153), DUP(op_AND_153))), CAST(32, MSB(DUP(op_AND_153)), DUP(op_AND_153))), op_RSHIFT_188); + RzILOpPure *op_LT_192 = SLT(op_ADD_189, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_197 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_198 = NEG(op_LSHIFT_197); + RzILOpPure *op_LSHIFT_203 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_206 = SUB(op_LSHIFT_203, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_207 = ITE(op_LT_192, op_NEG_198, op_SUB_206); + RzILOpEffect *gcc_expr_208 = BRANCH(op_EQ_102, EMPTY(), set_usr_field_call_147); + + // h_tmp290 = HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x30) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) >> 0x10)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x30) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) >> 0x10))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 
0x1))}, ((((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x30) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) >> 0x10) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_210 = SETL("h_tmp290", cond_207); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx > ...; + RzILOpEffect *seq_211 = SEQN(2, gcc_expr_208, op_ASSIGN_hybrid_tmp_210); + + // Rxx = ((Rxx & (~(0xffffffff << 0x20))) | ((((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x30) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) >> 0x10)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x30) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) >> 0x10)) ? 
((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x30) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) >> 0x10) : h_tmp290) & 0xffffffff) << 0x20)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0x20)); + RzILOpPure *op_NOT_6 = LOGNOT(op_LSHIFT_5); + RzILOpPure *op_AND_7 = LOGAND(READ_REG(pkt, Rxx_op, false), op_NOT_6); + RzILOpPure *op_RSHIFT_106 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_108 = LOGAND(op_RSHIFT_106, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_114 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_116 = LOGAND(op_RSHIFT_114, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_127 = SHIFTRA(DUP(Rtt), SN(32, 0x30)); + RzILOpPure *op_AND_130 = LOGAND(op_RSHIFT_127, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_138 = MUL(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_116), DUP(op_AND_116))), CAST(32, MSB(DUP(op_AND_116)), DUP(op_AND_116)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_116)), DUP(op_AND_116))), CAST(32, MSB(DUP(op_AND_116)), DUP(op_AND_116))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_116)), DUP(op_AND_116))), CAST(32, MSB(DUP(op_AND_116)), DUP(op_AND_116)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_116)), DUP(op_AND_116))), CAST(32, MSB(DUP(op_AND_116)), DUP(op_AND_116)))))), EXTRACT64(CAST(64, IL_FALSE, CAST(16, IL_FALSE, op_AND_130)), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_141 = SHIFTL0(CAST(64, IL_FALSE, op_MUL_138), SN(32, 1)); + RzILOpPure *op_RSHIFT_143 = SHIFTRA(op_LSHIFT_141, SN(32, 16)); + RzILOpPure *op_ADD_144 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_108), DUP(op_AND_108))), CAST(32, MSB(DUP(op_AND_108)), DUP(op_AND_108))), op_RSHIFT_143); + RzILOpPure *cond_212 = ITE(DUP(op_EQ_102), op_ADD_144, VARL("h_tmp290")); + RzILOpPure *op_AND_214 = LOGAND(cond_212, SN(64, 0xffffffff)); + RzILOpPure 
*op_LSHIFT_218 = SHIFTL0(op_AND_214, SN(32, 0x20)); + RzILOpPure *op_OR_219 = LOGOR(op_AND_7, op_LSHIFT_218); + RzILOpEffect *op_ASSIGN_220 = WRITE_REG(bundle, Rxx_op, op_OR_219); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((R ...; + RzILOpEffect *seq_221 = SEQN(2, seq_211, op_ASSIGN_220); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_367 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) >> 0x10)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) >> 0x10))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) >> 0x10) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_236 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_238 = LOGAND(op_RSHIFT_236, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_244 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_246 = LOGAND(op_RSHIFT_244, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_257 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_260 = LOGAND(op_RSHIFT_257, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_268 = MUL(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_246), DUP(op_AND_246))), CAST(32, MSB(DUP(op_AND_246)), DUP(op_AND_246)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_246)), DUP(op_AND_246))), CAST(32, MSB(DUP(op_AND_246)), DUP(op_AND_246))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_246)), DUP(op_AND_246))), CAST(32, MSB(DUP(op_AND_246)), DUP(op_AND_246)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_246)), DUP(op_AND_246))), CAST(32, MSB(DUP(op_AND_246)), DUP(op_AND_246)))))), EXTRACT64(CAST(64, IL_FALSE, CAST(16, IL_FALSE, op_AND_260)), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_271 = SHIFTL0(CAST(64, IL_FALSE, op_MUL_268), SN(32, 1)); + RzILOpPure *op_RSHIFT_273 = SHIFTRA(op_LSHIFT_271, SN(32, 16)); + RzILOpPure *op_ADD_274 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_238), DUP(op_AND_238))), CAST(32, MSB(DUP(op_AND_238)), DUP(op_AND_238))), op_RSHIFT_273); + RzILOpPure *op_RSHIFT_283 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_285 = LOGAND(op_RSHIFT_283, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_291 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_293 = LOGAND(op_RSHIFT_291, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_304 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_307 = LOGAND(op_RSHIFT_304, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_315 = MUL(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_293), DUP(op_AND_293))), 
CAST(32, MSB(DUP(op_AND_293)), DUP(op_AND_293)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_293)), DUP(op_AND_293))), CAST(32, MSB(DUP(op_AND_293)), DUP(op_AND_293))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_293)), DUP(op_AND_293))), CAST(32, MSB(DUP(op_AND_293)), DUP(op_AND_293)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_293)), DUP(op_AND_293))), CAST(32, MSB(DUP(op_AND_293)), DUP(op_AND_293)))))), EXTRACT64(CAST(64, IL_FALSE, CAST(16, IL_FALSE, op_AND_307)), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_318 = SHIFTL0(CAST(64, IL_FALSE, op_MUL_315), SN(32, 1)); + RzILOpPure *op_RSHIFT_320 = SHIFTRA(op_LSHIFT_318, SN(32, 16)); + RzILOpPure *op_ADD_321 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_285), DUP(op_AND_285))), CAST(32, MSB(DUP(op_AND_285)), DUP(op_AND_285))), op_RSHIFT_320); + RzILOpPure *op_EQ_322 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_ADD_274), SN(32, 0), SN(32, 0x20)), op_ADD_321); + RzILOpPure *op_RSHIFT_371 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_373 = LOGAND(op_RSHIFT_371, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_379 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_381 = LOGAND(op_RSHIFT_379, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_392 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_395 = LOGAND(op_RSHIFT_392, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_403 = MUL(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_381), DUP(op_AND_381))), CAST(32, MSB(DUP(op_AND_381)), DUP(op_AND_381)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_381)), DUP(op_AND_381))), CAST(32, MSB(DUP(op_AND_381)), DUP(op_AND_381))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_381)), DUP(op_AND_381))), CAST(32, MSB(DUP(op_AND_381)), DUP(op_AND_381)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_381)), DUP(op_AND_381))), CAST(32, MSB(DUP(op_AND_381)), DUP(op_AND_381)))))), EXTRACT64(CAST(64, IL_FALSE, CAST(16, IL_FALSE, op_AND_395)), SN(32, 0), SN(32, 16))); + RzILOpPure 
*op_LSHIFT_406 = SHIFTL0(CAST(64, IL_FALSE, op_MUL_403), SN(32, 1)); + RzILOpPure *op_RSHIFT_408 = SHIFTRA(op_LSHIFT_406, SN(32, 16)); + RzILOpPure *op_ADD_409 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_373), DUP(op_AND_373))), CAST(32, MSB(DUP(op_AND_373)), DUP(op_AND_373))), op_RSHIFT_408); + RzILOpPure *op_LT_412 = SLT(op_ADD_409, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_417 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_418 = NEG(op_LSHIFT_417); + RzILOpPure *op_LSHIFT_423 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_426 = SUB(op_LSHIFT_423, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_427 = ITE(op_LT_412, op_NEG_418, op_SUB_426); + RzILOpEffect *gcc_expr_428 = BRANCH(op_EQ_322, EMPTY(), set_usr_field_call_367); + + // h_tmp291 = HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) >> 0x10)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) >> 0x10))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) >> 0x10) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_430 = SETL("h_tmp291", cond_427); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx > ...; + RzILOpEffect *seq_431 = SEQN(2, gcc_expr_428, op_ASSIGN_hybrid_tmp_430); + + // Rxx = ((Rxx & (~(0xffffffff << 0x0))) | ((((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) >> 0x10)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) >> 0x10)) ? ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) >> 0x10) : h_tmp291) & 0xffffffff) << 0x0)); + RzILOpPure *op_LSHIFT_227 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0)); + RzILOpPure *op_NOT_228 = LOGNOT(op_LSHIFT_227); + RzILOpPure *op_AND_229 = LOGAND(READ_REG(pkt, Rxx_op, false), op_NOT_228); + RzILOpPure *op_RSHIFT_326 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_328 = LOGAND(op_RSHIFT_326, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_334 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_336 = LOGAND(op_RSHIFT_334, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_347 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_350 = LOGAND(op_RSHIFT_347, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_358 = MUL(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_336), DUP(op_AND_336))), CAST(32, MSB(DUP(op_AND_336)), DUP(op_AND_336)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_336)), DUP(op_AND_336))), CAST(32, MSB(DUP(op_AND_336)), 
DUP(op_AND_336))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_336)), DUP(op_AND_336))), CAST(32, MSB(DUP(op_AND_336)), DUP(op_AND_336)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_336)), DUP(op_AND_336))), CAST(32, MSB(DUP(op_AND_336)), DUP(op_AND_336)))))), EXTRACT64(CAST(64, IL_FALSE, CAST(16, IL_FALSE, op_AND_350)), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_361 = SHIFTL0(CAST(64, IL_FALSE, op_MUL_358), SN(32, 1)); + RzILOpPure *op_RSHIFT_363 = SHIFTRA(op_LSHIFT_361, SN(32, 16)); + RzILOpPure *op_ADD_364 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_328), DUP(op_AND_328))), CAST(32, MSB(DUP(op_AND_328)), DUP(op_AND_328))), op_RSHIFT_363); + RzILOpPure *cond_432 = ITE(DUP(op_EQ_322), op_ADD_364, VARL("h_tmp291")); + RzILOpPure *op_AND_434 = LOGAND(cond_432, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_438 = SHIFTL0(op_AND_434, SN(32, 0)); + RzILOpPure *op_OR_439 = LOGOR(op_AND_229, op_LSHIFT_438); + RzILOpEffect *op_ASSIGN_440 = WRITE_REG(bundle, Rxx_op, op_OR_439); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((R ...; + RzILOpEffect *seq_441 = SEQN(2, seq_431, op_ASSIGN_440); + + RzILOpEffect *instruction_sequence = SEQN(2, seq_221, seq_441); + return instruction_sequence; +} + +// Rxx += vmpyweuh(Rss,Rtt):rnd:sat +RzILOpEffect *hex_il_op_m2_mmaculs_rs0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rxx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_156 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 
0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) + ((st64) 0x8000) >> 0x10)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) + ((st64) 0x8000) >> 0x10))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) + ((st64) 0x8000) >> 0x10) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_23 = SHIFTRA(Rss, SN(32, 0x20)); + RzILOpPure *op_AND_25 = LOGAND(op_RSHIFT_23, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_37 = SHIFTRA(Rtt, SN(32, 0x20)); + RzILOpPure *op_AND_40 = LOGAND(op_RSHIFT_37, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_48 = MUL(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_25), DUP(op_AND_25))), CAST(32, MSB(DUP(op_AND_25)), DUP(op_AND_25)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_25)), DUP(op_AND_25))), CAST(32, MSB(DUP(op_AND_25)), DUP(op_AND_25))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_25)), DUP(op_AND_25))), CAST(32, MSB(DUP(op_AND_25)), DUP(op_AND_25)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_25)), DUP(op_AND_25))), CAST(32, MSB(DUP(op_AND_25)), DUP(op_AND_25)))))), EXTRACT64(CAST(64, IL_FALSE, CAST(16, IL_FALSE, op_AND_40)), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_51 = SHIFTL0(CAST(64, IL_FALSE, op_MUL_48), SN(32, 0)); + RzILOpPure *op_ADD_54 = ADD(op_LSHIFT_51, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); 
+ RzILOpPure *op_RSHIFT_56 = SHIFTRA(op_ADD_54, SN(32, 16)); + RzILOpPure *op_ADD_57 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_16), DUP(op_AND_16))), CAST(32, MSB(DUP(op_AND_16)), DUP(op_AND_16))), op_RSHIFT_56); + RzILOpPure *op_RSHIFT_66 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_68 = LOGAND(op_RSHIFT_66, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_74 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_76 = LOGAND(op_RSHIFT_74, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_87 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_90 = LOGAND(op_RSHIFT_87, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_98 = MUL(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_76), DUP(op_AND_76))), CAST(32, MSB(DUP(op_AND_76)), DUP(op_AND_76)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_76)), DUP(op_AND_76))), CAST(32, MSB(DUP(op_AND_76)), DUP(op_AND_76))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_76)), DUP(op_AND_76))), CAST(32, MSB(DUP(op_AND_76)), DUP(op_AND_76)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_76)), DUP(op_AND_76))), CAST(32, MSB(DUP(op_AND_76)), DUP(op_AND_76)))))), EXTRACT64(CAST(64, IL_FALSE, CAST(16, IL_FALSE, op_AND_90)), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_101 = SHIFTL0(CAST(64, IL_FALSE, op_MUL_98), SN(32, 0)); + RzILOpPure *op_ADD_104 = ADD(op_LSHIFT_101, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_106 = SHIFTRA(op_ADD_104, SN(32, 16)); + RzILOpPure *op_ADD_107 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_68), DUP(op_AND_68))), CAST(32, MSB(DUP(op_AND_68)), DUP(op_AND_68))), op_RSHIFT_106); + RzILOpPure *op_EQ_108 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_ADD_57), SN(32, 0), SN(32, 0x20)), op_ADD_107); + RzILOpPure *op_RSHIFT_160 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_162 = LOGAND(op_RSHIFT_160, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_168 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_170 
= LOGAND(op_RSHIFT_168, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_181 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_184 = LOGAND(op_RSHIFT_181, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_192 = MUL(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_170), DUP(op_AND_170))), CAST(32, MSB(DUP(op_AND_170)), DUP(op_AND_170)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_170)), DUP(op_AND_170))), CAST(32, MSB(DUP(op_AND_170)), DUP(op_AND_170))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_170)), DUP(op_AND_170))), CAST(32, MSB(DUP(op_AND_170)), DUP(op_AND_170)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_170)), DUP(op_AND_170))), CAST(32, MSB(DUP(op_AND_170)), DUP(op_AND_170)))))), EXTRACT64(CAST(64, IL_FALSE, CAST(16, IL_FALSE, op_AND_184)), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_195 = SHIFTL0(CAST(64, IL_FALSE, op_MUL_192), SN(32, 0)); + RzILOpPure *op_ADD_198 = ADD(op_LSHIFT_195, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_200 = SHIFTRA(op_ADD_198, SN(32, 16)); + RzILOpPure *op_ADD_201 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_162), DUP(op_AND_162))), CAST(32, MSB(DUP(op_AND_162)), DUP(op_AND_162))), op_RSHIFT_200); + RzILOpPure *op_LT_204 = SLT(op_ADD_201, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_209 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_210 = NEG(op_LSHIFT_209); + RzILOpPure *op_LSHIFT_215 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_218 = SUB(op_LSHIFT_215, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_219 = ITE(op_LT_204, op_NEG_210, op_SUB_218); + RzILOpEffect *gcc_expr_220 = BRANCH(op_EQ_108, EMPTY(), set_usr_field_call_156); + + // h_tmp292 = HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) + ((st64) 
0x8000) >> 0x10)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) + ((st64) 0x8000) >> 0x10))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) + ((st64) 0x8000) >> 0x10) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_222 = SETL("h_tmp292", cond_219); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx > ...; + RzILOpEffect *seq_223 = SEQN(2, gcc_expr_220, op_ASSIGN_hybrid_tmp_222); + + // Rxx = ((Rxx & (~(0xffffffff << 0x20))) | ((((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) + ((st64) 0x8000) >> 0x10)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) + ((st64) 0x8000) >> 0x10)) ? 
((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) + ((st64) 0x8000) >> 0x10) : h_tmp292) & 0xffffffff) << 0x20)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0x20)); + RzILOpPure *op_NOT_6 = LOGNOT(op_LSHIFT_5); + RzILOpPure *op_AND_7 = LOGAND(READ_REG(pkt, Rxx_op, false), op_NOT_6); + RzILOpPure *op_RSHIFT_112 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_114 = LOGAND(op_RSHIFT_112, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_120 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_122 = LOGAND(op_RSHIFT_120, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_133 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_136 = LOGAND(op_RSHIFT_133, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_144 = MUL(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_122), DUP(op_AND_122))), CAST(32, MSB(DUP(op_AND_122)), DUP(op_AND_122)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_122)), DUP(op_AND_122))), CAST(32, MSB(DUP(op_AND_122)), DUP(op_AND_122))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_122)), DUP(op_AND_122))), CAST(32, MSB(DUP(op_AND_122)), DUP(op_AND_122)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_122)), DUP(op_AND_122))), CAST(32, MSB(DUP(op_AND_122)), DUP(op_AND_122)))))), EXTRACT64(CAST(64, IL_FALSE, CAST(16, IL_FALSE, op_AND_136)), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_147 = SHIFTL0(CAST(64, IL_FALSE, op_MUL_144), SN(32, 0)); + RzILOpPure *op_ADD_150 = ADD(op_LSHIFT_147, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_152 = SHIFTRA(op_ADD_150, SN(32, 16)); + RzILOpPure *op_ADD_153 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_114), DUP(op_AND_114))), CAST(32, MSB(DUP(op_AND_114)), DUP(op_AND_114))), op_RSHIFT_152); + RzILOpPure *cond_224 = ITE(DUP(op_EQ_108), op_ADD_153, 
VARL("h_tmp292")); + RzILOpPure *op_AND_226 = LOGAND(cond_224, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_230 = SHIFTL0(op_AND_226, SN(32, 0x20)); + RzILOpPure *op_OR_231 = LOGOR(op_AND_7, op_LSHIFT_230); + RzILOpEffect *op_ASSIGN_232 = WRITE_REG(bundle, Rxx_op, op_OR_231); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((R ...; + RzILOpEffect *seq_233 = SEQN(2, seq_223, op_ASSIGN_232); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_388 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) + ((st64) 0x8000) >> 0x10)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) + ((st64) 0x8000) >> 0x10))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) + ((st64) 0x8000) >> 0x10) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_248 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_250 = LOGAND(op_RSHIFT_248, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_256 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_258 = LOGAND(op_RSHIFT_256, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_269 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_272 = LOGAND(op_RSHIFT_269, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_280 = MUL(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_258), DUP(op_AND_258))), CAST(32, MSB(DUP(op_AND_258)), DUP(op_AND_258)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_258)), DUP(op_AND_258))), CAST(32, MSB(DUP(op_AND_258)), DUP(op_AND_258))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_258)), DUP(op_AND_258))), CAST(32, MSB(DUP(op_AND_258)), DUP(op_AND_258)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_258)), DUP(op_AND_258))), CAST(32, MSB(DUP(op_AND_258)), DUP(op_AND_258)))))), EXTRACT64(CAST(64, IL_FALSE, CAST(16, IL_FALSE, op_AND_272)), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_283 = SHIFTL0(CAST(64, IL_FALSE, op_MUL_280), SN(32, 0)); + RzILOpPure *op_ADD_286 = ADD(op_LSHIFT_283, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_288 = SHIFTRA(op_ADD_286, SN(32, 16)); + RzILOpPure *op_ADD_289 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_250), DUP(op_AND_250))), CAST(32, MSB(DUP(op_AND_250)), DUP(op_AND_250))), op_RSHIFT_288); + RzILOpPure *op_RSHIFT_298 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_300 = LOGAND(op_RSHIFT_298, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_306 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_308 = LOGAND(op_RSHIFT_306, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_319 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_322 = LOGAND(op_RSHIFT_319, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_330 = MUL(CAST(64, IL_FALSE, 
CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_308), DUP(op_AND_308))), CAST(32, MSB(DUP(op_AND_308)), DUP(op_AND_308)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_308)), DUP(op_AND_308))), CAST(32, MSB(DUP(op_AND_308)), DUP(op_AND_308))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_308)), DUP(op_AND_308))), CAST(32, MSB(DUP(op_AND_308)), DUP(op_AND_308)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_308)), DUP(op_AND_308))), CAST(32, MSB(DUP(op_AND_308)), DUP(op_AND_308)))))), EXTRACT64(CAST(64, IL_FALSE, CAST(16, IL_FALSE, op_AND_322)), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_333 = SHIFTL0(CAST(64, IL_FALSE, op_MUL_330), SN(32, 0)); + RzILOpPure *op_ADD_336 = ADD(op_LSHIFT_333, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_338 = SHIFTRA(op_ADD_336, SN(32, 16)); + RzILOpPure *op_ADD_339 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_300), DUP(op_AND_300))), CAST(32, MSB(DUP(op_AND_300)), DUP(op_AND_300))), op_RSHIFT_338); + RzILOpPure *op_EQ_340 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_ADD_289), SN(32, 0), SN(32, 0x20)), op_ADD_339); + RzILOpPure *op_RSHIFT_392 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_394 = LOGAND(op_RSHIFT_392, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_400 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_402 = LOGAND(op_RSHIFT_400, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_413 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_416 = LOGAND(op_RSHIFT_413, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_424 = MUL(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_402), DUP(op_AND_402))), CAST(32, MSB(DUP(op_AND_402)), DUP(op_AND_402)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_402)), DUP(op_AND_402))), CAST(32, MSB(DUP(op_AND_402)), DUP(op_AND_402))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_402)), DUP(op_AND_402))), CAST(32, MSB(DUP(op_AND_402)), DUP(op_AND_402)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_402)), 
DUP(op_AND_402))), CAST(32, MSB(DUP(op_AND_402)), DUP(op_AND_402)))))), EXTRACT64(CAST(64, IL_FALSE, CAST(16, IL_FALSE, op_AND_416)), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_427 = SHIFTL0(CAST(64, IL_FALSE, op_MUL_424), SN(32, 0)); + RzILOpPure *op_ADD_430 = ADD(op_LSHIFT_427, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_432 = SHIFTRA(op_ADD_430, SN(32, 16)); + RzILOpPure *op_ADD_433 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_394), DUP(op_AND_394))), CAST(32, MSB(DUP(op_AND_394)), DUP(op_AND_394))), op_RSHIFT_432); + RzILOpPure *op_LT_436 = SLT(op_ADD_433, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_441 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_442 = NEG(op_LSHIFT_441); + RzILOpPure *op_LSHIFT_447 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_450 = SUB(op_LSHIFT_447, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_451 = ITE(op_LT_436, op_NEG_442, op_SUB_450); + RzILOpEffect *gcc_expr_452 = BRANCH(op_EQ_340, EMPTY(), set_usr_field_call_388); + + // h_tmp293 = HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) + ((st64) 0x8000) >> 0x10)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) + ((st64) 0x8000) >> 0x10))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) + ((st64) 0x8000) >> 0x10) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_454 = SETL("h_tmp293", cond_451); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx > ...; + RzILOpEffect *seq_455 = SEQN(2, gcc_expr_452, op_ASSIGN_hybrid_tmp_454); + + // Rxx = ((Rxx & (~(0xffffffff << 0x0))) | ((((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) + ((st64) 0x8000) >> 0x10)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) + ((st64) 0x8000) >> 0x10)) ? ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) + ((st64) 0x8000) >> 0x10) : h_tmp293) & 0xffffffff) << 0x0)); + RzILOpPure *op_LSHIFT_239 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0)); + RzILOpPure *op_NOT_240 = LOGNOT(op_LSHIFT_239); + RzILOpPure *op_AND_241 = LOGAND(READ_REG(pkt, Rxx_op, false), op_NOT_240); + RzILOpPure *op_RSHIFT_344 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_346 = LOGAND(op_RSHIFT_344, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_352 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_354 = LOGAND(op_RSHIFT_352, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_365 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_368 = LOGAND(op_RSHIFT_365, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_376 = MUL(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_354), DUP(op_AND_354))), CAST(32, MSB(DUP(op_AND_354)), DUP(op_AND_354)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_354)), 
DUP(op_AND_354))), CAST(32, MSB(DUP(op_AND_354)), DUP(op_AND_354))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_354)), DUP(op_AND_354))), CAST(32, MSB(DUP(op_AND_354)), DUP(op_AND_354)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_354)), DUP(op_AND_354))), CAST(32, MSB(DUP(op_AND_354)), DUP(op_AND_354)))))), EXTRACT64(CAST(64, IL_FALSE, CAST(16, IL_FALSE, op_AND_368)), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_379 = SHIFTL0(CAST(64, IL_FALSE, op_MUL_376), SN(32, 0)); + RzILOpPure *op_ADD_382 = ADD(op_LSHIFT_379, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_384 = SHIFTRA(op_ADD_382, SN(32, 16)); + RzILOpPure *op_ADD_385 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_346), DUP(op_AND_346))), CAST(32, MSB(DUP(op_AND_346)), DUP(op_AND_346))), op_RSHIFT_384); + RzILOpPure *cond_456 = ITE(DUP(op_EQ_340), op_ADD_385, VARL("h_tmp293")); + RzILOpPure *op_AND_458 = LOGAND(cond_456, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_462 = SHIFTL0(op_AND_458, SN(32, 0)); + RzILOpPure *op_OR_463 = LOGOR(op_AND_241, op_LSHIFT_462); + RzILOpEffect *op_ASSIGN_464 = WRITE_REG(bundle, Rxx_op, op_OR_463); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((R ...; + RzILOpEffect *seq_465 = SEQN(2, seq_455, op_ASSIGN_464); + + RzILOpEffect *instruction_sequence = SEQN(2, seq_233, seq_465); + return instruction_sequence; +} + +// Rxx += vmpyweuh(Rss,Rtt):<<1:rnd:sat +RzILOpEffect *hex_il_op_m2_mmaculs_rs1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rxx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_156 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // 
HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) + ((st64) 0x8000) >> 0x10)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) + ((st64) 0x8000) >> 0x10))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) + ((st64) 0x8000) >> 0x10) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_23 = SHIFTRA(Rss, SN(32, 0x20)); + RzILOpPure *op_AND_25 = LOGAND(op_RSHIFT_23, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_37 = SHIFTRA(Rtt, SN(32, 0x20)); + RzILOpPure *op_AND_40 = LOGAND(op_RSHIFT_37, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_48 = MUL(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_25), DUP(op_AND_25))), CAST(32, MSB(DUP(op_AND_25)), DUP(op_AND_25)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_25)), DUP(op_AND_25))), CAST(32, MSB(DUP(op_AND_25)), DUP(op_AND_25))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_25)), DUP(op_AND_25))), CAST(32, MSB(DUP(op_AND_25)), DUP(op_AND_25)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_25)), DUP(op_AND_25))), CAST(32, MSB(DUP(op_AND_25)), DUP(op_AND_25)))))), EXTRACT64(CAST(64, IL_FALSE, CAST(16, IL_FALSE, op_AND_40)), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_51 
= SHIFTL0(CAST(64, IL_FALSE, op_MUL_48), SN(32, 1)); + RzILOpPure *op_ADD_54 = ADD(op_LSHIFT_51, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_56 = SHIFTRA(op_ADD_54, SN(32, 16)); + RzILOpPure *op_ADD_57 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_16), DUP(op_AND_16))), CAST(32, MSB(DUP(op_AND_16)), DUP(op_AND_16))), op_RSHIFT_56); + RzILOpPure *op_RSHIFT_66 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_68 = LOGAND(op_RSHIFT_66, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_74 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_76 = LOGAND(op_RSHIFT_74, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_87 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_90 = LOGAND(op_RSHIFT_87, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_98 = MUL(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_76), DUP(op_AND_76))), CAST(32, MSB(DUP(op_AND_76)), DUP(op_AND_76)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_76)), DUP(op_AND_76))), CAST(32, MSB(DUP(op_AND_76)), DUP(op_AND_76))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_76)), DUP(op_AND_76))), CAST(32, MSB(DUP(op_AND_76)), DUP(op_AND_76)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_76)), DUP(op_AND_76))), CAST(32, MSB(DUP(op_AND_76)), DUP(op_AND_76)))))), EXTRACT64(CAST(64, IL_FALSE, CAST(16, IL_FALSE, op_AND_90)), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_101 = SHIFTL0(CAST(64, IL_FALSE, op_MUL_98), SN(32, 1)); + RzILOpPure *op_ADD_104 = ADD(op_LSHIFT_101, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_106 = SHIFTRA(op_ADD_104, SN(32, 16)); + RzILOpPure *op_ADD_107 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_68), DUP(op_AND_68))), CAST(32, MSB(DUP(op_AND_68)), DUP(op_AND_68))), op_RSHIFT_106); + RzILOpPure *op_EQ_108 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_ADD_57), SN(32, 0), SN(32, 0x20)), op_ADD_107); + RzILOpPure *op_RSHIFT_160 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure 
*op_AND_162 = LOGAND(op_RSHIFT_160, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_168 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_170 = LOGAND(op_RSHIFT_168, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_181 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_184 = LOGAND(op_RSHIFT_181, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_192 = MUL(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_170), DUP(op_AND_170))), CAST(32, MSB(DUP(op_AND_170)), DUP(op_AND_170)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_170)), DUP(op_AND_170))), CAST(32, MSB(DUP(op_AND_170)), DUP(op_AND_170))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_170)), DUP(op_AND_170))), CAST(32, MSB(DUP(op_AND_170)), DUP(op_AND_170)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_170)), DUP(op_AND_170))), CAST(32, MSB(DUP(op_AND_170)), DUP(op_AND_170)))))), EXTRACT64(CAST(64, IL_FALSE, CAST(16, IL_FALSE, op_AND_184)), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_195 = SHIFTL0(CAST(64, IL_FALSE, op_MUL_192), SN(32, 1)); + RzILOpPure *op_ADD_198 = ADD(op_LSHIFT_195, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_200 = SHIFTRA(op_ADD_198, SN(32, 16)); + RzILOpPure *op_ADD_201 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_162), DUP(op_AND_162))), CAST(32, MSB(DUP(op_AND_162)), DUP(op_AND_162))), op_RSHIFT_200); + RzILOpPure *op_LT_204 = SLT(op_ADD_201, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_209 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_210 = NEG(op_LSHIFT_209); + RzILOpPure *op_LSHIFT_215 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_218 = SUB(op_LSHIFT_215, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_219 = ITE(op_LT_204, op_NEG_210, op_SUB_218); + RzILOpEffect *gcc_expr_220 = BRANCH(op_EQ_108, EMPTY(), set_usr_field_call_156); + + // h_tmp294 = HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) 
((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) + ((st64) 0x8000) >> 0x10)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) + ((st64) 0x8000) >> 0x10))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) + ((st64) 0x8000) >> 0x10) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_222 = SETL("h_tmp294", cond_219); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx > ...; + RzILOpEffect *seq_223 = SEQN(2, gcc_expr_220, op_ASSIGN_hybrid_tmp_222); + + // Rxx = ((Rxx & (~(0xffffffff << 0x20))) | ((((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) + ((st64) 0x8000) >> 0x10)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) + ((st64) 0x8000) >> 0x10)) ? 
((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) + ((st64) 0x8000) >> 0x10) : h_tmp294) & 0xffffffff) << 0x20)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0x20)); + RzILOpPure *op_NOT_6 = LOGNOT(op_LSHIFT_5); + RzILOpPure *op_AND_7 = LOGAND(READ_REG(pkt, Rxx_op, false), op_NOT_6); + RzILOpPure *op_RSHIFT_112 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_114 = LOGAND(op_RSHIFT_112, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_120 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_122 = LOGAND(op_RSHIFT_120, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_133 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_136 = LOGAND(op_RSHIFT_133, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_144 = MUL(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_122), DUP(op_AND_122))), CAST(32, MSB(DUP(op_AND_122)), DUP(op_AND_122)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_122)), DUP(op_AND_122))), CAST(32, MSB(DUP(op_AND_122)), DUP(op_AND_122))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_122)), DUP(op_AND_122))), CAST(32, MSB(DUP(op_AND_122)), DUP(op_AND_122)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_122)), DUP(op_AND_122))), CAST(32, MSB(DUP(op_AND_122)), DUP(op_AND_122)))))), EXTRACT64(CAST(64, IL_FALSE, CAST(16, IL_FALSE, op_AND_136)), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_147 = SHIFTL0(CAST(64, IL_FALSE, op_MUL_144), SN(32, 1)); + RzILOpPure *op_ADD_150 = ADD(op_LSHIFT_147, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_152 = SHIFTRA(op_ADD_150, SN(32, 16)); + RzILOpPure *op_ADD_153 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_114), DUP(op_AND_114))), CAST(32, MSB(DUP(op_AND_114)), DUP(op_AND_114))), op_RSHIFT_152); + RzILOpPure *cond_224 = ITE(DUP(op_EQ_108), op_ADD_153, 
VARL("h_tmp294")); + RzILOpPure *op_AND_226 = LOGAND(cond_224, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_230 = SHIFTL0(op_AND_226, SN(32, 0x20)); + RzILOpPure *op_OR_231 = LOGOR(op_AND_7, op_LSHIFT_230); + RzILOpEffect *op_ASSIGN_232 = WRITE_REG(bundle, Rxx_op, op_OR_231); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((R ...; + RzILOpEffect *seq_233 = SEQN(2, seq_223, op_ASSIGN_232); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_388 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) + ((st64) 0x8000) >> 0x10)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) + ((st64) 0x8000) >> 0x10))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) + ((st64) 0x8000) >> 0x10) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_248 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_250 = LOGAND(op_RSHIFT_248, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_256 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_258 = LOGAND(op_RSHIFT_256, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_269 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_272 = LOGAND(op_RSHIFT_269, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_280 = MUL(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_258), DUP(op_AND_258))), CAST(32, MSB(DUP(op_AND_258)), DUP(op_AND_258)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_258)), DUP(op_AND_258))), CAST(32, MSB(DUP(op_AND_258)), DUP(op_AND_258))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_258)), DUP(op_AND_258))), CAST(32, MSB(DUP(op_AND_258)), DUP(op_AND_258)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_258)), DUP(op_AND_258))), CAST(32, MSB(DUP(op_AND_258)), DUP(op_AND_258)))))), EXTRACT64(CAST(64, IL_FALSE, CAST(16, IL_FALSE, op_AND_272)), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_283 = SHIFTL0(CAST(64, IL_FALSE, op_MUL_280), SN(32, 1)); + RzILOpPure *op_ADD_286 = ADD(op_LSHIFT_283, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_288 = SHIFTRA(op_ADD_286, SN(32, 16)); + RzILOpPure *op_ADD_289 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_250), DUP(op_AND_250))), CAST(32, MSB(DUP(op_AND_250)), DUP(op_AND_250))), op_RSHIFT_288); + RzILOpPure *op_RSHIFT_298 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_300 = LOGAND(op_RSHIFT_298, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_306 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_308 = LOGAND(op_RSHIFT_306, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_319 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_322 = LOGAND(op_RSHIFT_319, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_330 = MUL(CAST(64, IL_FALSE, 
CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_308), DUP(op_AND_308))), CAST(32, MSB(DUP(op_AND_308)), DUP(op_AND_308)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_308)), DUP(op_AND_308))), CAST(32, MSB(DUP(op_AND_308)), DUP(op_AND_308))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_308)), DUP(op_AND_308))), CAST(32, MSB(DUP(op_AND_308)), DUP(op_AND_308)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_308)), DUP(op_AND_308))), CAST(32, MSB(DUP(op_AND_308)), DUP(op_AND_308)))))), EXTRACT64(CAST(64, IL_FALSE, CAST(16, IL_FALSE, op_AND_322)), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_333 = SHIFTL0(CAST(64, IL_FALSE, op_MUL_330), SN(32, 1)); + RzILOpPure *op_ADD_336 = ADD(op_LSHIFT_333, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_338 = SHIFTRA(op_ADD_336, SN(32, 16)); + RzILOpPure *op_ADD_339 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_300), DUP(op_AND_300))), CAST(32, MSB(DUP(op_AND_300)), DUP(op_AND_300))), op_RSHIFT_338); + RzILOpPure *op_EQ_340 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_ADD_289), SN(32, 0), SN(32, 0x20)), op_ADD_339); + RzILOpPure *op_RSHIFT_392 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_394 = LOGAND(op_RSHIFT_392, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_400 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_402 = LOGAND(op_RSHIFT_400, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_413 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_416 = LOGAND(op_RSHIFT_413, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_424 = MUL(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_402), DUP(op_AND_402))), CAST(32, MSB(DUP(op_AND_402)), DUP(op_AND_402)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_402)), DUP(op_AND_402))), CAST(32, MSB(DUP(op_AND_402)), DUP(op_AND_402))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_402)), DUP(op_AND_402))), CAST(32, MSB(DUP(op_AND_402)), DUP(op_AND_402)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_402)), 
DUP(op_AND_402))), CAST(32, MSB(DUP(op_AND_402)), DUP(op_AND_402)))))), EXTRACT64(CAST(64, IL_FALSE, CAST(16, IL_FALSE, op_AND_416)), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_427 = SHIFTL0(CAST(64, IL_FALSE, op_MUL_424), SN(32, 1)); + RzILOpPure *op_ADD_430 = ADD(op_LSHIFT_427, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_432 = SHIFTRA(op_ADD_430, SN(32, 16)); + RzILOpPure *op_ADD_433 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_394), DUP(op_AND_394))), CAST(32, MSB(DUP(op_AND_394)), DUP(op_AND_394))), op_RSHIFT_432); + RzILOpPure *op_LT_436 = SLT(op_ADD_433, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_441 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_442 = NEG(op_LSHIFT_441); + RzILOpPure *op_LSHIFT_447 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_450 = SUB(op_LSHIFT_447, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_451 = ITE(op_LT_436, op_NEG_442, op_SUB_450); + RzILOpEffect *gcc_expr_452 = BRANCH(op_EQ_340, EMPTY(), set_usr_field_call_388); + + // h_tmp295 = HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) + ((st64) 0x8000) >> 0x10)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) + ((st64) 0x8000) >> 0x10))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) + ((st64) 0x8000) >> 0x10) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_454 = SETL("h_tmp295", cond_451); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx > ...; + RzILOpEffect *seq_455 = SEQN(2, gcc_expr_452, op_ASSIGN_hybrid_tmp_454); + + // Rxx = ((Rxx & (~(0xffffffff << 0x0))) | ((((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) + ((st64) 0x8000) >> 0x10)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) + ((st64) 0x8000) >> 0x10)) ? ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) + ((st64) 0x8000) >> 0x10) : h_tmp295) & 0xffffffff) << 0x0)); + RzILOpPure *op_LSHIFT_239 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0)); + RzILOpPure *op_NOT_240 = LOGNOT(op_LSHIFT_239); + RzILOpPure *op_AND_241 = LOGAND(READ_REG(pkt, Rxx_op, false), op_NOT_240); + RzILOpPure *op_RSHIFT_344 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_346 = LOGAND(op_RSHIFT_344, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_352 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_354 = LOGAND(op_RSHIFT_352, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_365 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_368 = LOGAND(op_RSHIFT_365, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_376 = MUL(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_354), DUP(op_AND_354))), CAST(32, MSB(DUP(op_AND_354)), DUP(op_AND_354)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_354)), 
DUP(op_AND_354))), CAST(32, MSB(DUP(op_AND_354)), DUP(op_AND_354))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_354)), DUP(op_AND_354))), CAST(32, MSB(DUP(op_AND_354)), DUP(op_AND_354)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_354)), DUP(op_AND_354))), CAST(32, MSB(DUP(op_AND_354)), DUP(op_AND_354)))))), EXTRACT64(CAST(64, IL_FALSE, CAST(16, IL_FALSE, op_AND_368)), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_379 = SHIFTL0(CAST(64, IL_FALSE, op_MUL_376), SN(32, 1)); + RzILOpPure *op_ADD_382 = ADD(op_LSHIFT_379, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_384 = SHIFTRA(op_ADD_382, SN(32, 16)); + RzILOpPure *op_ADD_385 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_346), DUP(op_AND_346))), CAST(32, MSB(DUP(op_AND_346)), DUP(op_AND_346))), op_RSHIFT_384); + RzILOpPure *cond_456 = ITE(DUP(op_EQ_340), op_ADD_385, VARL("h_tmp295")); + RzILOpPure *op_AND_458 = LOGAND(cond_456, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_462 = SHIFTL0(op_AND_458, SN(32, 0)); + RzILOpPure *op_OR_463 = LOGOR(op_AND_241, op_LSHIFT_462); + RzILOpEffect *op_ASSIGN_464 = WRITE_REG(bundle, Rxx_op, op_OR_463); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((R ...; + RzILOpEffect *seq_465 = SEQN(2, seq_455, op_ASSIGN_464); + + RzILOpEffect *instruction_sequence = SEQN(2, seq_233, seq_465); + return instruction_sequence; +} + +// Rxx += vmpyweuh(Rss,Rtt):sat +RzILOpEffect *hex_il_op_m2_mmaculs_s0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rxx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_147 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // 
HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) >> 0x10)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) >> 0x10))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) >> 0x10) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_23 = SHIFTRA(Rss, SN(32, 0x20)); + RzILOpPure *op_AND_25 = LOGAND(op_RSHIFT_23, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_37 = SHIFTRA(Rtt, SN(32, 0x20)); + RzILOpPure *op_AND_40 = LOGAND(op_RSHIFT_37, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_48 = MUL(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_25), DUP(op_AND_25))), CAST(32, MSB(DUP(op_AND_25)), DUP(op_AND_25)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_25)), DUP(op_AND_25))), CAST(32, MSB(DUP(op_AND_25)), DUP(op_AND_25))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_25)), DUP(op_AND_25))), CAST(32, MSB(DUP(op_AND_25)), DUP(op_AND_25)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_25)), DUP(op_AND_25))), CAST(32, MSB(DUP(op_AND_25)), DUP(op_AND_25)))))), EXTRACT64(CAST(64, IL_FALSE, CAST(16, IL_FALSE, op_AND_40)), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_51 = SHIFTL0(CAST(64, IL_FALSE, op_MUL_48), SN(32, 0)); + 
RzILOpPure *op_RSHIFT_53 = SHIFTRA(op_LSHIFT_51, SN(32, 16)); + RzILOpPure *op_ADD_54 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_16), DUP(op_AND_16))), CAST(32, MSB(DUP(op_AND_16)), DUP(op_AND_16))), op_RSHIFT_53); + RzILOpPure *op_RSHIFT_63 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_65 = LOGAND(op_RSHIFT_63, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_71 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_73 = LOGAND(op_RSHIFT_71, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_84 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_87 = LOGAND(op_RSHIFT_84, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_95 = MUL(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_73), DUP(op_AND_73))), CAST(32, MSB(DUP(op_AND_73)), DUP(op_AND_73)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_73)), DUP(op_AND_73))), CAST(32, MSB(DUP(op_AND_73)), DUP(op_AND_73))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_73)), DUP(op_AND_73))), CAST(32, MSB(DUP(op_AND_73)), DUP(op_AND_73)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_73)), DUP(op_AND_73))), CAST(32, MSB(DUP(op_AND_73)), DUP(op_AND_73)))))), EXTRACT64(CAST(64, IL_FALSE, CAST(16, IL_FALSE, op_AND_87)), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_98 = SHIFTL0(CAST(64, IL_FALSE, op_MUL_95), SN(32, 0)); + RzILOpPure *op_RSHIFT_100 = SHIFTRA(op_LSHIFT_98, SN(32, 16)); + RzILOpPure *op_ADD_101 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_65), DUP(op_AND_65))), CAST(32, MSB(DUP(op_AND_65)), DUP(op_AND_65))), op_RSHIFT_100); + RzILOpPure *op_EQ_102 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_ADD_54), SN(32, 0), SN(32, 0x20)), op_ADD_101); + RzILOpPure *op_RSHIFT_151 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_153 = LOGAND(op_RSHIFT_151, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_159 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_161 = LOGAND(op_RSHIFT_159, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_172 = SHIFTRA(DUP(Rtt), 
SN(32, 0x20)); + RzILOpPure *op_AND_175 = LOGAND(op_RSHIFT_172, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_183 = MUL(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_161), DUP(op_AND_161))), CAST(32, MSB(DUP(op_AND_161)), DUP(op_AND_161)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_161)), DUP(op_AND_161))), CAST(32, MSB(DUP(op_AND_161)), DUP(op_AND_161))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_161)), DUP(op_AND_161))), CAST(32, MSB(DUP(op_AND_161)), DUP(op_AND_161)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_161)), DUP(op_AND_161))), CAST(32, MSB(DUP(op_AND_161)), DUP(op_AND_161)))))), EXTRACT64(CAST(64, IL_FALSE, CAST(16, IL_FALSE, op_AND_175)), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_186 = SHIFTL0(CAST(64, IL_FALSE, op_MUL_183), SN(32, 0)); + RzILOpPure *op_RSHIFT_188 = SHIFTRA(op_LSHIFT_186, SN(32, 16)); + RzILOpPure *op_ADD_189 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_153), DUP(op_AND_153))), CAST(32, MSB(DUP(op_AND_153)), DUP(op_AND_153))), op_RSHIFT_188); + RzILOpPure *op_LT_192 = SLT(op_ADD_189, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_197 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_198 = NEG(op_LSHIFT_197); + RzILOpPure *op_LSHIFT_203 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_206 = SUB(op_LSHIFT_203, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_207 = ITE(op_LT_192, op_NEG_198, op_SUB_206); + RzILOpEffect *gcc_expr_208 = BRANCH(op_EQ_102, EMPTY(), set_usr_field_call_147); + + // h_tmp296 = HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) >> 0x10)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 
0x20) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) >> 0x10))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) >> 0x10) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_210 = SETL("h_tmp296", cond_207); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx > ...; + RzILOpEffect *seq_211 = SEQN(2, gcc_expr_208, op_ASSIGN_hybrid_tmp_210); + + // Rxx = ((Rxx & (~(0xffffffff << 0x20))) | ((((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) >> 0x10)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) >> 0x10)) ? 
((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) >> 0x10) : h_tmp296) & 0xffffffff) << 0x20)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0x20)); + RzILOpPure *op_NOT_6 = LOGNOT(op_LSHIFT_5); + RzILOpPure *op_AND_7 = LOGAND(READ_REG(pkt, Rxx_op, false), op_NOT_6); + RzILOpPure *op_RSHIFT_106 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_108 = LOGAND(op_RSHIFT_106, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_114 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_116 = LOGAND(op_RSHIFT_114, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_127 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_130 = LOGAND(op_RSHIFT_127, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_138 = MUL(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_116), DUP(op_AND_116))), CAST(32, MSB(DUP(op_AND_116)), DUP(op_AND_116)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_116)), DUP(op_AND_116))), CAST(32, MSB(DUP(op_AND_116)), DUP(op_AND_116))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_116)), DUP(op_AND_116))), CAST(32, MSB(DUP(op_AND_116)), DUP(op_AND_116)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_116)), DUP(op_AND_116))), CAST(32, MSB(DUP(op_AND_116)), DUP(op_AND_116)))))), EXTRACT64(CAST(64, IL_FALSE, CAST(16, IL_FALSE, op_AND_130)), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_141 = SHIFTL0(CAST(64, IL_FALSE, op_MUL_138), SN(32, 0)); + RzILOpPure *op_RSHIFT_143 = SHIFTRA(op_LSHIFT_141, SN(32, 16)); + RzILOpPure *op_ADD_144 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_108), DUP(op_AND_108))), CAST(32, MSB(DUP(op_AND_108)), DUP(op_AND_108))), op_RSHIFT_143); + RzILOpPure *cond_212 = ITE(DUP(op_EQ_102), op_ADD_144, VARL("h_tmp296")); + RzILOpPure *op_AND_214 = LOGAND(cond_212, SN(64, 0xffffffff)); + RzILOpPure 
*op_LSHIFT_218 = SHIFTL0(op_AND_214, SN(32, 0x20)); + RzILOpPure *op_OR_219 = LOGOR(op_AND_7, op_LSHIFT_218); + RzILOpEffect *op_ASSIGN_220 = WRITE_REG(bundle, Rxx_op, op_OR_219); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((R ...; + RzILOpEffect *seq_221 = SEQN(2, seq_211, op_ASSIGN_220); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_367 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) >> 0x10)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) >> 0x10))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) >> 0x10) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_236 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_238 = LOGAND(op_RSHIFT_236, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_244 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_246 = LOGAND(op_RSHIFT_244, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_257 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_260 = LOGAND(op_RSHIFT_257, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_268 = MUL(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_246), DUP(op_AND_246))), CAST(32, MSB(DUP(op_AND_246)), DUP(op_AND_246)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_246)), DUP(op_AND_246))), CAST(32, MSB(DUP(op_AND_246)), DUP(op_AND_246))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_246)), DUP(op_AND_246))), CAST(32, MSB(DUP(op_AND_246)), DUP(op_AND_246)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_246)), DUP(op_AND_246))), CAST(32, MSB(DUP(op_AND_246)), DUP(op_AND_246)))))), EXTRACT64(CAST(64, IL_FALSE, CAST(16, IL_FALSE, op_AND_260)), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_271 = SHIFTL0(CAST(64, IL_FALSE, op_MUL_268), SN(32, 0)); + RzILOpPure *op_RSHIFT_273 = SHIFTRA(op_LSHIFT_271, SN(32, 16)); + RzILOpPure *op_ADD_274 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_238), DUP(op_AND_238))), CAST(32, MSB(DUP(op_AND_238)), DUP(op_AND_238))), op_RSHIFT_273); + RzILOpPure *op_RSHIFT_283 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_285 = LOGAND(op_RSHIFT_283, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_291 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_293 = LOGAND(op_RSHIFT_291, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_304 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_307 = LOGAND(op_RSHIFT_304, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_315 = MUL(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_293), DUP(op_AND_293))), 
CAST(32, MSB(DUP(op_AND_293)), DUP(op_AND_293)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_293)), DUP(op_AND_293))), CAST(32, MSB(DUP(op_AND_293)), DUP(op_AND_293))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_293)), DUP(op_AND_293))), CAST(32, MSB(DUP(op_AND_293)), DUP(op_AND_293)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_293)), DUP(op_AND_293))), CAST(32, MSB(DUP(op_AND_293)), DUP(op_AND_293)))))), EXTRACT64(CAST(64, IL_FALSE, CAST(16, IL_FALSE, op_AND_307)), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_318 = SHIFTL0(CAST(64, IL_FALSE, op_MUL_315), SN(32, 0)); + RzILOpPure *op_RSHIFT_320 = SHIFTRA(op_LSHIFT_318, SN(32, 16)); + RzILOpPure *op_ADD_321 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_285), DUP(op_AND_285))), CAST(32, MSB(DUP(op_AND_285)), DUP(op_AND_285))), op_RSHIFT_320); + RzILOpPure *op_EQ_322 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_ADD_274), SN(32, 0), SN(32, 0x20)), op_ADD_321); + RzILOpPure *op_RSHIFT_371 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_373 = LOGAND(op_RSHIFT_371, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_379 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_381 = LOGAND(op_RSHIFT_379, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_392 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_395 = LOGAND(op_RSHIFT_392, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_403 = MUL(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_381), DUP(op_AND_381))), CAST(32, MSB(DUP(op_AND_381)), DUP(op_AND_381)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_381)), DUP(op_AND_381))), CAST(32, MSB(DUP(op_AND_381)), DUP(op_AND_381))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_381)), DUP(op_AND_381))), CAST(32, MSB(DUP(op_AND_381)), DUP(op_AND_381)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_381)), DUP(op_AND_381))), CAST(32, MSB(DUP(op_AND_381)), DUP(op_AND_381)))))), EXTRACT64(CAST(64, IL_FALSE, CAST(16, IL_FALSE, op_AND_395)), SN(32, 0), SN(32, 16))); + RzILOpPure 
*op_LSHIFT_406 = SHIFTL0(CAST(64, IL_FALSE, op_MUL_403), SN(32, 0)); + RzILOpPure *op_RSHIFT_408 = SHIFTRA(op_LSHIFT_406, SN(32, 16)); + RzILOpPure *op_ADD_409 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_373), DUP(op_AND_373))), CAST(32, MSB(DUP(op_AND_373)), DUP(op_AND_373))), op_RSHIFT_408); + RzILOpPure *op_LT_412 = SLT(op_ADD_409, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_417 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_418 = NEG(op_LSHIFT_417); + RzILOpPure *op_LSHIFT_423 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_426 = SUB(op_LSHIFT_423, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_427 = ITE(op_LT_412, op_NEG_418, op_SUB_426); + RzILOpEffect *gcc_expr_428 = BRANCH(op_EQ_322, EMPTY(), set_usr_field_call_367); + + // h_tmp297 = HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) >> 0x10)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) >> 0x10))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) >> 0x10) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_430 = SETL("h_tmp297", cond_427); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx > ...; + RzILOpEffect *seq_431 = SEQN(2, gcc_expr_428, op_ASSIGN_hybrid_tmp_430); + + // Rxx = ((Rxx & (~(0xffffffff << 0x0))) | ((((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) >> 0x10)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) >> 0x10)) ? ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) >> 0x10) : h_tmp297) & 0xffffffff) << 0x0)); + RzILOpPure *op_LSHIFT_227 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0)); + RzILOpPure *op_NOT_228 = LOGNOT(op_LSHIFT_227); + RzILOpPure *op_AND_229 = LOGAND(READ_REG(pkt, Rxx_op, false), op_NOT_228); + RzILOpPure *op_RSHIFT_326 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_328 = LOGAND(op_RSHIFT_326, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_334 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_336 = LOGAND(op_RSHIFT_334, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_347 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_350 = LOGAND(op_RSHIFT_347, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_358 = MUL(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_336), DUP(op_AND_336))), CAST(32, MSB(DUP(op_AND_336)), DUP(op_AND_336)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_336)), DUP(op_AND_336))), CAST(32, MSB(DUP(op_AND_336)), 
DUP(op_AND_336))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_336)), DUP(op_AND_336))), CAST(32, MSB(DUP(op_AND_336)), DUP(op_AND_336)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_336)), DUP(op_AND_336))), CAST(32, MSB(DUP(op_AND_336)), DUP(op_AND_336)))))), EXTRACT64(CAST(64, IL_FALSE, CAST(16, IL_FALSE, op_AND_350)), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_361 = SHIFTL0(CAST(64, IL_FALSE, op_MUL_358), SN(32, 0)); + RzILOpPure *op_RSHIFT_363 = SHIFTRA(op_LSHIFT_361, SN(32, 16)); + RzILOpPure *op_ADD_364 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_328), DUP(op_AND_328))), CAST(32, MSB(DUP(op_AND_328)), DUP(op_AND_328))), op_RSHIFT_363); + RzILOpPure *cond_432 = ITE(DUP(op_EQ_322), op_ADD_364, VARL("h_tmp297")); + RzILOpPure *op_AND_434 = LOGAND(cond_432, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_438 = SHIFTL0(op_AND_434, SN(32, 0)); + RzILOpPure *op_OR_439 = LOGOR(op_AND_229, op_LSHIFT_438); + RzILOpEffect *op_ASSIGN_440 = WRITE_REG(bundle, Rxx_op, op_OR_439); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((R ...; + RzILOpEffect *seq_441 = SEQN(2, seq_431, op_ASSIGN_440); + + RzILOpEffect *instruction_sequence = SEQN(2, seq_221, seq_441); + return instruction_sequence; +} + +// Rxx += vmpyweuh(Rss,Rtt):<<1:sat +RzILOpEffect *hex_il_op_m2_mmaculs_s1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rxx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_147 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 
0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) >> 0x10)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) >> 0x10))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) >> 0x10) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_23 = SHIFTRA(Rss, SN(32, 0x20)); + RzILOpPure *op_AND_25 = LOGAND(op_RSHIFT_23, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_37 = SHIFTRA(Rtt, SN(32, 0x20)); + RzILOpPure *op_AND_40 = LOGAND(op_RSHIFT_37, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_48 = MUL(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_25), DUP(op_AND_25))), CAST(32, MSB(DUP(op_AND_25)), DUP(op_AND_25)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_25)), DUP(op_AND_25))), CAST(32, MSB(DUP(op_AND_25)), DUP(op_AND_25))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_25)), DUP(op_AND_25))), CAST(32, MSB(DUP(op_AND_25)), DUP(op_AND_25)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_25)), DUP(op_AND_25))), CAST(32, MSB(DUP(op_AND_25)), DUP(op_AND_25)))))), EXTRACT64(CAST(64, IL_FALSE, CAST(16, IL_FALSE, op_AND_40)), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_51 = SHIFTL0(CAST(64, IL_FALSE, op_MUL_48), SN(32, 1)); + RzILOpPure *op_RSHIFT_53 = SHIFTRA(op_LSHIFT_51, SN(32, 16)); + RzILOpPure *op_ADD_54 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_16), 
DUP(op_AND_16))), CAST(32, MSB(DUP(op_AND_16)), DUP(op_AND_16))), op_RSHIFT_53); + RzILOpPure *op_RSHIFT_63 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_65 = LOGAND(op_RSHIFT_63, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_71 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_73 = LOGAND(op_RSHIFT_71, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_84 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_87 = LOGAND(op_RSHIFT_84, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_95 = MUL(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_73), DUP(op_AND_73))), CAST(32, MSB(DUP(op_AND_73)), DUP(op_AND_73)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_73)), DUP(op_AND_73))), CAST(32, MSB(DUP(op_AND_73)), DUP(op_AND_73))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_73)), DUP(op_AND_73))), CAST(32, MSB(DUP(op_AND_73)), DUP(op_AND_73)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_73)), DUP(op_AND_73))), CAST(32, MSB(DUP(op_AND_73)), DUP(op_AND_73)))))), EXTRACT64(CAST(64, IL_FALSE, CAST(16, IL_FALSE, op_AND_87)), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_98 = SHIFTL0(CAST(64, IL_FALSE, op_MUL_95), SN(32, 1)); + RzILOpPure *op_RSHIFT_100 = SHIFTRA(op_LSHIFT_98, SN(32, 16)); + RzILOpPure *op_ADD_101 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_65), DUP(op_AND_65))), CAST(32, MSB(DUP(op_AND_65)), DUP(op_AND_65))), op_RSHIFT_100); + RzILOpPure *op_EQ_102 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_ADD_54), SN(32, 0), SN(32, 0x20)), op_ADD_101); + RzILOpPure *op_RSHIFT_151 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_153 = LOGAND(op_RSHIFT_151, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_159 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_161 = LOGAND(op_RSHIFT_159, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_172 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_175 = LOGAND(op_RSHIFT_172, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure 
*op_MUL_183 = MUL(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_161), DUP(op_AND_161))), CAST(32, MSB(DUP(op_AND_161)), DUP(op_AND_161)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_161)), DUP(op_AND_161))), CAST(32, MSB(DUP(op_AND_161)), DUP(op_AND_161))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_161)), DUP(op_AND_161))), CAST(32, MSB(DUP(op_AND_161)), DUP(op_AND_161)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_161)), DUP(op_AND_161))), CAST(32, MSB(DUP(op_AND_161)), DUP(op_AND_161)))))), EXTRACT64(CAST(64, IL_FALSE, CAST(16, IL_FALSE, op_AND_175)), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_186 = SHIFTL0(CAST(64, IL_FALSE, op_MUL_183), SN(32, 1)); + RzILOpPure *op_RSHIFT_188 = SHIFTRA(op_LSHIFT_186, SN(32, 16)); + RzILOpPure *op_ADD_189 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_153), DUP(op_AND_153))), CAST(32, MSB(DUP(op_AND_153)), DUP(op_AND_153))), op_RSHIFT_188); + RzILOpPure *op_LT_192 = SLT(op_ADD_189, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_197 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_198 = NEG(op_LSHIFT_197); + RzILOpPure *op_LSHIFT_203 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_206 = SUB(op_LSHIFT_203, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_207 = ITE(op_LT_192, op_NEG_198, op_SUB_206); + RzILOpEffect *gcc_expr_208 = BRANCH(op_EQ_102, EMPTY(), set_usr_field_call_147); + + // h_tmp298 = HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) >> 0x10)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) >> 0x10))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 
0x1))}, ((((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) >> 0x10) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_210 = SETL("h_tmp298", cond_207); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx > ...; + RzILOpEffect *seq_211 = SEQN(2, gcc_expr_208, op_ASSIGN_hybrid_tmp_210); + + // Rxx = ((Rxx & (~(0xffffffff << 0x20))) | ((((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) >> 0x10)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) >> 0x10)) ? 
((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) >> 0x10) : h_tmp298) & 0xffffffff) << 0x20)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0x20)); + RzILOpPure *op_NOT_6 = LOGNOT(op_LSHIFT_5); + RzILOpPure *op_AND_7 = LOGAND(READ_REG(pkt, Rxx_op, false), op_NOT_6); + RzILOpPure *op_RSHIFT_106 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_108 = LOGAND(op_RSHIFT_106, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_114 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_116 = LOGAND(op_RSHIFT_114, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_127 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_130 = LOGAND(op_RSHIFT_127, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_138 = MUL(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_116), DUP(op_AND_116))), CAST(32, MSB(DUP(op_AND_116)), DUP(op_AND_116)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_116)), DUP(op_AND_116))), CAST(32, MSB(DUP(op_AND_116)), DUP(op_AND_116))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_116)), DUP(op_AND_116))), CAST(32, MSB(DUP(op_AND_116)), DUP(op_AND_116)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_116)), DUP(op_AND_116))), CAST(32, MSB(DUP(op_AND_116)), DUP(op_AND_116)))))), EXTRACT64(CAST(64, IL_FALSE, CAST(16, IL_FALSE, op_AND_130)), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_141 = SHIFTL0(CAST(64, IL_FALSE, op_MUL_138), SN(32, 1)); + RzILOpPure *op_RSHIFT_143 = SHIFTRA(op_LSHIFT_141, SN(32, 16)); + RzILOpPure *op_ADD_144 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_108), DUP(op_AND_108))), CAST(32, MSB(DUP(op_AND_108)), DUP(op_AND_108))), op_RSHIFT_143); + RzILOpPure *cond_212 = ITE(DUP(op_EQ_102), op_ADD_144, VARL("h_tmp298")); + RzILOpPure *op_AND_214 = LOGAND(cond_212, SN(64, 0xffffffff)); + RzILOpPure 
*op_LSHIFT_218 = SHIFTL0(op_AND_214, SN(32, 0x20)); + RzILOpPure *op_OR_219 = LOGOR(op_AND_7, op_LSHIFT_218); + RzILOpEffect *op_ASSIGN_220 = WRITE_REG(bundle, Rxx_op, op_OR_219); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((R ...; + RzILOpEffect *seq_221 = SEQN(2, seq_211, op_ASSIGN_220); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_367 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) >> 0x10)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) >> 0x10))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) >> 0x10) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_236 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_238 = LOGAND(op_RSHIFT_236, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_244 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_246 = LOGAND(op_RSHIFT_244, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_257 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_260 = LOGAND(op_RSHIFT_257, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_268 = MUL(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_246), DUP(op_AND_246))), CAST(32, MSB(DUP(op_AND_246)), DUP(op_AND_246)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_246)), DUP(op_AND_246))), CAST(32, MSB(DUP(op_AND_246)), DUP(op_AND_246))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_246)), DUP(op_AND_246))), CAST(32, MSB(DUP(op_AND_246)), DUP(op_AND_246)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_246)), DUP(op_AND_246))), CAST(32, MSB(DUP(op_AND_246)), DUP(op_AND_246)))))), EXTRACT64(CAST(64, IL_FALSE, CAST(16, IL_FALSE, op_AND_260)), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_271 = SHIFTL0(CAST(64, IL_FALSE, op_MUL_268), SN(32, 1)); + RzILOpPure *op_RSHIFT_273 = SHIFTRA(op_LSHIFT_271, SN(32, 16)); + RzILOpPure *op_ADD_274 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_238), DUP(op_AND_238))), CAST(32, MSB(DUP(op_AND_238)), DUP(op_AND_238))), op_RSHIFT_273); + RzILOpPure *op_RSHIFT_283 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_285 = LOGAND(op_RSHIFT_283, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_291 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_293 = LOGAND(op_RSHIFT_291, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_304 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_307 = LOGAND(op_RSHIFT_304, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_315 = MUL(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_293), DUP(op_AND_293))), 
CAST(32, MSB(DUP(op_AND_293)), DUP(op_AND_293)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_293)), DUP(op_AND_293))), CAST(32, MSB(DUP(op_AND_293)), DUP(op_AND_293))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_293)), DUP(op_AND_293))), CAST(32, MSB(DUP(op_AND_293)), DUP(op_AND_293)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_293)), DUP(op_AND_293))), CAST(32, MSB(DUP(op_AND_293)), DUP(op_AND_293)))))), EXTRACT64(CAST(64, IL_FALSE, CAST(16, IL_FALSE, op_AND_307)), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_318 = SHIFTL0(CAST(64, IL_FALSE, op_MUL_315), SN(32, 1)); + RzILOpPure *op_RSHIFT_320 = SHIFTRA(op_LSHIFT_318, SN(32, 16)); + RzILOpPure *op_ADD_321 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_285), DUP(op_AND_285))), CAST(32, MSB(DUP(op_AND_285)), DUP(op_AND_285))), op_RSHIFT_320); + RzILOpPure *op_EQ_322 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_ADD_274), SN(32, 0), SN(32, 0x20)), op_ADD_321); + RzILOpPure *op_RSHIFT_371 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_373 = LOGAND(op_RSHIFT_371, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_379 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_381 = LOGAND(op_RSHIFT_379, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_392 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_395 = LOGAND(op_RSHIFT_392, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_403 = MUL(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_381), DUP(op_AND_381))), CAST(32, MSB(DUP(op_AND_381)), DUP(op_AND_381)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_381)), DUP(op_AND_381))), CAST(32, MSB(DUP(op_AND_381)), DUP(op_AND_381))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_381)), DUP(op_AND_381))), CAST(32, MSB(DUP(op_AND_381)), DUP(op_AND_381)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_381)), DUP(op_AND_381))), CAST(32, MSB(DUP(op_AND_381)), DUP(op_AND_381)))))), EXTRACT64(CAST(64, IL_FALSE, CAST(16, IL_FALSE, op_AND_395)), SN(32, 0), SN(32, 16))); + RzILOpPure 
*op_LSHIFT_406 = SHIFTL0(CAST(64, IL_FALSE, op_MUL_403), SN(32, 1)); + RzILOpPure *op_RSHIFT_408 = SHIFTRA(op_LSHIFT_406, SN(32, 16)); + RzILOpPure *op_ADD_409 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_373), DUP(op_AND_373))), CAST(32, MSB(DUP(op_AND_373)), DUP(op_AND_373))), op_RSHIFT_408); + RzILOpPure *op_LT_412 = SLT(op_ADD_409, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_417 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_418 = NEG(op_LSHIFT_417); + RzILOpPure *op_LSHIFT_423 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_426 = SUB(op_LSHIFT_423, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_427 = ITE(op_LT_412, op_NEG_418, op_SUB_426); + RzILOpEffect *gcc_expr_428 = BRANCH(op_EQ_322, EMPTY(), set_usr_field_call_367); + + // h_tmp299 = HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) >> 0x10)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) >> 0x10))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) >> 0x10) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_430 = SETL("h_tmp299", cond_427); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx > ...; + RzILOpEffect *seq_431 = SEQN(2, gcc_expr_428, op_ASSIGN_hybrid_tmp_430); + + // Rxx = ((Rxx & (~(0xffffffff << 0x0))) | ((((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) >> 0x10)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) >> 0x10)) ? ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) >> 0x10) : h_tmp299) & 0xffffffff) << 0x0)); + RzILOpPure *op_LSHIFT_227 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0)); + RzILOpPure *op_NOT_228 = LOGNOT(op_LSHIFT_227); + RzILOpPure *op_AND_229 = LOGAND(READ_REG(pkt, Rxx_op, false), op_NOT_228); + RzILOpPure *op_RSHIFT_326 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_328 = LOGAND(op_RSHIFT_326, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_334 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_336 = LOGAND(op_RSHIFT_334, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_347 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_350 = LOGAND(op_RSHIFT_347, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_358 = MUL(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_336), DUP(op_AND_336))), CAST(32, MSB(DUP(op_AND_336)), DUP(op_AND_336)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_336)), DUP(op_AND_336))), CAST(32, MSB(DUP(op_AND_336)), 
DUP(op_AND_336))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_336)), DUP(op_AND_336))), CAST(32, MSB(DUP(op_AND_336)), DUP(op_AND_336)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_336)), DUP(op_AND_336))), CAST(32, MSB(DUP(op_AND_336)), DUP(op_AND_336)))))), EXTRACT64(CAST(64, IL_FALSE, CAST(16, IL_FALSE, op_AND_350)), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_361 = SHIFTL0(CAST(64, IL_FALSE, op_MUL_358), SN(32, 1)); + RzILOpPure *op_RSHIFT_363 = SHIFTRA(op_LSHIFT_361, SN(32, 16)); + RzILOpPure *op_ADD_364 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_328), DUP(op_AND_328))), CAST(32, MSB(DUP(op_AND_328)), DUP(op_AND_328))), op_RSHIFT_363); + RzILOpPure *cond_432 = ITE(DUP(op_EQ_322), op_ADD_364, VARL("h_tmp299")); + RzILOpPure *op_AND_434 = LOGAND(cond_432, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_438 = SHIFTL0(op_AND_434, SN(32, 0)); + RzILOpPure *op_OR_439 = LOGOR(op_AND_229, op_LSHIFT_438); + RzILOpEffect *op_ASSIGN_440 = WRITE_REG(bundle, Rxx_op, op_OR_439); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((R ...; + RzILOpEffect *seq_441 = SEQN(2, seq_431, op_ASSIGN_440); + + RzILOpEffect *instruction_sequence = SEQN(2, seq_221, seq_441); + return instruction_sequence; +} + +// Rdd = vmpywoh(Rss,Rtt):rnd:sat +RzILOpEffect *hex_il_op_m2_mmpyh_rs0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_123 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x30) & 
((st64) 0xffff)))), 0x0, 0x10) << 0x0) + ((st64) 0x8000) >> 0x10)), 0x0, 0x20) == ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff)))), 0x0, 0x10) << 0x0) + ((st64) 0x8000) >> 0x10))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff)))), 0x0, 0x10) << 0x0) + ((st64) 0x8000) >> 0x10) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_15 = SHIFTRA(Rss, SN(32, 0x20)); + RzILOpPure *op_AND_17 = LOGAND(op_RSHIFT_15, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_29 = SHIFTRA(Rtt, SN(32, 0x30)); + RzILOpPure *op_AND_32 = LOGAND(op_RSHIFT_29, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_39 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_17), DUP(op_AND_17))), CAST(32, MSB(DUP(op_AND_17)), DUP(op_AND_17)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_17)), DUP(op_AND_17))), CAST(32, MSB(DUP(op_AND_17)), DUP(op_AND_17))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_17)), DUP(op_AND_17))), CAST(32, MSB(DUP(op_AND_17)), DUP(op_AND_17)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_17)), DUP(op_AND_17))), CAST(32, MSB(DUP(op_AND_17)), DUP(op_AND_17))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_32), DUP(op_AND_32))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_41 = SHIFTL0(op_MUL_39, SN(32, 0)); + RzILOpPure *op_ADD_44 = ADD(op_LSHIFT_41, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_46 = SHIFTRA(op_ADD_44, SN(32, 16)); + RzILOpPure *op_RSHIFT_55 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_57 = LOGAND(op_RSHIFT_55, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_68 = SHIFTRA(DUP(Rtt), SN(32, 0x30)); + RzILOpPure *op_AND_71 = LOGAND(op_RSHIFT_68, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_78 = 
MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_57), DUP(op_AND_57))), CAST(32, MSB(DUP(op_AND_57)), DUP(op_AND_57)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_57)), DUP(op_AND_57))), CAST(32, MSB(DUP(op_AND_57)), DUP(op_AND_57))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_57)), DUP(op_AND_57))), CAST(32, MSB(DUP(op_AND_57)), DUP(op_AND_57)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_57)), DUP(op_AND_57))), CAST(32, MSB(DUP(op_AND_57)), DUP(op_AND_57))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_71), DUP(op_AND_71))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_80 = SHIFTL0(op_MUL_78, SN(32, 0)); + RzILOpPure *op_ADD_83 = ADD(op_LSHIFT_80, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_85 = SHIFTRA(op_ADD_83, SN(32, 16)); + RzILOpPure *op_EQ_86 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_RSHIFT_46), SN(32, 0), SN(32, 0x20)), op_RSHIFT_85); + RzILOpPure *op_RSHIFT_127 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_129 = LOGAND(op_RSHIFT_127, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_140 = SHIFTRA(DUP(Rtt), SN(32, 0x30)); + RzILOpPure *op_AND_143 = LOGAND(op_RSHIFT_140, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_150 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_129), DUP(op_AND_129))), CAST(32, MSB(DUP(op_AND_129)), DUP(op_AND_129)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_129)), DUP(op_AND_129))), CAST(32, MSB(DUP(op_AND_129)), DUP(op_AND_129))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_129)), DUP(op_AND_129))), CAST(32, MSB(DUP(op_AND_129)), DUP(op_AND_129)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_129)), DUP(op_AND_129))), CAST(32, MSB(DUP(op_AND_129)), DUP(op_AND_129))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_143), DUP(op_AND_143))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_152 = SHIFTL0(op_MUL_150, SN(32, 0)); + RzILOpPure *op_ADD_155 = ADD(op_LSHIFT_152, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure 
*op_RSHIFT_157 = SHIFTRA(op_ADD_155, SN(32, 16)); + RzILOpPure *op_LT_160 = SLT(op_RSHIFT_157, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_165 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_166 = NEG(op_LSHIFT_165); + RzILOpPure *op_LSHIFT_171 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_174 = SUB(op_LSHIFT_171, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_175 = ITE(op_LT_160, op_NEG_166, op_SUB_174); + RzILOpEffect *gcc_expr_176 = BRANCH(op_EQ_86, EMPTY(), set_usr_field_call_123); + + // h_tmp300 = HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff)))), 0x0, 0x10) << 0x0) + ((st64) 0x8000) >> 0x10)), 0x0, 0x20) == ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff)))), 0x0, 0x10) << 0x0) + ((st64) 0x8000) >> 0x10))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff)))), 0x0, 0x10) << 0x0) + ((st64) 0x8000) >> 0x10) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_178 = SETL("h_tmp300", cond_175); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) ((st32) ((st6 ...; + RzILOpEffect *seq_179 = SEQN(2, gcc_expr_176, op_ASSIGN_hybrid_tmp_178); + + // Rdd = ((Rdd & (~(0xffffffff << 0x20))) | ((((sextract64(((ut64) ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff)))), 0x0, 0x10) << 0x0) + ((st64) 0x8000) >> 0x10)), 0x0, 0x20) == ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff)))), 0x0, 0x10) << 0x0) + ((st64) 0x8000) >> 0x10)) ? 
((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff)))), 0x0, 0x10) << 0x0) + ((st64) 0x8000) >> 0x10) : h_tmp300) & 0xffffffff) << 0x20)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0x20)); + RzILOpPure *op_NOT_6 = LOGNOT(op_LSHIFT_5); + RzILOpPure *op_AND_7 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_6); + RzILOpPure *op_RSHIFT_90 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_92 = LOGAND(op_RSHIFT_90, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_103 = SHIFTRA(DUP(Rtt), SN(32, 0x30)); + RzILOpPure *op_AND_106 = LOGAND(op_RSHIFT_103, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_113 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_92), DUP(op_AND_92))), CAST(32, MSB(DUP(op_AND_92)), DUP(op_AND_92)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_92)), DUP(op_AND_92))), CAST(32, MSB(DUP(op_AND_92)), DUP(op_AND_92))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_92)), DUP(op_AND_92))), CAST(32, MSB(DUP(op_AND_92)), DUP(op_AND_92)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_92)), DUP(op_AND_92))), CAST(32, MSB(DUP(op_AND_92)), DUP(op_AND_92))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_106), DUP(op_AND_106))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_115 = SHIFTL0(op_MUL_113, SN(32, 0)); + RzILOpPure *op_ADD_118 = ADD(op_LSHIFT_115, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_120 = SHIFTRA(op_ADD_118, SN(32, 16)); + RzILOpPure *cond_180 = ITE(DUP(op_EQ_86), op_RSHIFT_120, VARL("h_tmp300")); + RzILOpPure *op_AND_182 = LOGAND(cond_180, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_186 = SHIFTL0(op_AND_182, SN(32, 0x20)); + RzILOpPure *op_OR_187 = LOGOR(op_AND_7, op_LSHIFT_186); + RzILOpEffect *op_ASSIGN_188 = WRITE_REG(bundle, Rdd_op, op_OR_187); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) ((st32) ( ...; + RzILOpEffect *seq_189 = SEQN(2, seq_179, op_ASSIGN_188); + + // 
set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_311 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10) << 0x0) + ((st64) 0x8000) >> 0x10)), 0x0, 0x20) == ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10) << 0x0) + ((st64) 0x8000) >> 0x10))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10) << 0x0) + ((st64) 0x8000) >> 0x10) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_204 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_206 = LOGAND(op_RSHIFT_204, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_217 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_220 = LOGAND(op_RSHIFT_217, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_227 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_206), DUP(op_AND_206))), CAST(32, MSB(DUP(op_AND_206)), DUP(op_AND_206)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_206)), DUP(op_AND_206))), CAST(32, MSB(DUP(op_AND_206)), DUP(op_AND_206))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_206)), DUP(op_AND_206))), CAST(32, MSB(DUP(op_AND_206)), DUP(op_AND_206)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_206)), DUP(op_AND_206))), CAST(32, MSB(DUP(op_AND_206)), DUP(op_AND_206))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_220), DUP(op_AND_220))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_229 = SHIFTL0(op_MUL_227, SN(32, 0)); + RzILOpPure *op_ADD_232 = ADD(op_LSHIFT_229, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + 
RzILOpPure *op_RSHIFT_234 = SHIFTRA(op_ADD_232, SN(32, 16)); + RzILOpPure *op_RSHIFT_243 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_245 = LOGAND(op_RSHIFT_243, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_256 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_259 = LOGAND(op_RSHIFT_256, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_266 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_245), DUP(op_AND_245))), CAST(32, MSB(DUP(op_AND_245)), DUP(op_AND_245)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_245)), DUP(op_AND_245))), CAST(32, MSB(DUP(op_AND_245)), DUP(op_AND_245))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_245)), DUP(op_AND_245))), CAST(32, MSB(DUP(op_AND_245)), DUP(op_AND_245)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_245)), DUP(op_AND_245))), CAST(32, MSB(DUP(op_AND_245)), DUP(op_AND_245))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_259), DUP(op_AND_259))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_268 = SHIFTL0(op_MUL_266, SN(32, 0)); + RzILOpPure *op_ADD_271 = ADD(op_LSHIFT_268, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_273 = SHIFTRA(op_ADD_271, SN(32, 16)); + RzILOpPure *op_EQ_274 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_RSHIFT_234), SN(32, 0), SN(32, 0x20)), op_RSHIFT_273); + RzILOpPure *op_RSHIFT_315 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_317 = LOGAND(op_RSHIFT_315, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_328 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_331 = LOGAND(op_RSHIFT_328, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_338 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_317), DUP(op_AND_317))), CAST(32, MSB(DUP(op_AND_317)), DUP(op_AND_317)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_317)), DUP(op_AND_317))), CAST(32, MSB(DUP(op_AND_317)), DUP(op_AND_317))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_317)), DUP(op_AND_317))), CAST(32, MSB(DUP(op_AND_317)), 
DUP(op_AND_317)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_317)), DUP(op_AND_317))), CAST(32, MSB(DUP(op_AND_317)), DUP(op_AND_317))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_331), DUP(op_AND_331))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_340 = SHIFTL0(op_MUL_338, SN(32, 0)); + RzILOpPure *op_ADD_343 = ADD(op_LSHIFT_340, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_345 = SHIFTRA(op_ADD_343, SN(32, 16)); + RzILOpPure *op_LT_348 = SLT(op_RSHIFT_345, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_353 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_354 = NEG(op_LSHIFT_353); + RzILOpPure *op_LSHIFT_359 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_362 = SUB(op_LSHIFT_359, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_363 = ITE(op_LT_348, op_NEG_354, op_SUB_362); + RzILOpEffect *gcc_expr_364 = BRANCH(op_EQ_274, EMPTY(), set_usr_field_call_311); + + // h_tmp301 = HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10) << 0x0) + ((st64) 0x8000) >> 0x10)), 0x0, 0x20) == ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10) << 0x0) + ((st64) 0x8000) >> 0x10))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10) << 0x0) + ((st64) 0x8000) >> 0x10) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_366 = SETL("h_tmp301", cond_363); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) ((st32) ((st6 ...; + RzILOpEffect *seq_367 = SEQN(2, gcc_expr_364, op_ASSIGN_hybrid_tmp_366); + + // Rdd = ((Rdd & (~(0xffffffff << 0x0))) | ((((sextract64(((ut64) ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10) << 0x0) + ((st64) 0x8000) >> 0x10)), 0x0, 0x20) == ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10) << 0x0) + ((st64) 0x8000) >> 0x10)) ? ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10) << 0x0) + ((st64) 0x8000) >> 0x10) : h_tmp301) & 0xffffffff) << 0x0)); + RzILOpPure *op_LSHIFT_195 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0)); + RzILOpPure *op_NOT_196 = LOGNOT(op_LSHIFT_195); + RzILOpPure *op_AND_197 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_196); + RzILOpPure *op_RSHIFT_278 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_280 = LOGAND(op_RSHIFT_278, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_291 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_294 = LOGAND(op_RSHIFT_291, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_301 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_280), DUP(op_AND_280))), CAST(32, MSB(DUP(op_AND_280)), DUP(op_AND_280)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_280)), DUP(op_AND_280))), CAST(32, MSB(DUP(op_AND_280)), DUP(op_AND_280))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_280)), DUP(op_AND_280))), CAST(32, MSB(DUP(op_AND_280)), DUP(op_AND_280)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_280)), DUP(op_AND_280))), CAST(32, MSB(DUP(op_AND_280)), DUP(op_AND_280))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_294), 
DUP(op_AND_294))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_303 = SHIFTL0(op_MUL_301, SN(32, 0)); + RzILOpPure *op_ADD_306 = ADD(op_LSHIFT_303, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_308 = SHIFTRA(op_ADD_306, SN(32, 16)); + RzILOpPure *cond_368 = ITE(DUP(op_EQ_274), op_RSHIFT_308, VARL("h_tmp301")); + RzILOpPure *op_AND_370 = LOGAND(cond_368, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_374 = SHIFTL0(op_AND_370, SN(32, 0)); + RzILOpPure *op_OR_375 = LOGOR(op_AND_197, op_LSHIFT_374); + RzILOpEffect *op_ASSIGN_376 = WRITE_REG(bundle, Rdd_op, op_OR_375); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) ((st32) ( ...; + RzILOpEffect *seq_377 = SEQN(2, seq_367, op_ASSIGN_376); + + RzILOpEffect *instruction_sequence = SEQN(2, seq_189, seq_377); + return instruction_sequence; +} + +// Rdd = vmpywoh(Rss,Rtt):<<1:rnd:sat +RzILOpEffect *hex_il_op_m2_mmpyh_rs1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_123 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff)))), 0x0, 0x10) << 0x1) + ((st64) 0x8000) >> 0x10)), 0x0, 0x20) == ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff)))), 0x0, 0x10) << 0x1) + ((st64) 0x8000) >> 0x10))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 
0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff)))), 0x0, 0x10) << 0x1) + ((st64) 0x8000) >> 0x10) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_15 = SHIFTRA(Rss, SN(32, 0x20)); + RzILOpPure *op_AND_17 = LOGAND(op_RSHIFT_15, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_29 = SHIFTRA(Rtt, SN(32, 0x30)); + RzILOpPure *op_AND_32 = LOGAND(op_RSHIFT_29, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_39 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_17), DUP(op_AND_17))), CAST(32, MSB(DUP(op_AND_17)), DUP(op_AND_17)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_17)), DUP(op_AND_17))), CAST(32, MSB(DUP(op_AND_17)), DUP(op_AND_17))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_17)), DUP(op_AND_17))), CAST(32, MSB(DUP(op_AND_17)), DUP(op_AND_17)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_17)), DUP(op_AND_17))), CAST(32, MSB(DUP(op_AND_17)), DUP(op_AND_17))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_32), DUP(op_AND_32))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_41 = SHIFTL0(op_MUL_39, SN(32, 1)); + RzILOpPure *op_ADD_44 = ADD(op_LSHIFT_41, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_46 = SHIFTRA(op_ADD_44, SN(32, 16)); + RzILOpPure *op_RSHIFT_55 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_57 = LOGAND(op_RSHIFT_55, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_68 = SHIFTRA(DUP(Rtt), SN(32, 0x30)); + RzILOpPure *op_AND_71 = LOGAND(op_RSHIFT_68, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_78 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_57), DUP(op_AND_57))), CAST(32, MSB(DUP(op_AND_57)), DUP(op_AND_57)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_57)), DUP(op_AND_57))), CAST(32, MSB(DUP(op_AND_57)), DUP(op_AND_57))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_57)), DUP(op_AND_57))), CAST(32, MSB(DUP(op_AND_57)), DUP(op_AND_57)))), CAST(64, MSB(CAST(32, 
MSB(DUP(op_AND_57)), DUP(op_AND_57))), CAST(32, MSB(DUP(op_AND_57)), DUP(op_AND_57))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_71), DUP(op_AND_71))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_80 = SHIFTL0(op_MUL_78, SN(32, 1)); + RzILOpPure *op_ADD_83 = ADD(op_LSHIFT_80, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_85 = SHIFTRA(op_ADD_83, SN(32, 16)); + RzILOpPure *op_EQ_86 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_RSHIFT_46), SN(32, 0), SN(32, 0x20)), op_RSHIFT_85); + RzILOpPure *op_RSHIFT_127 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_129 = LOGAND(op_RSHIFT_127, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_140 = SHIFTRA(DUP(Rtt), SN(32, 0x30)); + RzILOpPure *op_AND_143 = LOGAND(op_RSHIFT_140, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_150 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_129), DUP(op_AND_129))), CAST(32, MSB(DUP(op_AND_129)), DUP(op_AND_129)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_129)), DUP(op_AND_129))), CAST(32, MSB(DUP(op_AND_129)), DUP(op_AND_129))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_129)), DUP(op_AND_129))), CAST(32, MSB(DUP(op_AND_129)), DUP(op_AND_129)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_129)), DUP(op_AND_129))), CAST(32, MSB(DUP(op_AND_129)), DUP(op_AND_129))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_143), DUP(op_AND_143))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_152 = SHIFTL0(op_MUL_150, SN(32, 1)); + RzILOpPure *op_ADD_155 = ADD(op_LSHIFT_152, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_157 = SHIFTRA(op_ADD_155, SN(32, 16)); + RzILOpPure *op_LT_160 = SLT(op_RSHIFT_157, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_165 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_166 = NEG(op_LSHIFT_165); + RzILOpPure *op_LSHIFT_171 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_174 = SUB(op_LSHIFT_171, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure 
*cond_175 = ITE(op_LT_160, op_NEG_166, op_SUB_174); + RzILOpEffect *gcc_expr_176 = BRANCH(op_EQ_86, EMPTY(), set_usr_field_call_123); + + // h_tmp302 = HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff)))), 0x0, 0x10) << 0x1) + ((st64) 0x8000) >> 0x10)), 0x0, 0x20) == ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff)))), 0x0, 0x10) << 0x1) + ((st64) 0x8000) >> 0x10))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff)))), 0x0, 0x10) << 0x1) + ((st64) 0x8000) >> 0x10) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_178 = SETL("h_tmp302", cond_175); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) ((st32) ((st6 ...; + RzILOpEffect *seq_179 = SEQN(2, gcc_expr_176, op_ASSIGN_hybrid_tmp_178); + + // Rdd = ((Rdd & (~(0xffffffff << 0x20))) | ((((sextract64(((ut64) ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff)))), 0x0, 0x10) << 0x1) + ((st64) 0x8000) >> 0x10)), 0x0, 0x20) == ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff)))), 0x0, 0x10) << 0x1) + ((st64) 0x8000) >> 0x10)) ? 
((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff)))), 0x0, 0x10) << 0x1) + ((st64) 0x8000) >> 0x10) : h_tmp302) & 0xffffffff) << 0x20)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0x20)); + RzILOpPure *op_NOT_6 = LOGNOT(op_LSHIFT_5); + RzILOpPure *op_AND_7 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_6); + RzILOpPure *op_RSHIFT_90 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_92 = LOGAND(op_RSHIFT_90, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_103 = SHIFTRA(DUP(Rtt), SN(32, 0x30)); + RzILOpPure *op_AND_106 = LOGAND(op_RSHIFT_103, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_113 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_92), DUP(op_AND_92))), CAST(32, MSB(DUP(op_AND_92)), DUP(op_AND_92)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_92)), DUP(op_AND_92))), CAST(32, MSB(DUP(op_AND_92)), DUP(op_AND_92))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_92)), DUP(op_AND_92))), CAST(32, MSB(DUP(op_AND_92)), DUP(op_AND_92)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_92)), DUP(op_AND_92))), CAST(32, MSB(DUP(op_AND_92)), DUP(op_AND_92))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_106), DUP(op_AND_106))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_115 = SHIFTL0(op_MUL_113, SN(32, 1)); + RzILOpPure *op_ADD_118 = ADD(op_LSHIFT_115, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_120 = SHIFTRA(op_ADD_118, SN(32, 16)); + RzILOpPure *cond_180 = ITE(DUP(op_EQ_86), op_RSHIFT_120, VARL("h_tmp302")); + RzILOpPure *op_AND_182 = LOGAND(cond_180, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_186 = SHIFTL0(op_AND_182, SN(32, 0x20)); + RzILOpPure *op_OR_187 = LOGOR(op_AND_7, op_LSHIFT_186); + RzILOpEffect *op_ASSIGN_188 = WRITE_REG(bundle, Rdd_op, op_OR_187); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) ((st32) ( ...; + RzILOpEffect *seq_189 = SEQN(2, seq_179, op_ASSIGN_188); + + // 
set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_311 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10) << 0x1) + ((st64) 0x8000) >> 0x10)), 0x0, 0x20) == ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10) << 0x1) + ((st64) 0x8000) >> 0x10))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10) << 0x1) + ((st64) 0x8000) >> 0x10) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_204 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_206 = LOGAND(op_RSHIFT_204, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_217 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_220 = LOGAND(op_RSHIFT_217, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_227 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_206), DUP(op_AND_206))), CAST(32, MSB(DUP(op_AND_206)), DUP(op_AND_206)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_206)), DUP(op_AND_206))), CAST(32, MSB(DUP(op_AND_206)), DUP(op_AND_206))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_206)), DUP(op_AND_206))), CAST(32, MSB(DUP(op_AND_206)), DUP(op_AND_206)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_206)), DUP(op_AND_206))), CAST(32, MSB(DUP(op_AND_206)), DUP(op_AND_206))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_220), DUP(op_AND_220))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_229 = SHIFTL0(op_MUL_227, SN(32, 1)); + RzILOpPure *op_ADD_232 = ADD(op_LSHIFT_229, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + 
RzILOpPure *op_RSHIFT_234 = SHIFTRA(op_ADD_232, SN(32, 16)); + RzILOpPure *op_RSHIFT_243 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_245 = LOGAND(op_RSHIFT_243, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_256 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_259 = LOGAND(op_RSHIFT_256, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_266 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_245), DUP(op_AND_245))), CAST(32, MSB(DUP(op_AND_245)), DUP(op_AND_245)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_245)), DUP(op_AND_245))), CAST(32, MSB(DUP(op_AND_245)), DUP(op_AND_245))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_245)), DUP(op_AND_245))), CAST(32, MSB(DUP(op_AND_245)), DUP(op_AND_245)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_245)), DUP(op_AND_245))), CAST(32, MSB(DUP(op_AND_245)), DUP(op_AND_245))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_259), DUP(op_AND_259))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_268 = SHIFTL0(op_MUL_266, SN(32, 1)); + RzILOpPure *op_ADD_271 = ADD(op_LSHIFT_268, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_273 = SHIFTRA(op_ADD_271, SN(32, 16)); + RzILOpPure *op_EQ_274 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_RSHIFT_234), SN(32, 0), SN(32, 0x20)), op_RSHIFT_273); + RzILOpPure *op_RSHIFT_315 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_317 = LOGAND(op_RSHIFT_315, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_328 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_331 = LOGAND(op_RSHIFT_328, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_338 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_317), DUP(op_AND_317))), CAST(32, MSB(DUP(op_AND_317)), DUP(op_AND_317)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_317)), DUP(op_AND_317))), CAST(32, MSB(DUP(op_AND_317)), DUP(op_AND_317))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_317)), DUP(op_AND_317))), CAST(32, MSB(DUP(op_AND_317)), 
DUP(op_AND_317)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_317)), DUP(op_AND_317))), CAST(32, MSB(DUP(op_AND_317)), DUP(op_AND_317))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_331), DUP(op_AND_331))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_340 = SHIFTL0(op_MUL_338, SN(32, 1)); + RzILOpPure *op_ADD_343 = ADD(op_LSHIFT_340, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_345 = SHIFTRA(op_ADD_343, SN(32, 16)); + RzILOpPure *op_LT_348 = SLT(op_RSHIFT_345, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_353 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_354 = NEG(op_LSHIFT_353); + RzILOpPure *op_LSHIFT_359 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_362 = SUB(op_LSHIFT_359, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_363 = ITE(op_LT_348, op_NEG_354, op_SUB_362); + RzILOpEffect *gcc_expr_364 = BRANCH(op_EQ_274, EMPTY(), set_usr_field_call_311); + + // h_tmp303 = HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10) << 0x1) + ((st64) 0x8000) >> 0x10)), 0x0, 0x20) == ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10) << 0x1) + ((st64) 0x8000) >> 0x10))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10) << 0x1) + ((st64) 0x8000) >> 0x10) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_366 = SETL("h_tmp303", cond_363); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) ((st32) ((st6 ...; + RzILOpEffect *seq_367 = SEQN(2, gcc_expr_364, op_ASSIGN_hybrid_tmp_366); + + // Rdd = ((Rdd & (~(0xffffffff << 0x0))) | ((((sextract64(((ut64) ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10) << 0x1) + ((st64) 0x8000) >> 0x10)), 0x0, 0x20) == ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10) << 0x1) + ((st64) 0x8000) >> 0x10)) ? ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10) << 0x1) + ((st64) 0x8000) >> 0x10) : h_tmp303) & 0xffffffff) << 0x0)); + RzILOpPure *op_LSHIFT_195 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0)); + RzILOpPure *op_NOT_196 = LOGNOT(op_LSHIFT_195); + RzILOpPure *op_AND_197 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_196); + RzILOpPure *op_RSHIFT_278 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_280 = LOGAND(op_RSHIFT_278, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_291 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_294 = LOGAND(op_RSHIFT_291, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_301 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_280), DUP(op_AND_280))), CAST(32, MSB(DUP(op_AND_280)), DUP(op_AND_280)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_280)), DUP(op_AND_280))), CAST(32, MSB(DUP(op_AND_280)), DUP(op_AND_280))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_280)), DUP(op_AND_280))), CAST(32, MSB(DUP(op_AND_280)), DUP(op_AND_280)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_280)), DUP(op_AND_280))), CAST(32, MSB(DUP(op_AND_280)), DUP(op_AND_280))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_294), 
DUP(op_AND_294))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_303 = SHIFTL0(op_MUL_301, SN(32, 1)); + RzILOpPure *op_ADD_306 = ADD(op_LSHIFT_303, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_308 = SHIFTRA(op_ADD_306, SN(32, 16)); + RzILOpPure *cond_368 = ITE(DUP(op_EQ_274), op_RSHIFT_308, VARL("h_tmp303")); + RzILOpPure *op_AND_370 = LOGAND(cond_368, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_374 = SHIFTL0(op_AND_370, SN(32, 0)); + RzILOpPure *op_OR_375 = LOGOR(op_AND_197, op_LSHIFT_374); + RzILOpEffect *op_ASSIGN_376 = WRITE_REG(bundle, Rdd_op, op_OR_375); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) ((st32) ( ...; + RzILOpEffect *seq_377 = SEQN(2, seq_367, op_ASSIGN_376); + + RzILOpEffect *instruction_sequence = SEQN(2, seq_189, seq_377); + return instruction_sequence; +} + +// Rdd = vmpywoh(Rss,Rtt):sat +RzILOpEffect *hex_il_op_m2_mmpyh_s0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_114 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff)))), 0x0, 0x10) << 0x0) >> 0x10)), 0x0, 0x20) == ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff)))), 0x0, 0x10) << 0x0) >> 0x10))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) 
((Rtt >> 0x30) & ((st64) 0xffff)))), 0x0, 0x10) << 0x0) >> 0x10) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_15 = SHIFTRA(Rss, SN(32, 0x20)); + RzILOpPure *op_AND_17 = LOGAND(op_RSHIFT_15, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_29 = SHIFTRA(Rtt, SN(32, 0x30)); + RzILOpPure *op_AND_32 = LOGAND(op_RSHIFT_29, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_39 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_17), DUP(op_AND_17))), CAST(32, MSB(DUP(op_AND_17)), DUP(op_AND_17)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_17)), DUP(op_AND_17))), CAST(32, MSB(DUP(op_AND_17)), DUP(op_AND_17))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_17)), DUP(op_AND_17))), CAST(32, MSB(DUP(op_AND_17)), DUP(op_AND_17)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_17)), DUP(op_AND_17))), CAST(32, MSB(DUP(op_AND_17)), DUP(op_AND_17))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_32), DUP(op_AND_32))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_41 = SHIFTL0(op_MUL_39, SN(32, 0)); + RzILOpPure *op_RSHIFT_43 = SHIFTRA(op_LSHIFT_41, SN(32, 16)); + RzILOpPure *op_RSHIFT_52 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_54 = LOGAND(op_RSHIFT_52, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_65 = SHIFTRA(DUP(Rtt), SN(32, 0x30)); + RzILOpPure *op_AND_68 = LOGAND(op_RSHIFT_65, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_75 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_54), DUP(op_AND_54))), CAST(32, MSB(DUP(op_AND_54)), DUP(op_AND_54)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_54)), DUP(op_AND_54))), CAST(32, MSB(DUP(op_AND_54)), DUP(op_AND_54))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_54)), DUP(op_AND_54))), CAST(32, MSB(DUP(op_AND_54)), DUP(op_AND_54)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_54)), DUP(op_AND_54))), CAST(32, MSB(DUP(op_AND_54)), DUP(op_AND_54))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_68), 
DUP(op_AND_68))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_77 = SHIFTL0(op_MUL_75, SN(32, 0)); + RzILOpPure *op_RSHIFT_79 = SHIFTRA(op_LSHIFT_77, SN(32, 16)); + RzILOpPure *op_EQ_80 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_RSHIFT_43), SN(32, 0), SN(32, 0x20)), op_RSHIFT_79); + RzILOpPure *op_RSHIFT_118 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_120 = LOGAND(op_RSHIFT_118, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_131 = SHIFTRA(DUP(Rtt), SN(32, 0x30)); + RzILOpPure *op_AND_134 = LOGAND(op_RSHIFT_131, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_141 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_120), DUP(op_AND_120))), CAST(32, MSB(DUP(op_AND_120)), DUP(op_AND_120)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_120)), DUP(op_AND_120))), CAST(32, MSB(DUP(op_AND_120)), DUP(op_AND_120))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_120)), DUP(op_AND_120))), CAST(32, MSB(DUP(op_AND_120)), DUP(op_AND_120)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_120)), DUP(op_AND_120))), CAST(32, MSB(DUP(op_AND_120)), DUP(op_AND_120))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_134), DUP(op_AND_134))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_143 = SHIFTL0(op_MUL_141, SN(32, 0)); + RzILOpPure *op_RSHIFT_145 = SHIFTRA(op_LSHIFT_143, SN(32, 16)); + RzILOpPure *op_LT_148 = SLT(op_RSHIFT_145, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_153 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_154 = NEG(op_LSHIFT_153); + RzILOpPure *op_LSHIFT_159 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_162 = SUB(op_LSHIFT_159, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_163 = ITE(op_LT_148, op_NEG_154, op_SUB_162); + RzILOpEffect *gcc_expr_164 = BRANCH(op_EQ_80, EMPTY(), set_usr_field_call_114); + + // h_tmp304 = HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff)))), 
0x0, 0x10) << 0x0) >> 0x10)), 0x0, 0x20) == ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff)))), 0x0, 0x10) << 0x0) >> 0x10))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff)))), 0x0, 0x10) << 0x0) >> 0x10) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_166 = SETL("h_tmp304", cond_163); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) ((st32) ((st6 ...; + RzILOpEffect *seq_167 = SEQN(2, gcc_expr_164, op_ASSIGN_hybrid_tmp_166); + + // Rdd = ((Rdd & (~(0xffffffff << 0x20))) | ((((sextract64(((ut64) ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff)))), 0x0, 0x10) << 0x0) >> 0x10)), 0x0, 0x20) == ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff)))), 0x0, 0x10) << 0x0) >> 0x10)) ? 
((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff)))), 0x0, 0x10) << 0x0) >> 0x10) : h_tmp304) & 0xffffffff) << 0x20)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0x20)); + RzILOpPure *op_NOT_6 = LOGNOT(op_LSHIFT_5); + RzILOpPure *op_AND_7 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_6); + RzILOpPure *op_RSHIFT_84 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_86 = LOGAND(op_RSHIFT_84, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_97 = SHIFTRA(DUP(Rtt), SN(32, 0x30)); + RzILOpPure *op_AND_100 = LOGAND(op_RSHIFT_97, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_107 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_86), DUP(op_AND_86))), CAST(32, MSB(DUP(op_AND_86)), DUP(op_AND_86)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_86)), DUP(op_AND_86))), CAST(32, MSB(DUP(op_AND_86)), DUP(op_AND_86))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_86)), DUP(op_AND_86))), CAST(32, MSB(DUP(op_AND_86)), DUP(op_AND_86)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_86)), DUP(op_AND_86))), CAST(32, MSB(DUP(op_AND_86)), DUP(op_AND_86))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_100), DUP(op_AND_100))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_109 = SHIFTL0(op_MUL_107, SN(32, 0)); + RzILOpPure *op_RSHIFT_111 = SHIFTRA(op_LSHIFT_109, SN(32, 16)); + RzILOpPure *cond_168 = ITE(DUP(op_EQ_80), op_RSHIFT_111, VARL("h_tmp304")); + RzILOpPure *op_AND_170 = LOGAND(cond_168, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_174 = SHIFTL0(op_AND_170, SN(32, 0x20)); + RzILOpPure *op_OR_175 = LOGOR(op_AND_7, op_LSHIFT_174); + RzILOpEffect *op_ASSIGN_176 = WRITE_REG(bundle, Rdd_op, op_OR_175); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) ((st32) ( ...; + RzILOpEffect *seq_177 = SEQN(2, seq_167, op_ASSIGN_176); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_290 = 
hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10) << 0x0) >> 0x10)), 0x0, 0x20) == ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10) << 0x0) >> 0x10))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10) << 0x0) >> 0x10) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_192 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_194 = LOGAND(op_RSHIFT_192, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_205 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_208 = LOGAND(op_RSHIFT_205, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_215 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_194), DUP(op_AND_194))), CAST(32, MSB(DUP(op_AND_194)), DUP(op_AND_194)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_194)), DUP(op_AND_194))), CAST(32, MSB(DUP(op_AND_194)), DUP(op_AND_194))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_194)), DUP(op_AND_194))), CAST(32, MSB(DUP(op_AND_194)), DUP(op_AND_194)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_194)), DUP(op_AND_194))), CAST(32, MSB(DUP(op_AND_194)), DUP(op_AND_194))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_208), DUP(op_AND_208))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_217 = SHIFTL0(op_MUL_215, SN(32, 0)); + RzILOpPure *op_RSHIFT_219 = SHIFTRA(op_LSHIFT_217, SN(32, 16)); + RzILOpPure *op_RSHIFT_228 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_230 = LOGAND(op_RSHIFT_228, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_241 = SHIFTRA(DUP(Rtt), SN(32, 16)); + 
RzILOpPure *op_AND_244 = LOGAND(op_RSHIFT_241, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_251 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_230), DUP(op_AND_230))), CAST(32, MSB(DUP(op_AND_230)), DUP(op_AND_230)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_230)), DUP(op_AND_230))), CAST(32, MSB(DUP(op_AND_230)), DUP(op_AND_230))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_230)), DUP(op_AND_230))), CAST(32, MSB(DUP(op_AND_230)), DUP(op_AND_230)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_230)), DUP(op_AND_230))), CAST(32, MSB(DUP(op_AND_230)), DUP(op_AND_230))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_244), DUP(op_AND_244))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_253 = SHIFTL0(op_MUL_251, SN(32, 0)); + RzILOpPure *op_RSHIFT_255 = SHIFTRA(op_LSHIFT_253, SN(32, 16)); + RzILOpPure *op_EQ_256 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_RSHIFT_219), SN(32, 0), SN(32, 0x20)), op_RSHIFT_255); + RzILOpPure *op_RSHIFT_294 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_296 = LOGAND(op_RSHIFT_294, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_307 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_310 = LOGAND(op_RSHIFT_307, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_317 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_296), DUP(op_AND_296))), CAST(32, MSB(DUP(op_AND_296)), DUP(op_AND_296)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_296)), DUP(op_AND_296))), CAST(32, MSB(DUP(op_AND_296)), DUP(op_AND_296))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_296)), DUP(op_AND_296))), CAST(32, MSB(DUP(op_AND_296)), DUP(op_AND_296)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_296)), DUP(op_AND_296))), CAST(32, MSB(DUP(op_AND_296)), DUP(op_AND_296))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_310), DUP(op_AND_310))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_319 = SHIFTL0(op_MUL_317, SN(32, 0)); + RzILOpPure *op_RSHIFT_321 = SHIFTRA(op_LSHIFT_319, SN(32, 
16)); + RzILOpPure *op_LT_324 = SLT(op_RSHIFT_321, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_329 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_330 = NEG(op_LSHIFT_329); + RzILOpPure *op_LSHIFT_335 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_338 = SUB(op_LSHIFT_335, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_339 = ITE(op_LT_324, op_NEG_330, op_SUB_338); + RzILOpEffect *gcc_expr_340 = BRANCH(op_EQ_256, EMPTY(), set_usr_field_call_290); + + // h_tmp305 = HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10) << 0x0) >> 0x10)), 0x0, 0x20) == ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10) << 0x0) >> 0x10))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10) << 0x0) >> 0x10) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_342 = SETL("h_tmp305", cond_339); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) ((st32) ((st6 ...; + RzILOpEffect *seq_343 = SEQN(2, gcc_expr_340, op_ASSIGN_hybrid_tmp_342); + + // Rdd = ((Rdd & (~(0xffffffff << 0x0))) | ((((sextract64(((ut64) ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10) << 0x0) >> 0x10)), 0x0, 0x20) == ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10) << 0x0) >> 0x10)) ? 
((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10) << 0x0) >> 0x10) : h_tmp305) & 0xffffffff) << 0x0)); + RzILOpPure *op_LSHIFT_183 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0)); + RzILOpPure *op_NOT_184 = LOGNOT(op_LSHIFT_183); + RzILOpPure *op_AND_185 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_184); + RzILOpPure *op_RSHIFT_260 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_262 = LOGAND(op_RSHIFT_260, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_273 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_276 = LOGAND(op_RSHIFT_273, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_283 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_262), DUP(op_AND_262))), CAST(32, MSB(DUP(op_AND_262)), DUP(op_AND_262)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_262)), DUP(op_AND_262))), CAST(32, MSB(DUP(op_AND_262)), DUP(op_AND_262))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_262)), DUP(op_AND_262))), CAST(32, MSB(DUP(op_AND_262)), DUP(op_AND_262)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_262)), DUP(op_AND_262))), CAST(32, MSB(DUP(op_AND_262)), DUP(op_AND_262))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_276), DUP(op_AND_276))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_285 = SHIFTL0(op_MUL_283, SN(32, 0)); + RzILOpPure *op_RSHIFT_287 = SHIFTRA(op_LSHIFT_285, SN(32, 16)); + RzILOpPure *cond_344 = ITE(DUP(op_EQ_256), op_RSHIFT_287, VARL("h_tmp305")); + RzILOpPure *op_AND_346 = LOGAND(cond_344, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_350 = SHIFTL0(op_AND_346, SN(32, 0)); + RzILOpPure *op_OR_351 = LOGOR(op_AND_185, op_LSHIFT_350); + RzILOpEffect *op_ASSIGN_352 = WRITE_REG(bundle, Rdd_op, op_OR_351); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) ((st32) ( ...; + RzILOpEffect *seq_353 = SEQN(2, seq_343, op_ASSIGN_352); + + RzILOpEffect *instruction_sequence = SEQN(2, seq_177, seq_353); + return 
instruction_sequence; +} + +// Rdd = vmpywoh(Rss,Rtt):<<1:sat +RzILOpEffect *hex_il_op_m2_mmpyh_s1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_114 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff)))), 0x0, 0x10) << 0x1) >> 0x10)), 0x0, 0x20) == ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff)))), 0x0, 0x10) << 0x1) >> 0x10))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff)))), 0x0, 0x10) << 0x1) >> 0x10) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_15 = SHIFTRA(Rss, SN(32, 0x20)); + RzILOpPure *op_AND_17 = LOGAND(op_RSHIFT_15, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_29 = SHIFTRA(Rtt, SN(32, 0x30)); + RzILOpPure *op_AND_32 = LOGAND(op_RSHIFT_29, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_39 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_17), DUP(op_AND_17))), CAST(32, MSB(DUP(op_AND_17)), DUP(op_AND_17)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_17)), DUP(op_AND_17))), CAST(32, MSB(DUP(op_AND_17)), DUP(op_AND_17))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_17)), DUP(op_AND_17))), CAST(32, MSB(DUP(op_AND_17)), DUP(op_AND_17)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_17)), DUP(op_AND_17))), CAST(32, MSB(DUP(op_AND_17)), DUP(op_AND_17))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_32), DUP(op_AND_32))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_41 = SHIFTL0(op_MUL_39, SN(32, 1)); + RzILOpPure *op_RSHIFT_43 = SHIFTRA(op_LSHIFT_41, SN(32, 16)); + RzILOpPure *op_RSHIFT_52 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_54 = LOGAND(op_RSHIFT_52, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_65 = SHIFTRA(DUP(Rtt), SN(32, 0x30)); + RzILOpPure *op_AND_68 = LOGAND(op_RSHIFT_65, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_75 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_54), DUP(op_AND_54))), CAST(32, MSB(DUP(op_AND_54)), DUP(op_AND_54)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_54)), DUP(op_AND_54))), CAST(32, MSB(DUP(op_AND_54)), DUP(op_AND_54))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_54)), DUP(op_AND_54))), CAST(32, MSB(DUP(op_AND_54)), DUP(op_AND_54)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_54)), DUP(op_AND_54))), CAST(32, MSB(DUP(op_AND_54)), DUP(op_AND_54))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_68), DUP(op_AND_68))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_77 = SHIFTL0(op_MUL_75, 
SN(32, 1)); + RzILOpPure *op_RSHIFT_79 = SHIFTRA(op_LSHIFT_77, SN(32, 16)); + RzILOpPure *op_EQ_80 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_RSHIFT_43), SN(32, 0), SN(32, 0x20)), op_RSHIFT_79); + RzILOpPure *op_RSHIFT_118 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_120 = LOGAND(op_RSHIFT_118, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_131 = SHIFTRA(DUP(Rtt), SN(32, 0x30)); + RzILOpPure *op_AND_134 = LOGAND(op_RSHIFT_131, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_141 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_120), DUP(op_AND_120))), CAST(32, MSB(DUP(op_AND_120)), DUP(op_AND_120)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_120)), DUP(op_AND_120))), CAST(32, MSB(DUP(op_AND_120)), DUP(op_AND_120))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_120)), DUP(op_AND_120))), CAST(32, MSB(DUP(op_AND_120)), DUP(op_AND_120)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_120)), DUP(op_AND_120))), CAST(32, MSB(DUP(op_AND_120)), DUP(op_AND_120))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_134), DUP(op_AND_134))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_143 = SHIFTL0(op_MUL_141, SN(32, 1)); + RzILOpPure *op_RSHIFT_145 = SHIFTRA(op_LSHIFT_143, SN(32, 16)); + RzILOpPure *op_LT_148 = SLT(op_RSHIFT_145, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_153 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_154 = NEG(op_LSHIFT_153); + RzILOpPure *op_LSHIFT_159 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_162 = SUB(op_LSHIFT_159, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_163 = ITE(op_LT_148, op_NEG_154, op_SUB_162); + RzILOpEffect *gcc_expr_164 = BRANCH(op_EQ_80, EMPTY(), set_usr_field_call_114); + + // h_tmp306 = HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff)))), 0x0, 0x10) << 0x1) >> 0x10)), 0x0, 0x20) == ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) 
& 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff)))), 0x0, 0x10) << 0x1) >> 0x10))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff)))), 0x0, 0x10) << 0x1) >> 0x10) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_166 = SETL("h_tmp306", cond_163); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) ((st32) ((st6 ...; + RzILOpEffect *seq_167 = SEQN(2, gcc_expr_164, op_ASSIGN_hybrid_tmp_166); + + // Rdd = ((Rdd & (~(0xffffffff << 0x20))) | ((((sextract64(((ut64) ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff)))), 0x0, 0x10) << 0x1) >> 0x10)), 0x0, 0x20) == ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff)))), 0x0, 0x10) << 0x1) >> 0x10)) ? 
((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff)))), 0x0, 0x10) << 0x1) >> 0x10) : h_tmp306) & 0xffffffff) << 0x20)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0x20)); + RzILOpPure *op_NOT_6 = LOGNOT(op_LSHIFT_5); + RzILOpPure *op_AND_7 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_6); + RzILOpPure *op_RSHIFT_84 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_86 = LOGAND(op_RSHIFT_84, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_97 = SHIFTRA(DUP(Rtt), SN(32, 0x30)); + RzILOpPure *op_AND_100 = LOGAND(op_RSHIFT_97, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_107 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_86), DUP(op_AND_86))), CAST(32, MSB(DUP(op_AND_86)), DUP(op_AND_86)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_86)), DUP(op_AND_86))), CAST(32, MSB(DUP(op_AND_86)), DUP(op_AND_86))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_86)), DUP(op_AND_86))), CAST(32, MSB(DUP(op_AND_86)), DUP(op_AND_86)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_86)), DUP(op_AND_86))), CAST(32, MSB(DUP(op_AND_86)), DUP(op_AND_86))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_100), DUP(op_AND_100))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_109 = SHIFTL0(op_MUL_107, SN(32, 1)); + RzILOpPure *op_RSHIFT_111 = SHIFTRA(op_LSHIFT_109, SN(32, 16)); + RzILOpPure *cond_168 = ITE(DUP(op_EQ_80), op_RSHIFT_111, VARL("h_tmp306")); + RzILOpPure *op_AND_170 = LOGAND(cond_168, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_174 = SHIFTL0(op_AND_170, SN(32, 0x20)); + RzILOpPure *op_OR_175 = LOGOR(op_AND_7, op_LSHIFT_174); + RzILOpEffect *op_ASSIGN_176 = WRITE_REG(bundle, Rdd_op, op_OR_175); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) ((st32) ( ...; + RzILOpEffect *seq_177 = SEQN(2, seq_167, op_ASSIGN_176); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_290 = 
hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10) << 0x1) >> 0x10)), 0x0, 0x20) == ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10) << 0x1) >> 0x10))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10) << 0x1) >> 0x10) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_192 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_194 = LOGAND(op_RSHIFT_192, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_205 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_208 = LOGAND(op_RSHIFT_205, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_215 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_194), DUP(op_AND_194))), CAST(32, MSB(DUP(op_AND_194)), DUP(op_AND_194)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_194)), DUP(op_AND_194))), CAST(32, MSB(DUP(op_AND_194)), DUP(op_AND_194))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_194)), DUP(op_AND_194))), CAST(32, MSB(DUP(op_AND_194)), DUP(op_AND_194)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_194)), DUP(op_AND_194))), CAST(32, MSB(DUP(op_AND_194)), DUP(op_AND_194))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_208), DUP(op_AND_208))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_217 = SHIFTL0(op_MUL_215, SN(32, 1)); + RzILOpPure *op_RSHIFT_219 = SHIFTRA(op_LSHIFT_217, SN(32, 16)); + RzILOpPure *op_RSHIFT_228 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_230 = LOGAND(op_RSHIFT_228, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_241 = SHIFTRA(DUP(Rtt), SN(32, 16)); + 
RzILOpPure *op_AND_244 = LOGAND(op_RSHIFT_241, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_251 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_230), DUP(op_AND_230))), CAST(32, MSB(DUP(op_AND_230)), DUP(op_AND_230)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_230)), DUP(op_AND_230))), CAST(32, MSB(DUP(op_AND_230)), DUP(op_AND_230))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_230)), DUP(op_AND_230))), CAST(32, MSB(DUP(op_AND_230)), DUP(op_AND_230)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_230)), DUP(op_AND_230))), CAST(32, MSB(DUP(op_AND_230)), DUP(op_AND_230))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_244), DUP(op_AND_244))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_253 = SHIFTL0(op_MUL_251, SN(32, 1)); + RzILOpPure *op_RSHIFT_255 = SHIFTRA(op_LSHIFT_253, SN(32, 16)); + RzILOpPure *op_EQ_256 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_RSHIFT_219), SN(32, 0), SN(32, 0x20)), op_RSHIFT_255); + RzILOpPure *op_RSHIFT_294 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_296 = LOGAND(op_RSHIFT_294, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_307 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_310 = LOGAND(op_RSHIFT_307, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_317 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_296), DUP(op_AND_296))), CAST(32, MSB(DUP(op_AND_296)), DUP(op_AND_296)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_296)), DUP(op_AND_296))), CAST(32, MSB(DUP(op_AND_296)), DUP(op_AND_296))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_296)), DUP(op_AND_296))), CAST(32, MSB(DUP(op_AND_296)), DUP(op_AND_296)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_296)), DUP(op_AND_296))), CAST(32, MSB(DUP(op_AND_296)), DUP(op_AND_296))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_310), DUP(op_AND_310))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_319 = SHIFTL0(op_MUL_317, SN(32, 1)); + RzILOpPure *op_RSHIFT_321 = SHIFTRA(op_LSHIFT_319, SN(32, 
16)); + RzILOpPure *op_LT_324 = SLT(op_RSHIFT_321, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_329 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_330 = NEG(op_LSHIFT_329); + RzILOpPure *op_LSHIFT_335 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_338 = SUB(op_LSHIFT_335, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_339 = ITE(op_LT_324, op_NEG_330, op_SUB_338); + RzILOpEffect *gcc_expr_340 = BRANCH(op_EQ_256, EMPTY(), set_usr_field_call_290); + + // h_tmp307 = HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10) << 0x1) >> 0x10)), 0x0, 0x20) == ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10) << 0x1) >> 0x10))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10) << 0x1) >> 0x10) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_342 = SETL("h_tmp307", cond_339); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) ((st32) ((st6 ...; + RzILOpEffect *seq_343 = SEQN(2, gcc_expr_340, op_ASSIGN_hybrid_tmp_342); + + // Rdd = ((Rdd & (~(0xffffffff << 0x0))) | ((((sextract64(((ut64) ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10) << 0x1) >> 0x10)), 0x0, 0x20) == ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10) << 0x1) >> 0x10)) ? 
((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10) << 0x1) >> 0x10) : h_tmp307) & 0xffffffff) << 0x0)); + RzILOpPure *op_LSHIFT_183 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0)); + RzILOpPure *op_NOT_184 = LOGNOT(op_LSHIFT_183); + RzILOpPure *op_AND_185 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_184); + RzILOpPure *op_RSHIFT_260 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_262 = LOGAND(op_RSHIFT_260, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_273 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_276 = LOGAND(op_RSHIFT_273, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_283 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_262), DUP(op_AND_262))), CAST(32, MSB(DUP(op_AND_262)), DUP(op_AND_262)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_262)), DUP(op_AND_262))), CAST(32, MSB(DUP(op_AND_262)), DUP(op_AND_262))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_262)), DUP(op_AND_262))), CAST(32, MSB(DUP(op_AND_262)), DUP(op_AND_262)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_262)), DUP(op_AND_262))), CAST(32, MSB(DUP(op_AND_262)), DUP(op_AND_262))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_276), DUP(op_AND_276))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_285 = SHIFTL0(op_MUL_283, SN(32, 1)); + RzILOpPure *op_RSHIFT_287 = SHIFTRA(op_LSHIFT_285, SN(32, 16)); + RzILOpPure *cond_344 = ITE(DUP(op_EQ_256), op_RSHIFT_287, VARL("h_tmp307")); + RzILOpPure *op_AND_346 = LOGAND(cond_344, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_350 = SHIFTL0(op_AND_346, SN(32, 0)); + RzILOpPure *op_OR_351 = LOGOR(op_AND_185, op_LSHIFT_350); + RzILOpEffect *op_ASSIGN_352 = WRITE_REG(bundle, Rdd_op, op_OR_351); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) ((st32) ( ...; + RzILOpEffect *seq_353 = SEQN(2, seq_343, op_ASSIGN_352); + + RzILOpEffect *instruction_sequence = SEQN(2, seq_177, seq_353); + return 
instruction_sequence; +} + +// Rdd = vmpyweh(Rss,Rtt):rnd:sat +RzILOpEffect *hex_il_op_m2_mmpyl_rs0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_123 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10) << 0x0) + ((st64) 0x8000) >> 0x10)), 0x0, 0x20) == ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10) << 0x0) + ((st64) 0x8000) >> 0x10))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10) << 0x0) + ((st64) 0x8000) >> 0x10) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_15 = SHIFTRA(Rss, SN(32, 0x20)); + RzILOpPure *op_AND_17 = LOGAND(op_RSHIFT_15, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_29 = SHIFTRA(Rtt, SN(32, 0x20)); + RzILOpPure *op_AND_32 = LOGAND(op_RSHIFT_29, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_39 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_17), DUP(op_AND_17))), CAST(32, MSB(DUP(op_AND_17)), DUP(op_AND_17)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_17)), DUP(op_AND_17))), CAST(32, MSB(DUP(op_AND_17)), DUP(op_AND_17))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_17)), DUP(op_AND_17))), CAST(32, MSB(DUP(op_AND_17)), DUP(op_AND_17)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_17)), DUP(op_AND_17))), CAST(32, MSB(DUP(op_AND_17)), DUP(op_AND_17))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_32), DUP(op_AND_32))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_41 = SHIFTL0(op_MUL_39, SN(32, 0)); + RzILOpPure *op_ADD_44 = ADD(op_LSHIFT_41, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_46 = SHIFTRA(op_ADD_44, SN(32, 16)); + RzILOpPure *op_RSHIFT_55 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_57 = LOGAND(op_RSHIFT_55, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_68 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_71 = LOGAND(op_RSHIFT_68, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_78 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_57), DUP(op_AND_57))), CAST(32, MSB(DUP(op_AND_57)), DUP(op_AND_57)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_57)), DUP(op_AND_57))), CAST(32, MSB(DUP(op_AND_57)), DUP(op_AND_57))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_57)), DUP(op_AND_57))), CAST(32, MSB(DUP(op_AND_57)), DUP(op_AND_57)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_57)), DUP(op_AND_57))), CAST(32, MSB(DUP(op_AND_57)), DUP(op_AND_57))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_71), 
DUP(op_AND_71))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_80 = SHIFTL0(op_MUL_78, SN(32, 0)); + RzILOpPure *op_ADD_83 = ADD(op_LSHIFT_80, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_85 = SHIFTRA(op_ADD_83, SN(32, 16)); + RzILOpPure *op_EQ_86 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_RSHIFT_46), SN(32, 0), SN(32, 0x20)), op_RSHIFT_85); + RzILOpPure *op_RSHIFT_127 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_129 = LOGAND(op_RSHIFT_127, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_140 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_143 = LOGAND(op_RSHIFT_140, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_150 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_129), DUP(op_AND_129))), CAST(32, MSB(DUP(op_AND_129)), DUP(op_AND_129)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_129)), DUP(op_AND_129))), CAST(32, MSB(DUP(op_AND_129)), DUP(op_AND_129))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_129)), DUP(op_AND_129))), CAST(32, MSB(DUP(op_AND_129)), DUP(op_AND_129)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_129)), DUP(op_AND_129))), CAST(32, MSB(DUP(op_AND_129)), DUP(op_AND_129))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_143), DUP(op_AND_143))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_152 = SHIFTL0(op_MUL_150, SN(32, 0)); + RzILOpPure *op_ADD_155 = ADD(op_LSHIFT_152, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_157 = SHIFTRA(op_ADD_155, SN(32, 16)); + RzILOpPure *op_LT_160 = SLT(op_RSHIFT_157, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_165 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_166 = NEG(op_LSHIFT_165); + RzILOpPure *op_LSHIFT_171 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_174 = SUB(op_LSHIFT_171, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_175 = ITE(op_LT_160, op_NEG_166, op_SUB_174); + RzILOpEffect *gcc_expr_176 = BRANCH(op_EQ_86, EMPTY(), set_usr_field_call_123); + + // 
h_tmp308 = HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10) << 0x0) + ((st64) 0x8000) >> 0x10)), 0x0, 0x20) == ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10) << 0x0) + ((st64) 0x8000) >> 0x10))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10) << 0x0) + ((st64) 0x8000) >> 0x10) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_178 = SETL("h_tmp308", cond_175); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) ((st32) ((st6 ...; + RzILOpEffect *seq_179 = SEQN(2, gcc_expr_176, op_ASSIGN_hybrid_tmp_178); + + // Rdd = ((Rdd & (~(0xffffffff << 0x20))) | ((((sextract64(((ut64) ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10) << 0x0) + ((st64) 0x8000) >> 0x10)), 0x0, 0x20) == ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10) << 0x0) + ((st64) 0x8000) >> 0x10)) ? 
((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10) << 0x0) + ((st64) 0x8000) >> 0x10) : h_tmp308) & 0xffffffff) << 0x20)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0x20)); + RzILOpPure *op_NOT_6 = LOGNOT(op_LSHIFT_5); + RzILOpPure *op_AND_7 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_6); + RzILOpPure *op_RSHIFT_90 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_92 = LOGAND(op_RSHIFT_90, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_103 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_106 = LOGAND(op_RSHIFT_103, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_113 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_92), DUP(op_AND_92))), CAST(32, MSB(DUP(op_AND_92)), DUP(op_AND_92)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_92)), DUP(op_AND_92))), CAST(32, MSB(DUP(op_AND_92)), DUP(op_AND_92))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_92)), DUP(op_AND_92))), CAST(32, MSB(DUP(op_AND_92)), DUP(op_AND_92)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_92)), DUP(op_AND_92))), CAST(32, MSB(DUP(op_AND_92)), DUP(op_AND_92))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_106), DUP(op_AND_106))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_115 = SHIFTL0(op_MUL_113, SN(32, 0)); + RzILOpPure *op_ADD_118 = ADD(op_LSHIFT_115, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_120 = SHIFTRA(op_ADD_118, SN(32, 16)); + RzILOpPure *cond_180 = ITE(DUP(op_EQ_86), op_RSHIFT_120, VARL("h_tmp308")); + RzILOpPure *op_AND_182 = LOGAND(cond_180, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_186 = SHIFTL0(op_AND_182, SN(32, 0x20)); + RzILOpPure *op_OR_187 = LOGOR(op_AND_7, op_LSHIFT_186); + RzILOpEffect *op_ASSIGN_188 = WRITE_REG(bundle, Rdd_op, op_OR_187); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) ((st32) ( ...; + RzILOpEffect *seq_189 = SEQN(2, seq_179, op_ASSIGN_188); + + // 
set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_311 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10) << 0x0) + ((st64) 0x8000) >> 0x10)), 0x0, 0x20) == ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10) << 0x0) + ((st64) 0x8000) >> 0x10))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10) << 0x0) + ((st64) 0x8000) >> 0x10) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_204 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_206 = LOGAND(op_RSHIFT_204, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_217 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_220 = LOGAND(op_RSHIFT_217, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_227 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_206), DUP(op_AND_206))), CAST(32, MSB(DUP(op_AND_206)), DUP(op_AND_206)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_206)), DUP(op_AND_206))), CAST(32, MSB(DUP(op_AND_206)), DUP(op_AND_206))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_206)), DUP(op_AND_206))), CAST(32, MSB(DUP(op_AND_206)), DUP(op_AND_206)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_206)), DUP(op_AND_206))), CAST(32, MSB(DUP(op_AND_206)), DUP(op_AND_206))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_220), DUP(op_AND_220))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_229 = SHIFTL0(op_MUL_227, SN(32, 0)); + RzILOpPure *op_ADD_232 = ADD(op_LSHIFT_229, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure 
*op_RSHIFT_234 = SHIFTRA(op_ADD_232, SN(32, 16)); + RzILOpPure *op_RSHIFT_243 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_245 = LOGAND(op_RSHIFT_243, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_256 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_259 = LOGAND(op_RSHIFT_256, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_266 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_245), DUP(op_AND_245))), CAST(32, MSB(DUP(op_AND_245)), DUP(op_AND_245)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_245)), DUP(op_AND_245))), CAST(32, MSB(DUP(op_AND_245)), DUP(op_AND_245))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_245)), DUP(op_AND_245))), CAST(32, MSB(DUP(op_AND_245)), DUP(op_AND_245)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_245)), DUP(op_AND_245))), CAST(32, MSB(DUP(op_AND_245)), DUP(op_AND_245))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_259), DUP(op_AND_259))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_268 = SHIFTL0(op_MUL_266, SN(32, 0)); + RzILOpPure *op_ADD_271 = ADD(op_LSHIFT_268, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_273 = SHIFTRA(op_ADD_271, SN(32, 16)); + RzILOpPure *op_EQ_274 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_RSHIFT_234), SN(32, 0), SN(32, 0x20)), op_RSHIFT_273); + RzILOpPure *op_RSHIFT_315 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_317 = LOGAND(op_RSHIFT_315, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_328 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_331 = LOGAND(op_RSHIFT_328, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_338 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_317), DUP(op_AND_317))), CAST(32, MSB(DUP(op_AND_317)), DUP(op_AND_317)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_317)), DUP(op_AND_317))), CAST(32, MSB(DUP(op_AND_317)), DUP(op_AND_317))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_317)), DUP(op_AND_317))), CAST(32, MSB(DUP(op_AND_317)), DUP(op_AND_317)))), CAST(64, 
MSB(CAST(32, MSB(DUP(op_AND_317)), DUP(op_AND_317))), CAST(32, MSB(DUP(op_AND_317)), DUP(op_AND_317))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_331), DUP(op_AND_331))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_340 = SHIFTL0(op_MUL_338, SN(32, 0)); + RzILOpPure *op_ADD_343 = ADD(op_LSHIFT_340, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_345 = SHIFTRA(op_ADD_343, SN(32, 16)); + RzILOpPure *op_LT_348 = SLT(op_RSHIFT_345, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_353 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_354 = NEG(op_LSHIFT_353); + RzILOpPure *op_LSHIFT_359 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_362 = SUB(op_LSHIFT_359, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_363 = ITE(op_LT_348, op_NEG_354, op_SUB_362); + RzILOpEffect *gcc_expr_364 = BRANCH(op_EQ_274, EMPTY(), set_usr_field_call_311); + + // h_tmp309 = HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10) << 0x0) + ((st64) 0x8000) >> 0x10)), 0x0, 0x20) == ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10) << 0x0) + ((st64) 0x8000) >> 0x10))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10) << 0x0) + ((st64) 0x8000) >> 0x10) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_366 = SETL("h_tmp309", cond_363); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) ((st32) ((st6 ...; + RzILOpEffect *seq_367 = SEQN(2, gcc_expr_364, op_ASSIGN_hybrid_tmp_366); + + // Rdd = ((Rdd & (~(0xffffffff << 0x0))) | ((((sextract64(((ut64) ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10) << 0x0) + ((st64) 0x8000) >> 0x10)), 0x0, 0x20) == ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10) << 0x0) + ((st64) 0x8000) >> 0x10)) ? ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10) << 0x0) + ((st64) 0x8000) >> 0x10) : h_tmp309) & 0xffffffff) << 0x0)); + RzILOpPure *op_LSHIFT_195 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0)); + RzILOpPure *op_NOT_196 = LOGNOT(op_LSHIFT_195); + RzILOpPure *op_AND_197 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_196); + RzILOpPure *op_RSHIFT_278 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_280 = LOGAND(op_RSHIFT_278, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_291 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_294 = LOGAND(op_RSHIFT_291, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_301 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_280), DUP(op_AND_280))), CAST(32, MSB(DUP(op_AND_280)), DUP(op_AND_280)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_280)), DUP(op_AND_280))), CAST(32, MSB(DUP(op_AND_280)), DUP(op_AND_280))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_280)), DUP(op_AND_280))), CAST(32, MSB(DUP(op_AND_280)), DUP(op_AND_280)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_280)), DUP(op_AND_280))), CAST(32, MSB(DUP(op_AND_280)), DUP(op_AND_280))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_294), 
DUP(op_AND_294))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_303 = SHIFTL0(op_MUL_301, SN(32, 0)); + RzILOpPure *op_ADD_306 = ADD(op_LSHIFT_303, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_308 = SHIFTRA(op_ADD_306, SN(32, 16)); + RzILOpPure *cond_368 = ITE(DUP(op_EQ_274), op_RSHIFT_308, VARL("h_tmp309")); + RzILOpPure *op_AND_370 = LOGAND(cond_368, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_374 = SHIFTL0(op_AND_370, SN(32, 0)); + RzILOpPure *op_OR_375 = LOGOR(op_AND_197, op_LSHIFT_374); + RzILOpEffect *op_ASSIGN_376 = WRITE_REG(bundle, Rdd_op, op_OR_375); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) ((st32) ( ...; + RzILOpEffect *seq_377 = SEQN(2, seq_367, op_ASSIGN_376); + + RzILOpEffect *instruction_sequence = SEQN(2, seq_189, seq_377); + return instruction_sequence; +} + +// Rdd = vmpyweh(Rss,Rtt):<<1:rnd:sat +RzILOpEffect *hex_il_op_m2_mmpyl_rs1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_123 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10) << 0x1) + ((st64) 0x8000) >> 0x10)), 0x0, 0x20) == ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10) << 0x1) + ((st64) 0x8000) >> 0x10))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 
0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10) << 0x1) + ((st64) 0x8000) >> 0x10) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_15 = SHIFTRA(Rss, SN(32, 0x20)); + RzILOpPure *op_AND_17 = LOGAND(op_RSHIFT_15, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_29 = SHIFTRA(Rtt, SN(32, 0x20)); + RzILOpPure *op_AND_32 = LOGAND(op_RSHIFT_29, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_39 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_17), DUP(op_AND_17))), CAST(32, MSB(DUP(op_AND_17)), DUP(op_AND_17)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_17)), DUP(op_AND_17))), CAST(32, MSB(DUP(op_AND_17)), DUP(op_AND_17))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_17)), DUP(op_AND_17))), CAST(32, MSB(DUP(op_AND_17)), DUP(op_AND_17)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_17)), DUP(op_AND_17))), CAST(32, MSB(DUP(op_AND_17)), DUP(op_AND_17))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_32), DUP(op_AND_32))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_41 = SHIFTL0(op_MUL_39, SN(32, 1)); + RzILOpPure *op_ADD_44 = ADD(op_LSHIFT_41, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_46 = SHIFTRA(op_ADD_44, SN(32, 16)); + RzILOpPure *op_RSHIFT_55 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_57 = LOGAND(op_RSHIFT_55, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_68 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_71 = LOGAND(op_RSHIFT_68, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_78 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_57), DUP(op_AND_57))), CAST(32, MSB(DUP(op_AND_57)), DUP(op_AND_57)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_57)), DUP(op_AND_57))), CAST(32, MSB(DUP(op_AND_57)), DUP(op_AND_57))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_57)), DUP(op_AND_57))), CAST(32, MSB(DUP(op_AND_57)), DUP(op_AND_57)))), CAST(64, MSB(CAST(32, 
MSB(DUP(op_AND_57)), DUP(op_AND_57))), CAST(32, MSB(DUP(op_AND_57)), DUP(op_AND_57))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_71), DUP(op_AND_71))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_80 = SHIFTL0(op_MUL_78, SN(32, 1)); + RzILOpPure *op_ADD_83 = ADD(op_LSHIFT_80, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_85 = SHIFTRA(op_ADD_83, SN(32, 16)); + RzILOpPure *op_EQ_86 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_RSHIFT_46), SN(32, 0), SN(32, 0x20)), op_RSHIFT_85); + RzILOpPure *op_RSHIFT_127 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_129 = LOGAND(op_RSHIFT_127, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_140 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_143 = LOGAND(op_RSHIFT_140, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_150 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_129), DUP(op_AND_129))), CAST(32, MSB(DUP(op_AND_129)), DUP(op_AND_129)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_129)), DUP(op_AND_129))), CAST(32, MSB(DUP(op_AND_129)), DUP(op_AND_129))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_129)), DUP(op_AND_129))), CAST(32, MSB(DUP(op_AND_129)), DUP(op_AND_129)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_129)), DUP(op_AND_129))), CAST(32, MSB(DUP(op_AND_129)), DUP(op_AND_129))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_143), DUP(op_AND_143))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_152 = SHIFTL0(op_MUL_150, SN(32, 1)); + RzILOpPure *op_ADD_155 = ADD(op_LSHIFT_152, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_157 = SHIFTRA(op_ADD_155, SN(32, 16)); + RzILOpPure *op_LT_160 = SLT(op_RSHIFT_157, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_165 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_166 = NEG(op_LSHIFT_165); + RzILOpPure *op_LSHIFT_171 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_174 = SUB(op_LSHIFT_171, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure 
*cond_175 = ITE(op_LT_160, op_NEG_166, op_SUB_174); + RzILOpEffect *gcc_expr_176 = BRANCH(op_EQ_86, EMPTY(), set_usr_field_call_123); + + // h_tmp310 = HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10) << 0x1) + ((st64) 0x8000) >> 0x10)), 0x0, 0x20) == ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10) << 0x1) + ((st64) 0x8000) >> 0x10))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10) << 0x1) + ((st64) 0x8000) >> 0x10) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_178 = SETL("h_tmp310", cond_175); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) ((st32) ((st6 ...; + RzILOpEffect *seq_179 = SEQN(2, gcc_expr_176, op_ASSIGN_hybrid_tmp_178); + + // Rdd = ((Rdd & (~(0xffffffff << 0x20))) | ((((sextract64(((ut64) ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10) << 0x1) + ((st64) 0x8000) >> 0x10)), 0x0, 0x20) == ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10) << 0x1) + ((st64) 0x8000) >> 0x10)) ? 
((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10) << 0x1) + ((st64) 0x8000) >> 0x10) : h_tmp310) & 0xffffffff) << 0x20)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0x20)); + RzILOpPure *op_NOT_6 = LOGNOT(op_LSHIFT_5); + RzILOpPure *op_AND_7 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_6); + RzILOpPure *op_RSHIFT_90 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_92 = LOGAND(op_RSHIFT_90, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_103 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_106 = LOGAND(op_RSHIFT_103, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_113 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_92), DUP(op_AND_92))), CAST(32, MSB(DUP(op_AND_92)), DUP(op_AND_92)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_92)), DUP(op_AND_92))), CAST(32, MSB(DUP(op_AND_92)), DUP(op_AND_92))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_92)), DUP(op_AND_92))), CAST(32, MSB(DUP(op_AND_92)), DUP(op_AND_92)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_92)), DUP(op_AND_92))), CAST(32, MSB(DUP(op_AND_92)), DUP(op_AND_92))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_106), DUP(op_AND_106))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_115 = SHIFTL0(op_MUL_113, SN(32, 1)); + RzILOpPure *op_ADD_118 = ADD(op_LSHIFT_115, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_120 = SHIFTRA(op_ADD_118, SN(32, 16)); + RzILOpPure *cond_180 = ITE(DUP(op_EQ_86), op_RSHIFT_120, VARL("h_tmp310")); + RzILOpPure *op_AND_182 = LOGAND(cond_180, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_186 = SHIFTL0(op_AND_182, SN(32, 0x20)); + RzILOpPure *op_OR_187 = LOGOR(op_AND_7, op_LSHIFT_186); + RzILOpEffect *op_ASSIGN_188 = WRITE_REG(bundle, Rdd_op, op_OR_187); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) ((st32) ( ...; + RzILOpEffect *seq_189 = SEQN(2, seq_179, op_ASSIGN_188); + + // 
set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_311 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10) << 0x1) + ((st64) 0x8000) >> 0x10)), 0x0, 0x20) == ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10) << 0x1) + ((st64) 0x8000) >> 0x10))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10) << 0x1) + ((st64) 0x8000) >> 0x10) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_204 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_206 = LOGAND(op_RSHIFT_204, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_217 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_220 = LOGAND(op_RSHIFT_217, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_227 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_206), DUP(op_AND_206))), CAST(32, MSB(DUP(op_AND_206)), DUP(op_AND_206)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_206)), DUP(op_AND_206))), CAST(32, MSB(DUP(op_AND_206)), DUP(op_AND_206))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_206)), DUP(op_AND_206))), CAST(32, MSB(DUP(op_AND_206)), DUP(op_AND_206)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_206)), DUP(op_AND_206))), CAST(32, MSB(DUP(op_AND_206)), DUP(op_AND_206))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_220), DUP(op_AND_220))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_229 = SHIFTL0(op_MUL_227, SN(32, 1)); + RzILOpPure *op_ADD_232 = ADD(op_LSHIFT_229, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure 
*op_RSHIFT_234 = SHIFTRA(op_ADD_232, SN(32, 16)); + RzILOpPure *op_RSHIFT_243 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_245 = LOGAND(op_RSHIFT_243, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_256 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_259 = LOGAND(op_RSHIFT_256, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_266 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_245), DUP(op_AND_245))), CAST(32, MSB(DUP(op_AND_245)), DUP(op_AND_245)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_245)), DUP(op_AND_245))), CAST(32, MSB(DUP(op_AND_245)), DUP(op_AND_245))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_245)), DUP(op_AND_245))), CAST(32, MSB(DUP(op_AND_245)), DUP(op_AND_245)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_245)), DUP(op_AND_245))), CAST(32, MSB(DUP(op_AND_245)), DUP(op_AND_245))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_259), DUP(op_AND_259))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_268 = SHIFTL0(op_MUL_266, SN(32, 1)); + RzILOpPure *op_ADD_271 = ADD(op_LSHIFT_268, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_273 = SHIFTRA(op_ADD_271, SN(32, 16)); + RzILOpPure *op_EQ_274 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_RSHIFT_234), SN(32, 0), SN(32, 0x20)), op_RSHIFT_273); + RzILOpPure *op_RSHIFT_315 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_317 = LOGAND(op_RSHIFT_315, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_328 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_331 = LOGAND(op_RSHIFT_328, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_338 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_317), DUP(op_AND_317))), CAST(32, MSB(DUP(op_AND_317)), DUP(op_AND_317)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_317)), DUP(op_AND_317))), CAST(32, MSB(DUP(op_AND_317)), DUP(op_AND_317))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_317)), DUP(op_AND_317))), CAST(32, MSB(DUP(op_AND_317)), DUP(op_AND_317)))), CAST(64, 
MSB(CAST(32, MSB(DUP(op_AND_317)), DUP(op_AND_317))), CAST(32, MSB(DUP(op_AND_317)), DUP(op_AND_317))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_331), DUP(op_AND_331))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_340 = SHIFTL0(op_MUL_338, SN(32, 1)); + RzILOpPure *op_ADD_343 = ADD(op_LSHIFT_340, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_345 = SHIFTRA(op_ADD_343, SN(32, 16)); + RzILOpPure *op_LT_348 = SLT(op_RSHIFT_345, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_353 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_354 = NEG(op_LSHIFT_353); + RzILOpPure *op_LSHIFT_359 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_362 = SUB(op_LSHIFT_359, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_363 = ITE(op_LT_348, op_NEG_354, op_SUB_362); + RzILOpEffect *gcc_expr_364 = BRANCH(op_EQ_274, EMPTY(), set_usr_field_call_311); + + // h_tmp311 = HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10) << 0x1) + ((st64) 0x8000) >> 0x10)), 0x0, 0x20) == ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10) << 0x1) + ((st64) 0x8000) >> 0x10))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10) << 0x1) + ((st64) 0x8000) >> 0x10) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_366 = SETL("h_tmp311", cond_363); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) ((st32) ((st6 ...; + RzILOpEffect *seq_367 = SEQN(2, gcc_expr_364, op_ASSIGN_hybrid_tmp_366); + + // Rdd = ((Rdd & (~(0xffffffff << 0x0))) | ((((sextract64(((ut64) ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10) << 0x1) + ((st64) 0x8000) >> 0x10)), 0x0, 0x20) == ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10) << 0x1) + ((st64) 0x8000) >> 0x10)) ? ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10) << 0x1) + ((st64) 0x8000) >> 0x10) : h_tmp311) & 0xffffffff) << 0x0)); + RzILOpPure *op_LSHIFT_195 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0)); + RzILOpPure *op_NOT_196 = LOGNOT(op_LSHIFT_195); + RzILOpPure *op_AND_197 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_196); + RzILOpPure *op_RSHIFT_278 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_280 = LOGAND(op_RSHIFT_278, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_291 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_294 = LOGAND(op_RSHIFT_291, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_301 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_280), DUP(op_AND_280))), CAST(32, MSB(DUP(op_AND_280)), DUP(op_AND_280)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_280)), DUP(op_AND_280))), CAST(32, MSB(DUP(op_AND_280)), DUP(op_AND_280))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_280)), DUP(op_AND_280))), CAST(32, MSB(DUP(op_AND_280)), DUP(op_AND_280)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_280)), DUP(op_AND_280))), CAST(32, MSB(DUP(op_AND_280)), DUP(op_AND_280))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_294), 
DUP(op_AND_294))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_303 = SHIFTL0(op_MUL_301, SN(32, 1)); + RzILOpPure *op_ADD_306 = ADD(op_LSHIFT_303, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_308 = SHIFTRA(op_ADD_306, SN(32, 16)); + RzILOpPure *cond_368 = ITE(DUP(op_EQ_274), op_RSHIFT_308, VARL("h_tmp311")); + RzILOpPure *op_AND_370 = LOGAND(cond_368, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_374 = SHIFTL0(op_AND_370, SN(32, 0)); + RzILOpPure *op_OR_375 = LOGOR(op_AND_197, op_LSHIFT_374); + RzILOpEffect *op_ASSIGN_376 = WRITE_REG(bundle, Rdd_op, op_OR_375); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) ((st32) ( ...; + RzILOpEffect *seq_377 = SEQN(2, seq_367, op_ASSIGN_376); + + RzILOpEffect *instruction_sequence = SEQN(2, seq_189, seq_377); + return instruction_sequence; +} + +// Rdd = vmpyweh(Rss,Rtt):sat +RzILOpEffect *hex_il_op_m2_mmpyl_s0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_114 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10) << 0x0) >> 0x10)), 0x0, 0x20) == ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10) << 0x0) >> 0x10))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) 
((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10) << 0x0) >> 0x10) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_15 = SHIFTRA(Rss, SN(32, 0x20)); + RzILOpPure *op_AND_17 = LOGAND(op_RSHIFT_15, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_29 = SHIFTRA(Rtt, SN(32, 0x20)); + RzILOpPure *op_AND_32 = LOGAND(op_RSHIFT_29, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_39 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_17), DUP(op_AND_17))), CAST(32, MSB(DUP(op_AND_17)), DUP(op_AND_17)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_17)), DUP(op_AND_17))), CAST(32, MSB(DUP(op_AND_17)), DUP(op_AND_17))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_17)), DUP(op_AND_17))), CAST(32, MSB(DUP(op_AND_17)), DUP(op_AND_17)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_17)), DUP(op_AND_17))), CAST(32, MSB(DUP(op_AND_17)), DUP(op_AND_17))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_32), DUP(op_AND_32))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_41 = SHIFTL0(op_MUL_39, SN(32, 0)); + RzILOpPure *op_RSHIFT_43 = SHIFTRA(op_LSHIFT_41, SN(32, 16)); + RzILOpPure *op_RSHIFT_52 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_54 = LOGAND(op_RSHIFT_52, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_65 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_68 = LOGAND(op_RSHIFT_65, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_75 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_54), DUP(op_AND_54))), CAST(32, MSB(DUP(op_AND_54)), DUP(op_AND_54)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_54)), DUP(op_AND_54))), CAST(32, MSB(DUP(op_AND_54)), DUP(op_AND_54))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_54)), DUP(op_AND_54))), CAST(32, MSB(DUP(op_AND_54)), DUP(op_AND_54)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_54)), DUP(op_AND_54))), CAST(32, MSB(DUP(op_AND_54)), DUP(op_AND_54))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_68), 
DUP(op_AND_68))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_77 = SHIFTL0(op_MUL_75, SN(32, 0)); + RzILOpPure *op_RSHIFT_79 = SHIFTRA(op_LSHIFT_77, SN(32, 16)); + RzILOpPure *op_EQ_80 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_RSHIFT_43), SN(32, 0), SN(32, 0x20)), op_RSHIFT_79); + RzILOpPure *op_RSHIFT_118 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_120 = LOGAND(op_RSHIFT_118, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_131 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_134 = LOGAND(op_RSHIFT_131, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_141 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_120), DUP(op_AND_120))), CAST(32, MSB(DUP(op_AND_120)), DUP(op_AND_120)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_120)), DUP(op_AND_120))), CAST(32, MSB(DUP(op_AND_120)), DUP(op_AND_120))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_120)), DUP(op_AND_120))), CAST(32, MSB(DUP(op_AND_120)), DUP(op_AND_120)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_120)), DUP(op_AND_120))), CAST(32, MSB(DUP(op_AND_120)), DUP(op_AND_120))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_134), DUP(op_AND_134))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_143 = SHIFTL0(op_MUL_141, SN(32, 0)); + RzILOpPure *op_RSHIFT_145 = SHIFTRA(op_LSHIFT_143, SN(32, 16)); + RzILOpPure *op_LT_148 = SLT(op_RSHIFT_145, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_153 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_154 = NEG(op_LSHIFT_153); + RzILOpPure *op_LSHIFT_159 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_162 = SUB(op_LSHIFT_159, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_163 = ITE(op_LT_148, op_NEG_154, op_SUB_162); + RzILOpEffect *gcc_expr_164 = BRANCH(op_EQ_80, EMPTY(), set_usr_field_call_114); + + // h_tmp312 = HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 
0x0, 0x10) << 0x0) >> 0x10)), 0x0, 0x20) == ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10) << 0x0) >> 0x10))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10) << 0x0) >> 0x10) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_166 = SETL("h_tmp312", cond_163); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) ((st32) ((st6 ...; + RzILOpEffect *seq_167 = SEQN(2, gcc_expr_164, op_ASSIGN_hybrid_tmp_166); + + // Rdd = ((Rdd & (~(0xffffffff << 0x20))) | ((((sextract64(((ut64) ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10) << 0x0) >> 0x10)), 0x0, 0x20) == ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10) << 0x0) >> 0x10)) ? 
((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10) << 0x0) >> 0x10) : h_tmp312) & 0xffffffff) << 0x20)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0x20)); + RzILOpPure *op_NOT_6 = LOGNOT(op_LSHIFT_5); + RzILOpPure *op_AND_7 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_6); + RzILOpPure *op_RSHIFT_84 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_86 = LOGAND(op_RSHIFT_84, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_97 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_100 = LOGAND(op_RSHIFT_97, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_107 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_86), DUP(op_AND_86))), CAST(32, MSB(DUP(op_AND_86)), DUP(op_AND_86)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_86)), DUP(op_AND_86))), CAST(32, MSB(DUP(op_AND_86)), DUP(op_AND_86))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_86)), DUP(op_AND_86))), CAST(32, MSB(DUP(op_AND_86)), DUP(op_AND_86)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_86)), DUP(op_AND_86))), CAST(32, MSB(DUP(op_AND_86)), DUP(op_AND_86))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_100), DUP(op_AND_100))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_109 = SHIFTL0(op_MUL_107, SN(32, 0)); + RzILOpPure *op_RSHIFT_111 = SHIFTRA(op_LSHIFT_109, SN(32, 16)); + RzILOpPure *cond_168 = ITE(DUP(op_EQ_80), op_RSHIFT_111, VARL("h_tmp312")); + RzILOpPure *op_AND_170 = LOGAND(cond_168, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_174 = SHIFTL0(op_AND_170, SN(32, 0x20)); + RzILOpPure *op_OR_175 = LOGOR(op_AND_7, op_LSHIFT_174); + RzILOpEffect *op_ASSIGN_176 = WRITE_REG(bundle, Rdd_op, op_OR_175); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) ((st32) ( ...; + RzILOpEffect *seq_177 = SEQN(2, seq_167, op_ASSIGN_176); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_290 = 
hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10) << 0x0) >> 0x10)), 0x0, 0x20) == ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10) << 0x0) >> 0x10))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10) << 0x0) >> 0x10) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_192 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_194 = LOGAND(op_RSHIFT_192, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_205 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_208 = LOGAND(op_RSHIFT_205, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_215 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_194), DUP(op_AND_194))), CAST(32, MSB(DUP(op_AND_194)), DUP(op_AND_194)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_194)), DUP(op_AND_194))), CAST(32, MSB(DUP(op_AND_194)), DUP(op_AND_194))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_194)), DUP(op_AND_194))), CAST(32, MSB(DUP(op_AND_194)), DUP(op_AND_194)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_194)), DUP(op_AND_194))), CAST(32, MSB(DUP(op_AND_194)), DUP(op_AND_194))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_208), DUP(op_AND_208))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_217 = SHIFTL0(op_MUL_215, SN(32, 0)); + RzILOpPure *op_RSHIFT_219 = SHIFTRA(op_LSHIFT_217, SN(32, 16)); + RzILOpPure *op_RSHIFT_228 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_230 = LOGAND(op_RSHIFT_228, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_241 = SHIFTRA(DUP(Rtt), SN(32, 0)); + 
RzILOpPure *op_AND_244 = LOGAND(op_RSHIFT_241, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_251 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_230), DUP(op_AND_230))), CAST(32, MSB(DUP(op_AND_230)), DUP(op_AND_230)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_230)), DUP(op_AND_230))), CAST(32, MSB(DUP(op_AND_230)), DUP(op_AND_230))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_230)), DUP(op_AND_230))), CAST(32, MSB(DUP(op_AND_230)), DUP(op_AND_230)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_230)), DUP(op_AND_230))), CAST(32, MSB(DUP(op_AND_230)), DUP(op_AND_230))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_244), DUP(op_AND_244))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_253 = SHIFTL0(op_MUL_251, SN(32, 0)); + RzILOpPure *op_RSHIFT_255 = SHIFTRA(op_LSHIFT_253, SN(32, 16)); + RzILOpPure *op_EQ_256 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_RSHIFT_219), SN(32, 0), SN(32, 0x20)), op_RSHIFT_255); + RzILOpPure *op_RSHIFT_294 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_296 = LOGAND(op_RSHIFT_294, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_307 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_310 = LOGAND(op_RSHIFT_307, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_317 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_296), DUP(op_AND_296))), CAST(32, MSB(DUP(op_AND_296)), DUP(op_AND_296)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_296)), DUP(op_AND_296))), CAST(32, MSB(DUP(op_AND_296)), DUP(op_AND_296))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_296)), DUP(op_AND_296))), CAST(32, MSB(DUP(op_AND_296)), DUP(op_AND_296)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_296)), DUP(op_AND_296))), CAST(32, MSB(DUP(op_AND_296)), DUP(op_AND_296))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_310), DUP(op_AND_310))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_319 = SHIFTL0(op_MUL_317, SN(32, 0)); + RzILOpPure *op_RSHIFT_321 = SHIFTRA(op_LSHIFT_319, SN(32, 
16)); + RzILOpPure *op_LT_324 = SLT(op_RSHIFT_321, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_329 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_330 = NEG(op_LSHIFT_329); + RzILOpPure *op_LSHIFT_335 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_338 = SUB(op_LSHIFT_335, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_339 = ITE(op_LT_324, op_NEG_330, op_SUB_338); + RzILOpEffect *gcc_expr_340 = BRANCH(op_EQ_256, EMPTY(), set_usr_field_call_290); + + // h_tmp313 = HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10) << 0x0) >> 0x10)), 0x0, 0x20) == ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10) << 0x0) >> 0x10))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10) << 0x0) >> 0x10) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_342 = SETL("h_tmp313", cond_339); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) ((st32) ((st6 ...; + RzILOpEffect *seq_343 = SEQN(2, gcc_expr_340, op_ASSIGN_hybrid_tmp_342); + + // Rdd = ((Rdd & (~(0xffffffff << 0x0))) | ((((sextract64(((ut64) ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10) << 0x0) >> 0x10)), 0x0, 0x20) == ((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10) << 0x0) >> 0x10)) ? 
((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10) << 0x0) >> 0x10) : h_tmp313) & 0xffffffff) << 0x0)); + RzILOpPure *op_LSHIFT_183 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0)); + RzILOpPure *op_NOT_184 = LOGNOT(op_LSHIFT_183); + RzILOpPure *op_AND_185 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_184); + RzILOpPure *op_RSHIFT_260 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_262 = LOGAND(op_RSHIFT_260, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_273 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_276 = LOGAND(op_RSHIFT_273, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_283 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_262), DUP(op_AND_262))), CAST(32, MSB(DUP(op_AND_262)), DUP(op_AND_262)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_262)), DUP(op_AND_262))), CAST(32, MSB(DUP(op_AND_262)), DUP(op_AND_262))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_262)), DUP(op_AND_262))), CAST(32, MSB(DUP(op_AND_262)), DUP(op_AND_262)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_262)), DUP(op_AND_262))), CAST(32, MSB(DUP(op_AND_262)), DUP(op_AND_262))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_276), DUP(op_AND_276))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_285 = SHIFTL0(op_MUL_283, SN(32, 0)); + RzILOpPure *op_RSHIFT_287 = SHIFTRA(op_LSHIFT_285, SN(32, 16)); + RzILOpPure *cond_344 = ITE(DUP(op_EQ_256), op_RSHIFT_287, VARL("h_tmp313")); + RzILOpPure *op_AND_346 = LOGAND(cond_344, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_350 = SHIFTL0(op_AND_346, SN(32, 0)); + RzILOpPure *op_OR_351 = LOGOR(op_AND_185, op_LSHIFT_350); + RzILOpEffect *op_ASSIGN_352 = WRITE_REG(bundle, Rdd_op, op_OR_351); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) ((st32) ( ...; + RzILOpEffect *seq_353 = SEQN(2, seq_343, op_ASSIGN_352); + + RzILOpEffect *instruction_sequence = SEQN(2, seq_177, seq_353); + return instruction_sequence; 
}

// Rdd = vmpyweh(Rss,Rtt):<<1:sat
//
// Generated RzIL lifter for M2_mmpyl_s1. For each 32-bit word of Rss, the
// word is multiplied by the sign-extended even (low) halfword of the
// corresponding word of Rtt; the product is shifted left by 1, arithmetically
// shifted right by 16, and saturated to 32 signed bits. The two word results
// are packed into the destination pair Rdd (high word first, then low word).
// Whenever a result does not fit in 32 signed bits, USR.OVF is set via
// hex_set_usr_field and the saturation bound is used instead.
RzILOpEffect *hex_il_op_m2_mmpyl_s1(HexInsnPktBundle *bundle) {
	const HexInsn *hi = bundle->insn;
	HexPkt *pkt = bundle->pkt;
	// READ
	const HexOp *Rdd_op = ISA2REG(hi, 'd', false);
	const HexOp *Rss_op = ISA2REG(hi, 's', false);
	RzILOpPure *Rss = READ_REG(pkt, Rss_op, false);
	const HexOp *Rtt_op = ISA2REG(hi, 't', false);
	RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false);

	// set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1));
	// Overflow side effect, used as the "else" branch of the high-word check.
	RzILOpEffect *set_usr_field_call_114 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1)));

	// High word: ((st32)(Rss >> 0x20)) * sext16(Rtt >> 0x20), << 1, >> 16.
	// op_EQ_80 is true iff the 64-bit result equals its own 32-bit
	// sign-extension, i.e. it fits in 32 signed bits (no saturation needed).
	RzILOpPure *op_RSHIFT_15 = SHIFTRA(Rss, SN(32, 0x20));
	RzILOpPure *op_AND_17 = LOGAND(op_RSHIFT_15, SN(64, 0xffffffff));
	RzILOpPure *op_RSHIFT_29 = SHIFTRA(Rtt, SN(32, 0x20));
	RzILOpPure *op_AND_32 = LOGAND(op_RSHIFT_29, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff)));
	RzILOpPure *op_MUL_39 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_17), DUP(op_AND_17))), CAST(32, MSB(DUP(op_AND_17)), DUP(op_AND_17)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_17)), DUP(op_AND_17))), CAST(32, MSB(DUP(op_AND_17)), DUP(op_AND_17))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_17)), DUP(op_AND_17))), CAST(32, MSB(DUP(op_AND_17)), DUP(op_AND_17)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_17)), DUP(op_AND_17))), CAST(32, MSB(DUP(op_AND_17)), DUP(op_AND_17))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_32), DUP(op_AND_32))), SN(32, 0), SN(32, 16)));
	RzILOpPure *op_LSHIFT_41 = SHIFTL0(op_MUL_39, SN(32, 1));
	RzILOpPure *op_RSHIFT_43 = SHIFTRA(op_LSHIFT_41, SN(32, 16));
	RzILOpPure *op_RSHIFT_52 = SHIFTRA(DUP(Rss), SN(32, 0x20));
	RzILOpPure *op_AND_54 = LOGAND(op_RSHIFT_52, SN(64, 0xffffffff));
	RzILOpPure *op_RSHIFT_65 = SHIFTRA(DUP(Rtt), SN(32, 0x20));
	RzILOpPure *op_AND_68 = LOGAND(op_RSHIFT_65, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff)));
	RzILOpPure *op_MUL_75 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_54), DUP(op_AND_54))), CAST(32, MSB(DUP(op_AND_54)), DUP(op_AND_54)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_54)), DUP(op_AND_54))), CAST(32, MSB(DUP(op_AND_54)), DUP(op_AND_54))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_54)), DUP(op_AND_54))), CAST(32, MSB(DUP(op_AND_54)), DUP(op_AND_54)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_54)), DUP(op_AND_54))), CAST(32, MSB(DUP(op_AND_54)), DUP(op_AND_54))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_68), DUP(op_AND_68))), SN(32, 0), SN(32, 16)));
	RzILOpPure *op_LSHIFT_77 = SHIFTL0(op_MUL_75, SN(32, 1));
	RzILOpPure *op_RSHIFT_79 = SHIFTRA(op_LSHIFT_77, SN(32, 16));
	RzILOpPure *op_EQ_80 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_RSHIFT_43), SN(32, 0), SN(32, 0x20)), op_RSHIFT_79);
	// Saturation bound for the high word: INT32_MIN (-(1 << 31)) when the
	// result is negative, INT32_MAX ((1 << 31) - 1) otherwise.
	RzILOpPure *op_RSHIFT_118 = SHIFTRA(DUP(Rss), SN(32, 0x20));
	RzILOpPure *op_AND_120 = LOGAND(op_RSHIFT_118, SN(64, 0xffffffff));
	RzILOpPure *op_RSHIFT_131 = SHIFTRA(DUP(Rtt), SN(32, 0x20));
	RzILOpPure *op_AND_134 = LOGAND(op_RSHIFT_131, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff)));
	RzILOpPure *op_MUL_141 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_120), DUP(op_AND_120))), CAST(32, MSB(DUP(op_AND_120)), DUP(op_AND_120)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_120)), DUP(op_AND_120))), CAST(32, MSB(DUP(op_AND_120)), DUP(op_AND_120))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_120)), DUP(op_AND_120))), CAST(32, MSB(DUP(op_AND_120)), DUP(op_AND_120)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_120)), DUP(op_AND_120))), CAST(32, MSB(DUP(op_AND_120)), DUP(op_AND_120))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_134), DUP(op_AND_134))), SN(32, 0), SN(32, 16)));
	RzILOpPure *op_LSHIFT_143 = SHIFTL0(op_MUL_141, SN(32, 1));
	RzILOpPure *op_RSHIFT_145 = SHIFTRA(op_LSHIFT_143, SN(32, 16));
	RzILOpPure *op_LT_148 = SLT(op_RSHIFT_145, CAST(64, MSB(SN(32, 0)), SN(32, 0)));
	RzILOpPure *op_LSHIFT_153 = SHIFTL0(SN(64, 1), SN(32, 31));
	RzILOpPure *op_NEG_154 = NEG(op_LSHIFT_153);
	RzILOpPure *op_LSHIFT_159 = SHIFTL0(SN(64, 1), SN(32, 31));
	RzILOpPure *op_SUB_162 = SUB(op_LSHIFT_159, CAST(64, MSB(SN(32, 1)), SN(32, 1)));
	RzILOpPure *cond_163 = ITE(op_LT_148, op_NEG_154, op_SUB_162);
	// If the high-word result does not fit in 32 bits, set USR.OVF.
	RzILOpEffect *gcc_expr_164 = BRANCH(op_EQ_80, EMPTY(), set_usr_field_call_114);

	// h_tmp314 caches the saturation bound used as the fallback value.
	RzILOpEffect *op_ASSIGN_hybrid_tmp_166 = SETL("h_tmp314", cond_163);

	// seq(overflow check; h_tmp314 assignment)
	RzILOpEffect *seq_167 = SEQN(2, gcc_expr_164, op_ASSIGN_hybrid_tmp_166);

	// Rdd[63:32] = (fits in 32 bits) ? result : h_tmp314 (saturated).
	RzILOpPure *op_LSHIFT_5 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0x20));
	RzILOpPure *op_NOT_6 = LOGNOT(op_LSHIFT_5);
	RzILOpPure *op_AND_7 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_6);
	RzILOpPure *op_RSHIFT_84 = SHIFTRA(DUP(Rss), SN(32, 0x20));
	RzILOpPure *op_AND_86 = LOGAND(op_RSHIFT_84, SN(64, 0xffffffff));
	RzILOpPure *op_RSHIFT_97 = SHIFTRA(DUP(Rtt), SN(32, 0x20));
	RzILOpPure *op_AND_100 = LOGAND(op_RSHIFT_97, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff)));
	RzILOpPure *op_MUL_107 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_86), DUP(op_AND_86))), CAST(32, MSB(DUP(op_AND_86)), DUP(op_AND_86)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_86)), DUP(op_AND_86))), CAST(32, MSB(DUP(op_AND_86)), DUP(op_AND_86))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_86)), DUP(op_AND_86))), CAST(32, MSB(DUP(op_AND_86)), DUP(op_AND_86)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_86)), DUP(op_AND_86))), CAST(32, MSB(DUP(op_AND_86)), DUP(op_AND_86))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_100), DUP(op_AND_100))), SN(32, 0), SN(32, 16)));
	RzILOpPure *op_LSHIFT_109 = SHIFTL0(op_MUL_107, SN(32, 1));
	RzILOpPure *op_RSHIFT_111 = SHIFTRA(op_LSHIFT_109, SN(32, 16));
	RzILOpPure *cond_168 = ITE(DUP(op_EQ_80), op_RSHIFT_111, VARL("h_tmp314"));
	RzILOpPure *op_AND_170 = LOGAND(cond_168, SN(64, 0xffffffff));
	RzILOpPure *op_LSHIFT_174 = SHIFTL0(op_AND_170, SN(32, 0x20));
	RzILOpPure *op_OR_175 = LOGOR(op_AND_7, op_LSHIFT_174);
	RzILOpEffect *op_ASSIGN_176 = WRITE_REG(bundle, Rdd_op, op_OR_175);

	// seq(high-word check; write Rdd[63:32])
	RzILOpEffect *seq_177 = SEQN(2, seq_167, op_ASSIGN_176);

	// set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1));
	// Overflow side effect for the low-word check.
	RzILOpEffect *set_usr_field_call_290 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1)));

	// Low word: same computation as above with word shift 0x0 instead of 0x20.
	// op_EQ_256 is the "fits in 32 signed bits" test for the low word.
	RzILOpPure *op_RSHIFT_192 = SHIFTRA(DUP(Rss), SN(32, 0));
	RzILOpPure *op_AND_194 = LOGAND(op_RSHIFT_192, SN(64, 0xffffffff));
	RzILOpPure *op_RSHIFT_205 = SHIFTRA(DUP(Rtt), SN(32, 0));
	RzILOpPure *op_AND_208 = LOGAND(op_RSHIFT_205, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff)));
	RzILOpPure *op_MUL_215 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_194), DUP(op_AND_194))), CAST(32, MSB(DUP(op_AND_194)), DUP(op_AND_194)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_194)), DUP(op_AND_194))), CAST(32, MSB(DUP(op_AND_194)), DUP(op_AND_194))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_194)), DUP(op_AND_194))), CAST(32, MSB(DUP(op_AND_194)), DUP(op_AND_194)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_194)), DUP(op_AND_194))), CAST(32, MSB(DUP(op_AND_194)), DUP(op_AND_194))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_208), DUP(op_AND_208))), SN(32, 0), SN(32, 16)));
	RzILOpPure *op_LSHIFT_217 = SHIFTL0(op_MUL_215, SN(32, 1));
	RzILOpPure *op_RSHIFT_219 = SHIFTRA(op_LSHIFT_217, SN(32, 16));
	RzILOpPure *op_RSHIFT_228 = SHIFTRA(DUP(Rss), SN(32, 0));
	RzILOpPure *op_AND_230 = LOGAND(op_RSHIFT_228, SN(64, 0xffffffff));
	RzILOpPure *op_RSHIFT_241 = SHIFTRA(DUP(Rtt), SN(32, 0));
	RzILOpPure *op_AND_244 = LOGAND(op_RSHIFT_241, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff)));
	RzILOpPure *op_MUL_251 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_230), DUP(op_AND_230))), CAST(32, MSB(DUP(op_AND_230)), DUP(op_AND_230)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_230)), DUP(op_AND_230))), CAST(32, MSB(DUP(op_AND_230)), DUP(op_AND_230))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_230)), DUP(op_AND_230))), CAST(32, MSB(DUP(op_AND_230)), DUP(op_AND_230)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_230)), DUP(op_AND_230))), CAST(32, MSB(DUP(op_AND_230)), DUP(op_AND_230))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_244), DUP(op_AND_244))), SN(32, 0), SN(32, 16)));
	RzILOpPure *op_LSHIFT_253 = SHIFTL0(op_MUL_251, SN(32, 1));
	RzILOpPure *op_RSHIFT_255 = SHIFTRA(op_LSHIFT_253, SN(32, 16));
	RzILOpPure *op_EQ_256 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_RSHIFT_219), SN(32, 0), SN(32, 0x20)), op_RSHIFT_255);
	// Saturation bound for the low word (INT32_MIN / INT32_MAX).
	RzILOpPure *op_RSHIFT_294 = SHIFTRA(DUP(Rss), SN(32, 0));
	RzILOpPure *op_AND_296 = LOGAND(op_RSHIFT_294, SN(64, 0xffffffff));
	RzILOpPure *op_RSHIFT_307 = SHIFTRA(DUP(Rtt), SN(32, 0));
	RzILOpPure *op_AND_310 = LOGAND(op_RSHIFT_307, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff)));
	RzILOpPure *op_MUL_317 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_296), DUP(op_AND_296))), CAST(32, MSB(DUP(op_AND_296)), DUP(op_AND_296)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_296)), DUP(op_AND_296))), CAST(32, MSB(DUP(op_AND_296)), DUP(op_AND_296))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_296)), DUP(op_AND_296))), CAST(32, MSB(DUP(op_AND_296)), DUP(op_AND_296)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_296)), DUP(op_AND_296))), CAST(32, MSB(DUP(op_AND_296)), DUP(op_AND_296))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_310), DUP(op_AND_310))), SN(32, 0), SN(32, 16)));
	RzILOpPure *op_LSHIFT_319 = SHIFTL0(op_MUL_317, SN(32, 1));
	RzILOpPure *op_RSHIFT_321 = SHIFTRA(op_LSHIFT_319, SN(32, 16));
	RzILOpPure *op_LT_324 = SLT(op_RSHIFT_321, CAST(64, MSB(SN(32, 0)), SN(32, 0)));
	RzILOpPure *op_LSHIFT_329 = SHIFTL0(SN(64, 1), SN(32, 31));
	RzILOpPure *op_NEG_330 = NEG(op_LSHIFT_329);
	RzILOpPure *op_LSHIFT_335 = SHIFTL0(SN(64, 1), SN(32, 31));
	RzILOpPure *op_SUB_338 = SUB(op_LSHIFT_335, CAST(64, MSB(SN(32, 1)), SN(32, 1)));
	RzILOpPure *cond_339 = ITE(op_LT_324, op_NEG_330, op_SUB_338);
	// If the low-word result does not fit in 32 bits, set USR.OVF.
	RzILOpEffect *gcc_expr_340 = BRANCH(op_EQ_256, EMPTY(), set_usr_field_call_290);

	// h_tmp315 caches the saturation bound used as the fallback value.
	RzILOpEffect *op_ASSIGN_hybrid_tmp_342 = SETL("h_tmp315", cond_339);

	// seq(overflow check; h_tmp315 assignment)
	RzILOpEffect *seq_343 = SEQN(2, gcc_expr_340, op_ASSIGN_hybrid_tmp_342);

	// Rdd[31:0] = (fits in 32 bits) ? result : h_tmp315 (saturated).
	RzILOpPure *op_LSHIFT_183 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0));
	RzILOpPure *op_NOT_184 = LOGNOT(op_LSHIFT_183);
	RzILOpPure *op_AND_185 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_184);
	RzILOpPure *op_RSHIFT_260 = SHIFTRA(DUP(Rss), SN(32, 0));
	RzILOpPure *op_AND_262 = LOGAND(op_RSHIFT_260, SN(64, 0xffffffff));
	RzILOpPure *op_RSHIFT_273 = SHIFTRA(DUP(Rtt), SN(32, 0));
	RzILOpPure *op_AND_276 = LOGAND(op_RSHIFT_273, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff)));
	RzILOpPure *op_MUL_283 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_262), DUP(op_AND_262))), CAST(32, MSB(DUP(op_AND_262)), DUP(op_AND_262)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_262)), DUP(op_AND_262))), CAST(32, MSB(DUP(op_AND_262)), DUP(op_AND_262))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_262)), DUP(op_AND_262))), CAST(32, MSB(DUP(op_AND_262)), DUP(op_AND_262)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_262)), DUP(op_AND_262))), CAST(32, MSB(DUP(op_AND_262)), DUP(op_AND_262))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_276), DUP(op_AND_276))), SN(32, 0), SN(32, 16)));
	RzILOpPure *op_LSHIFT_285 = SHIFTL0(op_MUL_283, SN(32, 1));
	RzILOpPure *op_RSHIFT_287 = SHIFTRA(op_LSHIFT_285, SN(32, 16));
	RzILOpPure *cond_344 = ITE(DUP(op_EQ_256), op_RSHIFT_287, VARL("h_tmp315"));
	RzILOpPure *op_AND_346 = LOGAND(cond_344, SN(64, 0xffffffff));
	RzILOpPure *op_LSHIFT_350 = SHIFTL0(op_AND_346, SN(32, 0));
	RzILOpPure *op_OR_351 = LOGOR(op_AND_185, op_LSHIFT_350);
	RzILOpEffect *op_ASSIGN_352 = WRITE_REG(bundle, Rdd_op, op_OR_351);

	// seq(low-word check; write Rdd[31:0])
	RzILOpEffect *seq_353 = SEQN(2, seq_343, op_ASSIGN_352);

	RzILOpEffect *instruction_sequence = SEQN(2, seq_177, seq_353);
	return instruction_sequence;
+} + +// Rdd = vmpywouh(Rss,Rtt):rnd:sat +RzILOpEffect *hex_il_op_m2_mmpyuh_rs0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_129 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x30) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) + ((st64) 0x8000) >> 0x10)), 0x0, 0x20) == ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x30) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) + ((st64) 0x8000) >> 0x10))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x30) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) + ((st64) 0x8000) >> 0x10) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_15 = SHIFTRA(Rss, SN(32, 0x20)); + RzILOpPure *op_AND_17 = LOGAND(op_RSHIFT_15, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_29 = SHIFTRA(Rtt, SN(32, 0x30)); + RzILOpPure *op_AND_32 = LOGAND(op_RSHIFT_29, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_40 = MUL(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_17), DUP(op_AND_17))), CAST(32, MSB(DUP(op_AND_17)), DUP(op_AND_17)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_17)), DUP(op_AND_17))), CAST(32, MSB(DUP(op_AND_17)), DUP(op_AND_17))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_17)), DUP(op_AND_17))), CAST(32, MSB(DUP(op_AND_17)), DUP(op_AND_17)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_17)), DUP(op_AND_17))), CAST(32, MSB(DUP(op_AND_17)), DUP(op_AND_17)))))), EXTRACT64(CAST(64, IL_FALSE, CAST(16, IL_FALSE, op_AND_32)), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_43 = SHIFTL0(CAST(64, IL_FALSE, op_MUL_40), SN(32, 0)); + RzILOpPure *op_ADD_46 = ADD(op_LSHIFT_43, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_48 = SHIFTRA(op_ADD_46, SN(32, 16)); + RzILOpPure *op_RSHIFT_57 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_59 = LOGAND(op_RSHIFT_57, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_70 = SHIFTRA(DUP(Rtt), SN(32, 0x30)); + RzILOpPure *op_AND_73 = LOGAND(op_RSHIFT_70, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_81 = MUL(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_59), DUP(op_AND_59))), CAST(32, MSB(DUP(op_AND_59)), DUP(op_AND_59)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_59)), DUP(op_AND_59))), CAST(32, MSB(DUP(op_AND_59)), DUP(op_AND_59))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_59)), DUP(op_AND_59))), CAST(32, MSB(DUP(op_AND_59)), DUP(op_AND_59)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_59)), DUP(op_AND_59))), CAST(32, MSB(DUP(op_AND_59)), DUP(op_AND_59)))))), 
EXTRACT64(CAST(64, IL_FALSE, CAST(16, IL_FALSE, op_AND_73)), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_84 = SHIFTL0(CAST(64, IL_FALSE, op_MUL_81), SN(32, 0)); + RzILOpPure *op_ADD_87 = ADD(op_LSHIFT_84, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_89 = SHIFTRA(op_ADD_87, SN(32, 16)); + RzILOpPure *op_EQ_90 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_RSHIFT_48), SN(32, 0), SN(32, 0x20)), op_RSHIFT_89); + RzILOpPure *op_RSHIFT_133 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_135 = LOGAND(op_RSHIFT_133, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_146 = SHIFTRA(DUP(Rtt), SN(32, 0x30)); + RzILOpPure *op_AND_149 = LOGAND(op_RSHIFT_146, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_157 = MUL(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_135), DUP(op_AND_135))), CAST(32, MSB(DUP(op_AND_135)), DUP(op_AND_135)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_135)), DUP(op_AND_135))), CAST(32, MSB(DUP(op_AND_135)), DUP(op_AND_135))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_135)), DUP(op_AND_135))), CAST(32, MSB(DUP(op_AND_135)), DUP(op_AND_135)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_135)), DUP(op_AND_135))), CAST(32, MSB(DUP(op_AND_135)), DUP(op_AND_135)))))), EXTRACT64(CAST(64, IL_FALSE, CAST(16, IL_FALSE, op_AND_149)), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_160 = SHIFTL0(CAST(64, IL_FALSE, op_MUL_157), SN(32, 0)); + RzILOpPure *op_ADD_163 = ADD(op_LSHIFT_160, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_165 = SHIFTRA(op_ADD_163, SN(32, 16)); + RzILOpPure *op_LT_168 = SLT(op_RSHIFT_165, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_173 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_174 = NEG(op_LSHIFT_173); + RzILOpPure *op_LSHIFT_179 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_182 = SUB(op_LSHIFT_179, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_183 = ITE(op_LT_168, op_NEG_174, op_SUB_182); + 
RzILOpEffect *gcc_expr_184 = BRANCH(op_EQ_90, EMPTY(), set_usr_field_call_129); + + // h_tmp316 = HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x30) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) + ((st64) 0x8000) >> 0x10)), 0x0, 0x20) == ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x30) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) + ((st64) 0x8000) >> 0x10))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x30) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) + ((st64) 0x8000) >> 0x10) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_186 = SETL("h_tmp316", cond_183); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) ((ut64) ((st6 ...; + RzILOpEffect *seq_187 = SEQN(2, gcc_expr_184, op_ASSIGN_hybrid_tmp_186); + + // Rdd = ((Rdd & (~(0xffffffff << 0x20))) | ((((sextract64(((ut64) ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x30) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) + ((st64) 0x8000) >> 0x10)), 0x0, 0x20) == ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x30) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) + ((st64) 0x8000) >> 0x10)) ? 
((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x30) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) + ((st64) 0x8000) >> 0x10) : h_tmp316) & 0xffffffff) << 0x20)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0x20)); + RzILOpPure *op_NOT_6 = LOGNOT(op_LSHIFT_5); + RzILOpPure *op_AND_7 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_6); + RzILOpPure *op_RSHIFT_94 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_96 = LOGAND(op_RSHIFT_94, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_107 = SHIFTRA(DUP(Rtt), SN(32, 0x30)); + RzILOpPure *op_AND_110 = LOGAND(op_RSHIFT_107, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_118 = MUL(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_96), DUP(op_AND_96))), CAST(32, MSB(DUP(op_AND_96)), DUP(op_AND_96)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_96)), DUP(op_AND_96))), CAST(32, MSB(DUP(op_AND_96)), DUP(op_AND_96))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_96)), DUP(op_AND_96))), CAST(32, MSB(DUP(op_AND_96)), DUP(op_AND_96)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_96)), DUP(op_AND_96))), CAST(32, MSB(DUP(op_AND_96)), DUP(op_AND_96)))))), EXTRACT64(CAST(64, IL_FALSE, CAST(16, IL_FALSE, op_AND_110)), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_121 = SHIFTL0(CAST(64, IL_FALSE, op_MUL_118), SN(32, 0)); + RzILOpPure *op_ADD_124 = ADD(op_LSHIFT_121, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_126 = SHIFTRA(op_ADD_124, SN(32, 16)); + RzILOpPure *cond_188 = ITE(DUP(op_EQ_90), op_RSHIFT_126, VARL("h_tmp316")); + RzILOpPure *op_AND_190 = LOGAND(cond_188, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_194 = SHIFTL0(op_AND_190, SN(32, 0x20)); + RzILOpPure *op_OR_195 = LOGOR(op_AND_7, op_LSHIFT_194); + RzILOpEffect *op_ASSIGN_196 = WRITE_REG(bundle, Rdd_op, op_OR_195); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) ((ut64) ( ...; + RzILOpEffect 
*seq_197 = SEQN(2, seq_187, op_ASSIGN_196); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_325 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) + ((st64) 0x8000) >> 0x10)), 0x0, 0x20) == ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) + ((st64) 0x8000) >> 0x10))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) + ((st64) 0x8000) >> 0x10) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_212 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_214 = LOGAND(op_RSHIFT_212, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_225 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_228 = LOGAND(op_RSHIFT_225, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_236 = MUL(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_214), DUP(op_AND_214))), CAST(32, MSB(DUP(op_AND_214)), DUP(op_AND_214)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_214)), DUP(op_AND_214))), CAST(32, MSB(DUP(op_AND_214)), DUP(op_AND_214))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_214)), DUP(op_AND_214))), CAST(32, MSB(DUP(op_AND_214)), DUP(op_AND_214)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_214)), DUP(op_AND_214))), CAST(32, MSB(DUP(op_AND_214)), DUP(op_AND_214)))))), EXTRACT64(CAST(64, IL_FALSE, CAST(16, IL_FALSE, op_AND_228)), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_239 = SHIFTL0(CAST(64, IL_FALSE, 
op_MUL_236), SN(32, 0)); + RzILOpPure *op_ADD_242 = ADD(op_LSHIFT_239, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_244 = SHIFTRA(op_ADD_242, SN(32, 16)); + RzILOpPure *op_RSHIFT_253 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_255 = LOGAND(op_RSHIFT_253, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_266 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_269 = LOGAND(op_RSHIFT_266, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_277 = MUL(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_255), DUP(op_AND_255))), CAST(32, MSB(DUP(op_AND_255)), DUP(op_AND_255)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_255)), DUP(op_AND_255))), CAST(32, MSB(DUP(op_AND_255)), DUP(op_AND_255))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_255)), DUP(op_AND_255))), CAST(32, MSB(DUP(op_AND_255)), DUP(op_AND_255)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_255)), DUP(op_AND_255))), CAST(32, MSB(DUP(op_AND_255)), DUP(op_AND_255)))))), EXTRACT64(CAST(64, IL_FALSE, CAST(16, IL_FALSE, op_AND_269)), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_280 = SHIFTL0(CAST(64, IL_FALSE, op_MUL_277), SN(32, 0)); + RzILOpPure *op_ADD_283 = ADD(op_LSHIFT_280, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_285 = SHIFTRA(op_ADD_283, SN(32, 16)); + RzILOpPure *op_EQ_286 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_RSHIFT_244), SN(32, 0), SN(32, 0x20)), op_RSHIFT_285); + RzILOpPure *op_RSHIFT_329 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_331 = LOGAND(op_RSHIFT_329, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_342 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_345 = LOGAND(op_RSHIFT_342, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_353 = MUL(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_331), DUP(op_AND_331))), CAST(32, MSB(DUP(op_AND_331)), DUP(op_AND_331)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_331)), DUP(op_AND_331))), 
CAST(32, MSB(DUP(op_AND_331)), DUP(op_AND_331))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_331)), DUP(op_AND_331))), CAST(32, MSB(DUP(op_AND_331)), DUP(op_AND_331)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_331)), DUP(op_AND_331))), CAST(32, MSB(DUP(op_AND_331)), DUP(op_AND_331)))))), EXTRACT64(CAST(64, IL_FALSE, CAST(16, IL_FALSE, op_AND_345)), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_356 = SHIFTL0(CAST(64, IL_FALSE, op_MUL_353), SN(32, 0)); + RzILOpPure *op_ADD_359 = ADD(op_LSHIFT_356, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_361 = SHIFTRA(op_ADD_359, SN(32, 16)); + RzILOpPure *op_LT_364 = SLT(op_RSHIFT_361, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_369 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_370 = NEG(op_LSHIFT_369); + RzILOpPure *op_LSHIFT_375 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_378 = SUB(op_LSHIFT_375, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_379 = ITE(op_LT_364, op_NEG_370, op_SUB_378); + RzILOpEffect *gcc_expr_380 = BRANCH(op_EQ_286, EMPTY(), set_usr_field_call_325); + + // h_tmp317 = HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) + ((st64) 0x8000) >> 0x10)), 0x0, 0x20) == ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) + ((st64) 0x8000) >> 0x10))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) + ((st64) 0x8000) >> 0x10) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_382 = SETL("h_tmp317", cond_379); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) ((ut64) ((st6 ...; + RzILOpEffect *seq_383 = SEQN(2, gcc_expr_380, op_ASSIGN_hybrid_tmp_382); + + // Rdd = ((Rdd & (~(0xffffffff << 0x0))) | ((((sextract64(((ut64) ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) + ((st64) 0x8000) >> 0x10)), 0x0, 0x20) == ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) + ((st64) 0x8000) >> 0x10)) ? ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) + ((st64) 0x8000) >> 0x10) : h_tmp317) & 0xffffffff) << 0x0)); + RzILOpPure *op_LSHIFT_203 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0)); + RzILOpPure *op_NOT_204 = LOGNOT(op_LSHIFT_203); + RzILOpPure *op_AND_205 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_204); + RzILOpPure *op_RSHIFT_290 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_292 = LOGAND(op_RSHIFT_290, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_303 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_306 = LOGAND(op_RSHIFT_303, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_314 = MUL(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_292), DUP(op_AND_292))), CAST(32, MSB(DUP(op_AND_292)), DUP(op_AND_292)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_292)), DUP(op_AND_292))), CAST(32, MSB(DUP(op_AND_292)), DUP(op_AND_292))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_292)), DUP(op_AND_292))), CAST(32, MSB(DUP(op_AND_292)), DUP(op_AND_292)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_292)), DUP(op_AND_292))), CAST(32, MSB(DUP(op_AND_292)), 
DUP(op_AND_292)))))), EXTRACT64(CAST(64, IL_FALSE, CAST(16, IL_FALSE, op_AND_306)), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_317 = SHIFTL0(CAST(64, IL_FALSE, op_MUL_314), SN(32, 0)); + RzILOpPure *op_ADD_320 = ADD(op_LSHIFT_317, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_322 = SHIFTRA(op_ADD_320, SN(32, 16)); + RzILOpPure *cond_384 = ITE(DUP(op_EQ_286), op_RSHIFT_322, VARL("h_tmp317")); + RzILOpPure *op_AND_386 = LOGAND(cond_384, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_390 = SHIFTL0(op_AND_386, SN(32, 0)); + RzILOpPure *op_OR_391 = LOGOR(op_AND_205, op_LSHIFT_390); + RzILOpEffect *op_ASSIGN_392 = WRITE_REG(bundle, Rdd_op, op_OR_391); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) ((ut64) ( ...; + RzILOpEffect *seq_393 = SEQN(2, seq_383, op_ASSIGN_392); + + RzILOpEffect *instruction_sequence = SEQN(2, seq_197, seq_393); + return instruction_sequence; +} + +// Rdd = vmpywouh(Rss,Rtt):<<1:rnd:sat +RzILOpEffect *hex_il_op_m2_mmpyuh_rs1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_129 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x30) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) + ((st64) 0x8000) >> 0x10)), 0x0, 0x20) == ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x30) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) + ((st64) 0x8000) >> 0x10))) 
{{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x30) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) + ((st64) 0x8000) >> 0x10) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_15 = SHIFTRA(Rss, SN(32, 0x20)); + RzILOpPure *op_AND_17 = LOGAND(op_RSHIFT_15, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_29 = SHIFTRA(Rtt, SN(32, 0x30)); + RzILOpPure *op_AND_32 = LOGAND(op_RSHIFT_29, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_40 = MUL(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_17), DUP(op_AND_17))), CAST(32, MSB(DUP(op_AND_17)), DUP(op_AND_17)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_17)), DUP(op_AND_17))), CAST(32, MSB(DUP(op_AND_17)), DUP(op_AND_17))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_17)), DUP(op_AND_17))), CAST(32, MSB(DUP(op_AND_17)), DUP(op_AND_17)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_17)), DUP(op_AND_17))), CAST(32, MSB(DUP(op_AND_17)), DUP(op_AND_17)))))), EXTRACT64(CAST(64, IL_FALSE, CAST(16, IL_FALSE, op_AND_32)), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_43 = SHIFTL0(CAST(64, IL_FALSE, op_MUL_40), SN(32, 1)); + RzILOpPure *op_ADD_46 = ADD(op_LSHIFT_43, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_48 = SHIFTRA(op_ADD_46, SN(32, 16)); + RzILOpPure *op_RSHIFT_57 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_59 = LOGAND(op_RSHIFT_57, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_70 = SHIFTRA(DUP(Rtt), SN(32, 0x30)); + RzILOpPure *op_AND_73 = LOGAND(op_RSHIFT_70, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_81 = MUL(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_59), DUP(op_AND_59))), CAST(32, MSB(DUP(op_AND_59)), DUP(op_AND_59)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_59)), DUP(op_AND_59))), CAST(32, 
MSB(DUP(op_AND_59)), DUP(op_AND_59))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_59)), DUP(op_AND_59))), CAST(32, MSB(DUP(op_AND_59)), DUP(op_AND_59)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_59)), DUP(op_AND_59))), CAST(32, MSB(DUP(op_AND_59)), DUP(op_AND_59)))))), EXTRACT64(CAST(64, IL_FALSE, CAST(16, IL_FALSE, op_AND_73)), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_84 = SHIFTL0(CAST(64, IL_FALSE, op_MUL_81), SN(32, 1)); + RzILOpPure *op_ADD_87 = ADD(op_LSHIFT_84, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_89 = SHIFTRA(op_ADD_87, SN(32, 16)); + RzILOpPure *op_EQ_90 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_RSHIFT_48), SN(32, 0), SN(32, 0x20)), op_RSHIFT_89); + RzILOpPure *op_RSHIFT_133 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_135 = LOGAND(op_RSHIFT_133, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_146 = SHIFTRA(DUP(Rtt), SN(32, 0x30)); + RzILOpPure *op_AND_149 = LOGAND(op_RSHIFT_146, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_157 = MUL(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_135), DUP(op_AND_135))), CAST(32, MSB(DUP(op_AND_135)), DUP(op_AND_135)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_135)), DUP(op_AND_135))), CAST(32, MSB(DUP(op_AND_135)), DUP(op_AND_135))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_135)), DUP(op_AND_135))), CAST(32, MSB(DUP(op_AND_135)), DUP(op_AND_135)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_135)), DUP(op_AND_135))), CAST(32, MSB(DUP(op_AND_135)), DUP(op_AND_135)))))), EXTRACT64(CAST(64, IL_FALSE, CAST(16, IL_FALSE, op_AND_149)), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_160 = SHIFTL0(CAST(64, IL_FALSE, op_MUL_157), SN(32, 1)); + RzILOpPure *op_ADD_163 = ADD(op_LSHIFT_160, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_165 = SHIFTRA(op_ADD_163, SN(32, 16)); + RzILOpPure *op_LT_168 = SLT(op_RSHIFT_165, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_173 = SHIFTL0(SN(64, 
1), SN(32, 31)); + RzILOpPure *op_NEG_174 = NEG(op_LSHIFT_173); + RzILOpPure *op_LSHIFT_179 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_182 = SUB(op_LSHIFT_179, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_183 = ITE(op_LT_168, op_NEG_174, op_SUB_182); + RzILOpEffect *gcc_expr_184 = BRANCH(op_EQ_90, EMPTY(), set_usr_field_call_129); + + // h_tmp318 = HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x30) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) + ((st64) 0x8000) >> 0x10)), 0x0, 0x20) == ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x30) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) + ((st64) 0x8000) >> 0x10))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x30) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) + ((st64) 0x8000) >> 0x10) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_186 = SETL("h_tmp318", cond_183); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) ((ut64) ((st6 ...; + RzILOpEffect *seq_187 = SEQN(2, gcc_expr_184, op_ASSIGN_hybrid_tmp_186); + + // Rdd = ((Rdd & (~(0xffffffff << 0x20))) | ((((sextract64(((ut64) ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x30) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) + ((st64) 0x8000) >> 0x10)), 0x0, 0x20) == ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x30) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) + ((st64) 0x8000) >> 0x10)) ? 
((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x30) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) + ((st64) 0x8000) >> 0x10) : h_tmp318) & 0xffffffff) << 0x20)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0x20)); + RzILOpPure *op_NOT_6 = LOGNOT(op_LSHIFT_5); + RzILOpPure *op_AND_7 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_6); + RzILOpPure *op_RSHIFT_94 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_96 = LOGAND(op_RSHIFT_94, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_107 = SHIFTRA(DUP(Rtt), SN(32, 0x30)); + RzILOpPure *op_AND_110 = LOGAND(op_RSHIFT_107, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_118 = MUL(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_96), DUP(op_AND_96))), CAST(32, MSB(DUP(op_AND_96)), DUP(op_AND_96)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_96)), DUP(op_AND_96))), CAST(32, MSB(DUP(op_AND_96)), DUP(op_AND_96))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_96)), DUP(op_AND_96))), CAST(32, MSB(DUP(op_AND_96)), DUP(op_AND_96)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_96)), DUP(op_AND_96))), CAST(32, MSB(DUP(op_AND_96)), DUP(op_AND_96)))))), EXTRACT64(CAST(64, IL_FALSE, CAST(16, IL_FALSE, op_AND_110)), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_121 = SHIFTL0(CAST(64, IL_FALSE, op_MUL_118), SN(32, 1)); + RzILOpPure *op_ADD_124 = ADD(op_LSHIFT_121, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_126 = SHIFTRA(op_ADD_124, SN(32, 16)); + RzILOpPure *cond_188 = ITE(DUP(op_EQ_90), op_RSHIFT_126, VARL("h_tmp318")); + RzILOpPure *op_AND_190 = LOGAND(cond_188, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_194 = SHIFTL0(op_AND_190, SN(32, 0x20)); + RzILOpPure *op_OR_195 = LOGOR(op_AND_7, op_LSHIFT_194); + RzILOpEffect *op_ASSIGN_196 = WRITE_REG(bundle, Rdd_op, op_OR_195); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) ((ut64) ( ...; + RzILOpEffect 
*seq_197 = SEQN(2, seq_187, op_ASSIGN_196); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_325 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) + ((st64) 0x8000) >> 0x10)), 0x0, 0x20) == ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) + ((st64) 0x8000) >> 0x10))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) + ((st64) 0x8000) >> 0x10) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_212 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_214 = LOGAND(op_RSHIFT_212, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_225 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_228 = LOGAND(op_RSHIFT_225, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_236 = MUL(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_214), DUP(op_AND_214))), CAST(32, MSB(DUP(op_AND_214)), DUP(op_AND_214)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_214)), DUP(op_AND_214))), CAST(32, MSB(DUP(op_AND_214)), DUP(op_AND_214))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_214)), DUP(op_AND_214))), CAST(32, MSB(DUP(op_AND_214)), DUP(op_AND_214)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_214)), DUP(op_AND_214))), CAST(32, MSB(DUP(op_AND_214)), DUP(op_AND_214)))))), EXTRACT64(CAST(64, IL_FALSE, CAST(16, IL_FALSE, op_AND_228)), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_239 = SHIFTL0(CAST(64, IL_FALSE, 
op_MUL_236), SN(32, 1)); + RzILOpPure *op_ADD_242 = ADD(op_LSHIFT_239, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_244 = SHIFTRA(op_ADD_242, SN(32, 16)); + RzILOpPure *op_RSHIFT_253 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_255 = LOGAND(op_RSHIFT_253, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_266 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_269 = LOGAND(op_RSHIFT_266, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_277 = MUL(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_255), DUP(op_AND_255))), CAST(32, MSB(DUP(op_AND_255)), DUP(op_AND_255)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_255)), DUP(op_AND_255))), CAST(32, MSB(DUP(op_AND_255)), DUP(op_AND_255))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_255)), DUP(op_AND_255))), CAST(32, MSB(DUP(op_AND_255)), DUP(op_AND_255)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_255)), DUP(op_AND_255))), CAST(32, MSB(DUP(op_AND_255)), DUP(op_AND_255)))))), EXTRACT64(CAST(64, IL_FALSE, CAST(16, IL_FALSE, op_AND_269)), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_280 = SHIFTL0(CAST(64, IL_FALSE, op_MUL_277), SN(32, 1)); + RzILOpPure *op_ADD_283 = ADD(op_LSHIFT_280, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_285 = SHIFTRA(op_ADD_283, SN(32, 16)); + RzILOpPure *op_EQ_286 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_RSHIFT_244), SN(32, 0), SN(32, 0x20)), op_RSHIFT_285); + RzILOpPure *op_RSHIFT_329 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_331 = LOGAND(op_RSHIFT_329, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_342 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_345 = LOGAND(op_RSHIFT_342, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_353 = MUL(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_331), DUP(op_AND_331))), CAST(32, MSB(DUP(op_AND_331)), DUP(op_AND_331)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_331)), DUP(op_AND_331))), 
CAST(32, MSB(DUP(op_AND_331)), DUP(op_AND_331))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_331)), DUP(op_AND_331))), CAST(32, MSB(DUP(op_AND_331)), DUP(op_AND_331)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_331)), DUP(op_AND_331))), CAST(32, MSB(DUP(op_AND_331)), DUP(op_AND_331)))))), EXTRACT64(CAST(64, IL_FALSE, CAST(16, IL_FALSE, op_AND_345)), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_356 = SHIFTL0(CAST(64, IL_FALSE, op_MUL_353), SN(32, 1)); + RzILOpPure *op_ADD_359 = ADD(op_LSHIFT_356, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_361 = SHIFTRA(op_ADD_359, SN(32, 16)); + RzILOpPure *op_LT_364 = SLT(op_RSHIFT_361, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_369 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_370 = NEG(op_LSHIFT_369); + RzILOpPure *op_LSHIFT_375 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_378 = SUB(op_LSHIFT_375, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_379 = ITE(op_LT_364, op_NEG_370, op_SUB_378); + RzILOpEffect *gcc_expr_380 = BRANCH(op_EQ_286, EMPTY(), set_usr_field_call_325); + + // h_tmp319 = HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) + ((st64) 0x8000) >> 0x10)), 0x0, 0x20) == ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) + ((st64) 0x8000) >> 0x10))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) + ((st64) 0x8000) >> 0x10) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_382 = SETL("h_tmp319", cond_379); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) ((ut64) ((st6 ...; + RzILOpEffect *seq_383 = SEQN(2, gcc_expr_380, op_ASSIGN_hybrid_tmp_382); + + // Rdd = ((Rdd & (~(0xffffffff << 0x0))) | ((((sextract64(((ut64) ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) + ((st64) 0x8000) >> 0x10)), 0x0, 0x20) == ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) + ((st64) 0x8000) >> 0x10)) ? ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) + ((st64) 0x8000) >> 0x10) : h_tmp319) & 0xffffffff) << 0x0)); + RzILOpPure *op_LSHIFT_203 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0)); + RzILOpPure *op_NOT_204 = LOGNOT(op_LSHIFT_203); + RzILOpPure *op_AND_205 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_204); + RzILOpPure *op_RSHIFT_290 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_292 = LOGAND(op_RSHIFT_290, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_303 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_306 = LOGAND(op_RSHIFT_303, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_314 = MUL(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_292), DUP(op_AND_292))), CAST(32, MSB(DUP(op_AND_292)), DUP(op_AND_292)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_292)), DUP(op_AND_292))), CAST(32, MSB(DUP(op_AND_292)), DUP(op_AND_292))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_292)), DUP(op_AND_292))), CAST(32, MSB(DUP(op_AND_292)), DUP(op_AND_292)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_292)), DUP(op_AND_292))), CAST(32, MSB(DUP(op_AND_292)), 
DUP(op_AND_292)))))), EXTRACT64(CAST(64, IL_FALSE, CAST(16, IL_FALSE, op_AND_306)), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_317 = SHIFTL0(CAST(64, IL_FALSE, op_MUL_314), SN(32, 1)); + RzILOpPure *op_ADD_320 = ADD(op_LSHIFT_317, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_322 = SHIFTRA(op_ADD_320, SN(32, 16)); + RzILOpPure *cond_384 = ITE(DUP(op_EQ_286), op_RSHIFT_322, VARL("h_tmp319")); + RzILOpPure *op_AND_386 = LOGAND(cond_384, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_390 = SHIFTL0(op_AND_386, SN(32, 0)); + RzILOpPure *op_OR_391 = LOGOR(op_AND_205, op_LSHIFT_390); + RzILOpEffect *op_ASSIGN_392 = WRITE_REG(bundle, Rdd_op, op_OR_391); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) ((ut64) ( ...; + RzILOpEffect *seq_393 = SEQN(2, seq_383, op_ASSIGN_392); + + RzILOpEffect *instruction_sequence = SEQN(2, seq_197, seq_393); + return instruction_sequence; +} + +// Rdd = vmpywouh(Rss,Rtt):sat +RzILOpEffect *hex_il_op_m2_mmpyuh_s0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_120 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x30) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) >> 0x10)), 0x0, 0x20) == ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x30) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) >> 0x10))) {{}} else {set_usr_field(bundle, 
HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x30) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) >> 0x10) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_15 = SHIFTRA(Rss, SN(32, 0x20)); + RzILOpPure *op_AND_17 = LOGAND(op_RSHIFT_15, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_29 = SHIFTRA(Rtt, SN(32, 0x30)); + RzILOpPure *op_AND_32 = LOGAND(op_RSHIFT_29, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_40 = MUL(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_17), DUP(op_AND_17))), CAST(32, MSB(DUP(op_AND_17)), DUP(op_AND_17)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_17)), DUP(op_AND_17))), CAST(32, MSB(DUP(op_AND_17)), DUP(op_AND_17))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_17)), DUP(op_AND_17))), CAST(32, MSB(DUP(op_AND_17)), DUP(op_AND_17)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_17)), DUP(op_AND_17))), CAST(32, MSB(DUP(op_AND_17)), DUP(op_AND_17)))))), EXTRACT64(CAST(64, IL_FALSE, CAST(16, IL_FALSE, op_AND_32)), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_43 = SHIFTL0(CAST(64, IL_FALSE, op_MUL_40), SN(32, 0)); + RzILOpPure *op_RSHIFT_45 = SHIFTRA(op_LSHIFT_43, SN(32, 16)); + RzILOpPure *op_RSHIFT_54 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_56 = LOGAND(op_RSHIFT_54, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_67 = SHIFTRA(DUP(Rtt), SN(32, 0x30)); + RzILOpPure *op_AND_70 = LOGAND(op_RSHIFT_67, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_78 = MUL(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_56), DUP(op_AND_56))), CAST(32, MSB(DUP(op_AND_56)), DUP(op_AND_56)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_56)), DUP(op_AND_56))), CAST(32, MSB(DUP(op_AND_56)), DUP(op_AND_56))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_56)), DUP(op_AND_56))), CAST(32, 
MSB(DUP(op_AND_56)), DUP(op_AND_56)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_56)), DUP(op_AND_56))), CAST(32, MSB(DUP(op_AND_56)), DUP(op_AND_56)))))), EXTRACT64(CAST(64, IL_FALSE, CAST(16, IL_FALSE, op_AND_70)), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_81 = SHIFTL0(CAST(64, IL_FALSE, op_MUL_78), SN(32, 0)); + RzILOpPure *op_RSHIFT_83 = SHIFTRA(op_LSHIFT_81, SN(32, 16)); + RzILOpPure *op_EQ_84 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_RSHIFT_45), SN(32, 0), SN(32, 0x20)), op_RSHIFT_83); + RzILOpPure *op_RSHIFT_124 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_126 = LOGAND(op_RSHIFT_124, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_137 = SHIFTRA(DUP(Rtt), SN(32, 0x30)); + RzILOpPure *op_AND_140 = LOGAND(op_RSHIFT_137, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_148 = MUL(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_126), DUP(op_AND_126))), CAST(32, MSB(DUP(op_AND_126)), DUP(op_AND_126)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_126)), DUP(op_AND_126))), CAST(32, MSB(DUP(op_AND_126)), DUP(op_AND_126))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_126)), DUP(op_AND_126))), CAST(32, MSB(DUP(op_AND_126)), DUP(op_AND_126)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_126)), DUP(op_AND_126))), CAST(32, MSB(DUP(op_AND_126)), DUP(op_AND_126)))))), EXTRACT64(CAST(64, IL_FALSE, CAST(16, IL_FALSE, op_AND_140)), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_151 = SHIFTL0(CAST(64, IL_FALSE, op_MUL_148), SN(32, 0)); + RzILOpPure *op_RSHIFT_153 = SHIFTRA(op_LSHIFT_151, SN(32, 16)); + RzILOpPure *op_LT_156 = SLT(op_RSHIFT_153, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_161 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_162 = NEG(op_LSHIFT_161); + RzILOpPure *op_LSHIFT_167 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_170 = SUB(op_LSHIFT_167, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_171 = ITE(op_LT_156, op_NEG_162, op_SUB_170); + RzILOpEffect *gcc_expr_172 
= BRANCH(op_EQ_84, EMPTY(), set_usr_field_call_120); + + // h_tmp320 = HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x30) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) >> 0x10)), 0x0, 0x20) == ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x30) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) >> 0x10))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x30) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) >> 0x10) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_174 = SETL("h_tmp320", cond_171); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) ((ut64) ((st6 ...; + RzILOpEffect *seq_175 = SEQN(2, gcc_expr_172, op_ASSIGN_hybrid_tmp_174); + + // Rdd = ((Rdd & (~(0xffffffff << 0x20))) | ((((sextract64(((ut64) ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x30) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) >> 0x10)), 0x0, 0x20) == ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x30) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) >> 0x10)) ? 
((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x30) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) >> 0x10) : h_tmp320) & 0xffffffff) << 0x20)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0x20)); + RzILOpPure *op_NOT_6 = LOGNOT(op_LSHIFT_5); + RzILOpPure *op_AND_7 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_6); + RzILOpPure *op_RSHIFT_88 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_90 = LOGAND(op_RSHIFT_88, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_101 = SHIFTRA(DUP(Rtt), SN(32, 0x30)); + RzILOpPure *op_AND_104 = LOGAND(op_RSHIFT_101, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_112 = MUL(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_90), DUP(op_AND_90))), CAST(32, MSB(DUP(op_AND_90)), DUP(op_AND_90)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_90)), DUP(op_AND_90))), CAST(32, MSB(DUP(op_AND_90)), DUP(op_AND_90))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_90)), DUP(op_AND_90))), CAST(32, MSB(DUP(op_AND_90)), DUP(op_AND_90)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_90)), DUP(op_AND_90))), CAST(32, MSB(DUP(op_AND_90)), DUP(op_AND_90)))))), EXTRACT64(CAST(64, IL_FALSE, CAST(16, IL_FALSE, op_AND_104)), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_115 = SHIFTL0(CAST(64, IL_FALSE, op_MUL_112), SN(32, 0)); + RzILOpPure *op_RSHIFT_117 = SHIFTRA(op_LSHIFT_115, SN(32, 16)); + RzILOpPure *cond_176 = ITE(DUP(op_EQ_84), op_RSHIFT_117, VARL("h_tmp320")); + RzILOpPure *op_AND_178 = LOGAND(cond_176, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_182 = SHIFTL0(op_AND_178, SN(32, 0x20)); + RzILOpPure *op_OR_183 = LOGOR(op_AND_7, op_LSHIFT_182); + RzILOpEffect *op_ASSIGN_184 = WRITE_REG(bundle, Rdd_op, op_OR_183); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) ((ut64) ( ...; + RzILOpEffect *seq_185 = SEQN(2, seq_175, op_ASSIGN_184); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + 
RzILOpEffect *set_usr_field_call_304 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) >> 0x10)), 0x0, 0x20) == ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) >> 0x10))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) >> 0x10) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_200 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_202 = LOGAND(op_RSHIFT_200, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_213 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_216 = LOGAND(op_RSHIFT_213, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_224 = MUL(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_202), DUP(op_AND_202))), CAST(32, MSB(DUP(op_AND_202)), DUP(op_AND_202)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_202)), DUP(op_AND_202))), CAST(32, MSB(DUP(op_AND_202)), DUP(op_AND_202))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_202)), DUP(op_AND_202))), CAST(32, MSB(DUP(op_AND_202)), DUP(op_AND_202)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_202)), DUP(op_AND_202))), CAST(32, MSB(DUP(op_AND_202)), DUP(op_AND_202)))))), EXTRACT64(CAST(64, IL_FALSE, CAST(16, IL_FALSE, op_AND_216)), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_227 = SHIFTL0(CAST(64, IL_FALSE, op_MUL_224), SN(32, 0)); + RzILOpPure *op_RSHIFT_229 = SHIFTRA(op_LSHIFT_227, SN(32, 16)); + RzILOpPure *op_RSHIFT_238 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure 
*op_AND_240 = LOGAND(op_RSHIFT_238, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_251 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_254 = LOGAND(op_RSHIFT_251, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_262 = MUL(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_240), DUP(op_AND_240))), CAST(32, MSB(DUP(op_AND_240)), DUP(op_AND_240)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_240)), DUP(op_AND_240))), CAST(32, MSB(DUP(op_AND_240)), DUP(op_AND_240))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_240)), DUP(op_AND_240))), CAST(32, MSB(DUP(op_AND_240)), DUP(op_AND_240)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_240)), DUP(op_AND_240))), CAST(32, MSB(DUP(op_AND_240)), DUP(op_AND_240)))))), EXTRACT64(CAST(64, IL_FALSE, CAST(16, IL_FALSE, op_AND_254)), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_265 = SHIFTL0(CAST(64, IL_FALSE, op_MUL_262), SN(32, 0)); + RzILOpPure *op_RSHIFT_267 = SHIFTRA(op_LSHIFT_265, SN(32, 16)); + RzILOpPure *op_EQ_268 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_RSHIFT_229), SN(32, 0), SN(32, 0x20)), op_RSHIFT_267); + RzILOpPure *op_RSHIFT_308 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_310 = LOGAND(op_RSHIFT_308, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_321 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_324 = LOGAND(op_RSHIFT_321, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_332 = MUL(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_310), DUP(op_AND_310))), CAST(32, MSB(DUP(op_AND_310)), DUP(op_AND_310)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_310)), DUP(op_AND_310))), CAST(32, MSB(DUP(op_AND_310)), DUP(op_AND_310))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_310)), DUP(op_AND_310))), CAST(32, MSB(DUP(op_AND_310)), DUP(op_AND_310)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_310)), DUP(op_AND_310))), CAST(32, MSB(DUP(op_AND_310)), DUP(op_AND_310)))))), EXTRACT64(CAST(64, IL_FALSE, CAST(16, IL_FALSE, 
op_AND_324)), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_335 = SHIFTL0(CAST(64, IL_FALSE, op_MUL_332), SN(32, 0)); + RzILOpPure *op_RSHIFT_337 = SHIFTRA(op_LSHIFT_335, SN(32, 16)); + RzILOpPure *op_LT_340 = SLT(op_RSHIFT_337, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_345 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_346 = NEG(op_LSHIFT_345); + RzILOpPure *op_LSHIFT_351 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_354 = SUB(op_LSHIFT_351, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_355 = ITE(op_LT_340, op_NEG_346, op_SUB_354); + RzILOpEffect *gcc_expr_356 = BRANCH(op_EQ_268, EMPTY(), set_usr_field_call_304); + + // h_tmp321 = HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) >> 0x10)), 0x0, 0x20) == ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) >> 0x10))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) >> 0x10) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_358 = SETL("h_tmp321", cond_355); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) ((ut64) ((st6 ...; + RzILOpEffect *seq_359 = SEQN(2, gcc_expr_356, op_ASSIGN_hybrid_tmp_358); + + // Rdd = ((Rdd & (~(0xffffffff << 0x0))) | ((((sextract64(((ut64) ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) >> 0x10)), 0x0, 0x20) == ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) >> 0x10)) ? ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) >> 0x10) : h_tmp321) & 0xffffffff) << 0x0)); + RzILOpPure *op_LSHIFT_191 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0)); + RzILOpPure *op_NOT_192 = LOGNOT(op_LSHIFT_191); + RzILOpPure *op_AND_193 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_192); + RzILOpPure *op_RSHIFT_272 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_274 = LOGAND(op_RSHIFT_272, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_285 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_288 = LOGAND(op_RSHIFT_285, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_296 = MUL(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_274), DUP(op_AND_274))), CAST(32, MSB(DUP(op_AND_274)), DUP(op_AND_274)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_274)), DUP(op_AND_274))), CAST(32, MSB(DUP(op_AND_274)), DUP(op_AND_274))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_274)), DUP(op_AND_274))), CAST(32, MSB(DUP(op_AND_274)), DUP(op_AND_274)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_274)), DUP(op_AND_274))), CAST(32, MSB(DUP(op_AND_274)), DUP(op_AND_274)))))), EXTRACT64(CAST(64, IL_FALSE, CAST(16, 
IL_FALSE, op_AND_288)), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_299 = SHIFTL0(CAST(64, IL_FALSE, op_MUL_296), SN(32, 0)); + RzILOpPure *op_RSHIFT_301 = SHIFTRA(op_LSHIFT_299, SN(32, 16)); + RzILOpPure *cond_360 = ITE(DUP(op_EQ_268), op_RSHIFT_301, VARL("h_tmp321")); + RzILOpPure *op_AND_362 = LOGAND(cond_360, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_366 = SHIFTL0(op_AND_362, SN(32, 0)); + RzILOpPure *op_OR_367 = LOGOR(op_AND_193, op_LSHIFT_366); + RzILOpEffect *op_ASSIGN_368 = WRITE_REG(bundle, Rdd_op, op_OR_367); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) ((ut64) ( ...; + RzILOpEffect *seq_369 = SEQN(2, seq_359, op_ASSIGN_368); + + RzILOpEffect *instruction_sequence = SEQN(2, seq_185, seq_369); + return instruction_sequence; +} + +// Rdd = vmpywouh(Rss,Rtt):<<1:sat +RzILOpEffect *hex_il_op_m2_mmpyuh_s1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_120 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x30) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) >> 0x10)), 0x0, 0x20) == ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x30) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) >> 0x10))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 
0x30) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) >> 0x10) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_15 = SHIFTRA(Rss, SN(32, 0x20)); + RzILOpPure *op_AND_17 = LOGAND(op_RSHIFT_15, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_29 = SHIFTRA(Rtt, SN(32, 0x30)); + RzILOpPure *op_AND_32 = LOGAND(op_RSHIFT_29, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_40 = MUL(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_17), DUP(op_AND_17))), CAST(32, MSB(DUP(op_AND_17)), DUP(op_AND_17)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_17)), DUP(op_AND_17))), CAST(32, MSB(DUP(op_AND_17)), DUP(op_AND_17))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_17)), DUP(op_AND_17))), CAST(32, MSB(DUP(op_AND_17)), DUP(op_AND_17)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_17)), DUP(op_AND_17))), CAST(32, MSB(DUP(op_AND_17)), DUP(op_AND_17)))))), EXTRACT64(CAST(64, IL_FALSE, CAST(16, IL_FALSE, op_AND_32)), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_43 = SHIFTL0(CAST(64, IL_FALSE, op_MUL_40), SN(32, 1)); + RzILOpPure *op_RSHIFT_45 = SHIFTRA(op_LSHIFT_43, SN(32, 16)); + RzILOpPure *op_RSHIFT_54 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_56 = LOGAND(op_RSHIFT_54, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_67 = SHIFTRA(DUP(Rtt), SN(32, 0x30)); + RzILOpPure *op_AND_70 = LOGAND(op_RSHIFT_67, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_78 = MUL(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_56), DUP(op_AND_56))), CAST(32, MSB(DUP(op_AND_56)), DUP(op_AND_56)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_56)), DUP(op_AND_56))), CAST(32, MSB(DUP(op_AND_56)), DUP(op_AND_56))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_56)), DUP(op_AND_56))), CAST(32, MSB(DUP(op_AND_56)), DUP(op_AND_56)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_56)), DUP(op_AND_56))), CAST(32, MSB(DUP(op_AND_56)), DUP(op_AND_56)))))), EXTRACT64(CAST(64, 
IL_FALSE, CAST(16, IL_FALSE, op_AND_70)), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_81 = SHIFTL0(CAST(64, IL_FALSE, op_MUL_78), SN(32, 1)); + RzILOpPure *op_RSHIFT_83 = SHIFTRA(op_LSHIFT_81, SN(32, 16)); + RzILOpPure *op_EQ_84 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_RSHIFT_45), SN(32, 0), SN(32, 0x20)), op_RSHIFT_83); + RzILOpPure *op_RSHIFT_124 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_126 = LOGAND(op_RSHIFT_124, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_137 = SHIFTRA(DUP(Rtt), SN(32, 0x30)); + RzILOpPure *op_AND_140 = LOGAND(op_RSHIFT_137, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_148 = MUL(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_126), DUP(op_AND_126))), CAST(32, MSB(DUP(op_AND_126)), DUP(op_AND_126)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_126)), DUP(op_AND_126))), CAST(32, MSB(DUP(op_AND_126)), DUP(op_AND_126))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_126)), DUP(op_AND_126))), CAST(32, MSB(DUP(op_AND_126)), DUP(op_AND_126)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_126)), DUP(op_AND_126))), CAST(32, MSB(DUP(op_AND_126)), DUP(op_AND_126)))))), EXTRACT64(CAST(64, IL_FALSE, CAST(16, IL_FALSE, op_AND_140)), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_151 = SHIFTL0(CAST(64, IL_FALSE, op_MUL_148), SN(32, 1)); + RzILOpPure *op_RSHIFT_153 = SHIFTRA(op_LSHIFT_151, SN(32, 16)); + RzILOpPure *op_LT_156 = SLT(op_RSHIFT_153, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_161 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_162 = NEG(op_LSHIFT_161); + RzILOpPure *op_LSHIFT_167 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_170 = SUB(op_LSHIFT_167, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_171 = ITE(op_LT_156, op_NEG_162, op_SUB_170); + RzILOpEffect *gcc_expr_172 = BRANCH(op_EQ_84, EMPTY(), set_usr_field_call_120); + + // h_tmp322 = HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 
0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x30) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) >> 0x10)), 0x0, 0x20) == ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x30) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) >> 0x10))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x30) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) >> 0x10) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_174 = SETL("h_tmp322", cond_171); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) ((ut64) ((st6 ...; + RzILOpEffect *seq_175 = SEQN(2, gcc_expr_172, op_ASSIGN_hybrid_tmp_174); + + // Rdd = ((Rdd & (~(0xffffffff << 0x20))) | ((((sextract64(((ut64) ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x30) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) >> 0x10)), 0x0, 0x20) == ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x30) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) >> 0x10)) ? 
((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x30) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) >> 0x10) : h_tmp322) & 0xffffffff) << 0x20)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0x20)); + RzILOpPure *op_NOT_6 = LOGNOT(op_LSHIFT_5); + RzILOpPure *op_AND_7 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_6); + RzILOpPure *op_RSHIFT_88 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_90 = LOGAND(op_RSHIFT_88, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_101 = SHIFTRA(DUP(Rtt), SN(32, 0x30)); + RzILOpPure *op_AND_104 = LOGAND(op_RSHIFT_101, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_112 = MUL(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_90), DUP(op_AND_90))), CAST(32, MSB(DUP(op_AND_90)), DUP(op_AND_90)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_90)), DUP(op_AND_90))), CAST(32, MSB(DUP(op_AND_90)), DUP(op_AND_90))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_90)), DUP(op_AND_90))), CAST(32, MSB(DUP(op_AND_90)), DUP(op_AND_90)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_90)), DUP(op_AND_90))), CAST(32, MSB(DUP(op_AND_90)), DUP(op_AND_90)))))), EXTRACT64(CAST(64, IL_FALSE, CAST(16, IL_FALSE, op_AND_104)), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_115 = SHIFTL0(CAST(64, IL_FALSE, op_MUL_112), SN(32, 1)); + RzILOpPure *op_RSHIFT_117 = SHIFTRA(op_LSHIFT_115, SN(32, 16)); + RzILOpPure *cond_176 = ITE(DUP(op_EQ_84), op_RSHIFT_117, VARL("h_tmp322")); + RzILOpPure *op_AND_178 = LOGAND(cond_176, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_182 = SHIFTL0(op_AND_178, SN(32, 0x20)); + RzILOpPure *op_OR_183 = LOGOR(op_AND_7, op_LSHIFT_182); + RzILOpEffect *op_ASSIGN_184 = WRITE_REG(bundle, Rdd_op, op_OR_183); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) ((ut64) ( ...; + RzILOpEffect *seq_185 = SEQN(2, seq_175, op_ASSIGN_184); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + 
RzILOpEffect *set_usr_field_call_304 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) >> 0x10)), 0x0, 0x20) == ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) >> 0x10))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) >> 0x10) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_200 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_202 = LOGAND(op_RSHIFT_200, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_213 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_216 = LOGAND(op_RSHIFT_213, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_224 = MUL(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_202), DUP(op_AND_202))), CAST(32, MSB(DUP(op_AND_202)), DUP(op_AND_202)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_202)), DUP(op_AND_202))), CAST(32, MSB(DUP(op_AND_202)), DUP(op_AND_202))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_202)), DUP(op_AND_202))), CAST(32, MSB(DUP(op_AND_202)), DUP(op_AND_202)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_202)), DUP(op_AND_202))), CAST(32, MSB(DUP(op_AND_202)), DUP(op_AND_202)))))), EXTRACT64(CAST(64, IL_FALSE, CAST(16, IL_FALSE, op_AND_216)), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_227 = SHIFTL0(CAST(64, IL_FALSE, op_MUL_224), SN(32, 1)); + RzILOpPure *op_RSHIFT_229 = SHIFTRA(op_LSHIFT_227, SN(32, 16)); + RzILOpPure *op_RSHIFT_238 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure 
*op_AND_240 = LOGAND(op_RSHIFT_238, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_251 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_254 = LOGAND(op_RSHIFT_251, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_262 = MUL(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_240), DUP(op_AND_240))), CAST(32, MSB(DUP(op_AND_240)), DUP(op_AND_240)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_240)), DUP(op_AND_240))), CAST(32, MSB(DUP(op_AND_240)), DUP(op_AND_240))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_240)), DUP(op_AND_240))), CAST(32, MSB(DUP(op_AND_240)), DUP(op_AND_240)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_240)), DUP(op_AND_240))), CAST(32, MSB(DUP(op_AND_240)), DUP(op_AND_240)))))), EXTRACT64(CAST(64, IL_FALSE, CAST(16, IL_FALSE, op_AND_254)), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_265 = SHIFTL0(CAST(64, IL_FALSE, op_MUL_262), SN(32, 1)); + RzILOpPure *op_RSHIFT_267 = SHIFTRA(op_LSHIFT_265, SN(32, 16)); + RzILOpPure *op_EQ_268 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_RSHIFT_229), SN(32, 0), SN(32, 0x20)), op_RSHIFT_267); + RzILOpPure *op_RSHIFT_308 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_310 = LOGAND(op_RSHIFT_308, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_321 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_324 = LOGAND(op_RSHIFT_321, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_332 = MUL(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_310), DUP(op_AND_310))), CAST(32, MSB(DUP(op_AND_310)), DUP(op_AND_310)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_310)), DUP(op_AND_310))), CAST(32, MSB(DUP(op_AND_310)), DUP(op_AND_310))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_310)), DUP(op_AND_310))), CAST(32, MSB(DUP(op_AND_310)), DUP(op_AND_310)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_310)), DUP(op_AND_310))), CAST(32, MSB(DUP(op_AND_310)), DUP(op_AND_310)))))), EXTRACT64(CAST(64, IL_FALSE, CAST(16, IL_FALSE, 
op_AND_324)), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_335 = SHIFTL0(CAST(64, IL_FALSE, op_MUL_332), SN(32, 1)); + RzILOpPure *op_RSHIFT_337 = SHIFTRA(op_LSHIFT_335, SN(32, 16)); + RzILOpPure *op_LT_340 = SLT(op_RSHIFT_337, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_345 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_346 = NEG(op_LSHIFT_345); + RzILOpPure *op_LSHIFT_351 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_354 = SUB(op_LSHIFT_351, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_355 = ITE(op_LT_340, op_NEG_346, op_SUB_354); + RzILOpEffect *gcc_expr_356 = BRANCH(op_EQ_268, EMPTY(), set_usr_field_call_304); + + // h_tmp323 = HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) >> 0x10)), 0x0, 0x20) == ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) >> 0x10))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) >> 0x10) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_358 = SETL("h_tmp323", cond_355); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) ((ut64) ((st6 ...; + RzILOpEffect *seq_359 = SEQN(2, gcc_expr_356, op_ASSIGN_hybrid_tmp_358); + + // Rdd = ((Rdd & (~(0xffffffff << 0x0))) | ((((sextract64(((ut64) ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) >> 0x10)), 0x0, 0x20) == ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) >> 0x10)) ? ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) >> 0x10) : h_tmp323) & 0xffffffff) << 0x0)); + RzILOpPure *op_LSHIFT_191 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0)); + RzILOpPure *op_NOT_192 = LOGNOT(op_LSHIFT_191); + RzILOpPure *op_AND_193 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_192); + RzILOpPure *op_RSHIFT_272 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_274 = LOGAND(op_RSHIFT_272, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_285 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_288 = LOGAND(op_RSHIFT_285, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_296 = MUL(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_274), DUP(op_AND_274))), CAST(32, MSB(DUP(op_AND_274)), DUP(op_AND_274)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_274)), DUP(op_AND_274))), CAST(32, MSB(DUP(op_AND_274)), DUP(op_AND_274))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_274)), DUP(op_AND_274))), CAST(32, MSB(DUP(op_AND_274)), DUP(op_AND_274)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_274)), DUP(op_AND_274))), CAST(32, MSB(DUP(op_AND_274)), DUP(op_AND_274)))))), EXTRACT64(CAST(64, IL_FALSE, CAST(16, 
IL_FALSE, op_AND_288)), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_299 = SHIFTL0(CAST(64, IL_FALSE, op_MUL_296), SN(32, 1)); + RzILOpPure *op_RSHIFT_301 = SHIFTRA(op_LSHIFT_299, SN(32, 16)); + RzILOpPure *cond_360 = ITE(DUP(op_EQ_268), op_RSHIFT_301, VARL("h_tmp323")); + RzILOpPure *op_AND_362 = LOGAND(cond_360, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_366 = SHIFTL0(op_AND_362, SN(32, 0)); + RzILOpPure *op_OR_367 = LOGOR(op_AND_193, op_LSHIFT_366); + RzILOpEffect *op_ASSIGN_368 = WRITE_REG(bundle, Rdd_op, op_OR_367); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) ((ut64) ( ...; + RzILOpEffect *seq_369 = SEQN(2, seq_359, op_ASSIGN_368); + + RzILOpEffect *instruction_sequence = SEQN(2, seq_185, seq_369); + return instruction_sequence; +} + +// Rdd = vmpyweuh(Rss,Rtt):rnd:sat +RzILOpEffect *hex_il_op_m2_mmpyul_rs0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_129 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) + ((st64) 0x8000) >> 0x10)), 0x0, 0x20) == ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) + ((st64) 0x8000) >> 0x10))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) 
* extract64(((ut64) ((ut16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) + ((st64) 0x8000) >> 0x10) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_15 = SHIFTRA(Rss, SN(32, 0x20)); + RzILOpPure *op_AND_17 = LOGAND(op_RSHIFT_15, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_29 = SHIFTRA(Rtt, SN(32, 0x20)); + RzILOpPure *op_AND_32 = LOGAND(op_RSHIFT_29, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_40 = MUL(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_17), DUP(op_AND_17))), CAST(32, MSB(DUP(op_AND_17)), DUP(op_AND_17)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_17)), DUP(op_AND_17))), CAST(32, MSB(DUP(op_AND_17)), DUP(op_AND_17))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_17)), DUP(op_AND_17))), CAST(32, MSB(DUP(op_AND_17)), DUP(op_AND_17)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_17)), DUP(op_AND_17))), CAST(32, MSB(DUP(op_AND_17)), DUP(op_AND_17)))))), EXTRACT64(CAST(64, IL_FALSE, CAST(16, IL_FALSE, op_AND_32)), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_43 = SHIFTL0(CAST(64, IL_FALSE, op_MUL_40), SN(32, 0)); + RzILOpPure *op_ADD_46 = ADD(op_LSHIFT_43, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_48 = SHIFTRA(op_ADD_46, SN(32, 16)); + RzILOpPure *op_RSHIFT_57 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_59 = LOGAND(op_RSHIFT_57, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_70 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_73 = LOGAND(op_RSHIFT_70, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_81 = MUL(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_59), DUP(op_AND_59))), CAST(32, MSB(DUP(op_AND_59)), DUP(op_AND_59)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_59)), DUP(op_AND_59))), CAST(32, MSB(DUP(op_AND_59)), DUP(op_AND_59))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_59)), DUP(op_AND_59))), CAST(32, MSB(DUP(op_AND_59)), 
DUP(op_AND_59)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_59)), DUP(op_AND_59))), CAST(32, MSB(DUP(op_AND_59)), DUP(op_AND_59)))))), EXTRACT64(CAST(64, IL_FALSE, CAST(16, IL_FALSE, op_AND_73)), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_84 = SHIFTL0(CAST(64, IL_FALSE, op_MUL_81), SN(32, 0)); + RzILOpPure *op_ADD_87 = ADD(op_LSHIFT_84, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_89 = SHIFTRA(op_ADD_87, SN(32, 16)); + RzILOpPure *op_EQ_90 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_RSHIFT_48), SN(32, 0), SN(32, 0x20)), op_RSHIFT_89); + RzILOpPure *op_RSHIFT_133 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_135 = LOGAND(op_RSHIFT_133, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_146 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_149 = LOGAND(op_RSHIFT_146, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_157 = MUL(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_135), DUP(op_AND_135))), CAST(32, MSB(DUP(op_AND_135)), DUP(op_AND_135)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_135)), DUP(op_AND_135))), CAST(32, MSB(DUP(op_AND_135)), DUP(op_AND_135))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_135)), DUP(op_AND_135))), CAST(32, MSB(DUP(op_AND_135)), DUP(op_AND_135)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_135)), DUP(op_AND_135))), CAST(32, MSB(DUP(op_AND_135)), DUP(op_AND_135)))))), EXTRACT64(CAST(64, IL_FALSE, CAST(16, IL_FALSE, op_AND_149)), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_160 = SHIFTL0(CAST(64, IL_FALSE, op_MUL_157), SN(32, 0)); + RzILOpPure *op_ADD_163 = ADD(op_LSHIFT_160, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_165 = SHIFTRA(op_ADD_163, SN(32, 16)); + RzILOpPure *op_LT_168 = SLT(op_RSHIFT_165, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_173 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_174 = NEG(op_LSHIFT_173); + RzILOpPure *op_LSHIFT_179 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure 
*op_SUB_182 = SUB(op_LSHIFT_179, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_183 = ITE(op_LT_168, op_NEG_174, op_SUB_182); + RzILOpEffect *gcc_expr_184 = BRANCH(op_EQ_90, EMPTY(), set_usr_field_call_129); + + // h_tmp324 = HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) + ((st64) 0x8000) >> 0x10)), 0x0, 0x20) == ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) + ((st64) 0x8000) >> 0x10))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) + ((st64) 0x8000) >> 0x10) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_186 = SETL("h_tmp324", cond_183); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) ((ut64) ((st6 ...; + RzILOpEffect *seq_187 = SEQN(2, gcc_expr_184, op_ASSIGN_hybrid_tmp_186); + + // Rdd = ((Rdd & (~(0xffffffff << 0x20))) | ((((sextract64(((ut64) ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) + ((st64) 0x8000) >> 0x10)), 0x0, 0x20) == ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) + ((st64) 0x8000) >> 0x10)) ? 
((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) + ((st64) 0x8000) >> 0x10) : h_tmp324) & 0xffffffff) << 0x20)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0x20)); + RzILOpPure *op_NOT_6 = LOGNOT(op_LSHIFT_5); + RzILOpPure *op_AND_7 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_6); + RzILOpPure *op_RSHIFT_94 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_96 = LOGAND(op_RSHIFT_94, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_107 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_110 = LOGAND(op_RSHIFT_107, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_118 = MUL(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_96), DUP(op_AND_96))), CAST(32, MSB(DUP(op_AND_96)), DUP(op_AND_96)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_96)), DUP(op_AND_96))), CAST(32, MSB(DUP(op_AND_96)), DUP(op_AND_96))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_96)), DUP(op_AND_96))), CAST(32, MSB(DUP(op_AND_96)), DUP(op_AND_96)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_96)), DUP(op_AND_96))), CAST(32, MSB(DUP(op_AND_96)), DUP(op_AND_96)))))), EXTRACT64(CAST(64, IL_FALSE, CAST(16, IL_FALSE, op_AND_110)), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_121 = SHIFTL0(CAST(64, IL_FALSE, op_MUL_118), SN(32, 0)); + RzILOpPure *op_ADD_124 = ADD(op_LSHIFT_121, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_126 = SHIFTRA(op_ADD_124, SN(32, 16)); + RzILOpPure *cond_188 = ITE(DUP(op_EQ_90), op_RSHIFT_126, VARL("h_tmp324")); + RzILOpPure *op_AND_190 = LOGAND(cond_188, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_194 = SHIFTL0(op_AND_190, SN(32, 0x20)); + RzILOpPure *op_OR_195 = LOGOR(op_AND_7, op_LSHIFT_194); + RzILOpEffect *op_ASSIGN_196 = WRITE_REG(bundle, Rdd_op, op_OR_195); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) ((ut64) ( ...; + RzILOpEffect 
*seq_197 = SEQN(2, seq_187, op_ASSIGN_196); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_325 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) + ((st64) 0x8000) >> 0x10)), 0x0, 0x20) == ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) + ((st64) 0x8000) >> 0x10))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) + ((st64) 0x8000) >> 0x10) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_212 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_214 = LOGAND(op_RSHIFT_212, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_225 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_228 = LOGAND(op_RSHIFT_225, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_236 = MUL(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_214), DUP(op_AND_214))), CAST(32, MSB(DUP(op_AND_214)), DUP(op_AND_214)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_214)), DUP(op_AND_214))), CAST(32, MSB(DUP(op_AND_214)), DUP(op_AND_214))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_214)), DUP(op_AND_214))), CAST(32, MSB(DUP(op_AND_214)), DUP(op_AND_214)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_214)), DUP(op_AND_214))), CAST(32, MSB(DUP(op_AND_214)), DUP(op_AND_214)))))), EXTRACT64(CAST(64, IL_FALSE, CAST(16, IL_FALSE, op_AND_228)), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_239 = SHIFTL0(CAST(64, IL_FALSE, 
op_MUL_236), SN(32, 0)); + RzILOpPure *op_ADD_242 = ADD(op_LSHIFT_239, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_244 = SHIFTRA(op_ADD_242, SN(32, 16)); + RzILOpPure *op_RSHIFT_253 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_255 = LOGAND(op_RSHIFT_253, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_266 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_269 = LOGAND(op_RSHIFT_266, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_277 = MUL(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_255), DUP(op_AND_255))), CAST(32, MSB(DUP(op_AND_255)), DUP(op_AND_255)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_255)), DUP(op_AND_255))), CAST(32, MSB(DUP(op_AND_255)), DUP(op_AND_255))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_255)), DUP(op_AND_255))), CAST(32, MSB(DUP(op_AND_255)), DUP(op_AND_255)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_255)), DUP(op_AND_255))), CAST(32, MSB(DUP(op_AND_255)), DUP(op_AND_255)))))), EXTRACT64(CAST(64, IL_FALSE, CAST(16, IL_FALSE, op_AND_269)), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_280 = SHIFTL0(CAST(64, IL_FALSE, op_MUL_277), SN(32, 0)); + RzILOpPure *op_ADD_283 = ADD(op_LSHIFT_280, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_285 = SHIFTRA(op_ADD_283, SN(32, 16)); + RzILOpPure *op_EQ_286 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_RSHIFT_244), SN(32, 0), SN(32, 0x20)), op_RSHIFT_285); + RzILOpPure *op_RSHIFT_329 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_331 = LOGAND(op_RSHIFT_329, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_342 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_345 = LOGAND(op_RSHIFT_342, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_353 = MUL(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_331), DUP(op_AND_331))), CAST(32, MSB(DUP(op_AND_331)), DUP(op_AND_331)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_331)), DUP(op_AND_331))), CAST(32, 
MSB(DUP(op_AND_331)), DUP(op_AND_331))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_331)), DUP(op_AND_331))), CAST(32, MSB(DUP(op_AND_331)), DUP(op_AND_331)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_331)), DUP(op_AND_331))), CAST(32, MSB(DUP(op_AND_331)), DUP(op_AND_331)))))), EXTRACT64(CAST(64, IL_FALSE, CAST(16, IL_FALSE, op_AND_345)), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_356 = SHIFTL0(CAST(64, IL_FALSE, op_MUL_353), SN(32, 0)); + RzILOpPure *op_ADD_359 = ADD(op_LSHIFT_356, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_361 = SHIFTRA(op_ADD_359, SN(32, 16)); + RzILOpPure *op_LT_364 = SLT(op_RSHIFT_361, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_369 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_370 = NEG(op_LSHIFT_369); + RzILOpPure *op_LSHIFT_375 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_378 = SUB(op_LSHIFT_375, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_379 = ITE(op_LT_364, op_NEG_370, op_SUB_378); + RzILOpEffect *gcc_expr_380 = BRANCH(op_EQ_286, EMPTY(), set_usr_field_call_325); + + // h_tmp325 = HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) + ((st64) 0x8000) >> 0x10)), 0x0, 0x20) == ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) + ((st64) 0x8000) >> 0x10))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) + ((st64) 0x8000) >> 0x10) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_382 = SETL("h_tmp325", cond_379); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) ((ut64) ((st6 ...; + RzILOpEffect *seq_383 = SEQN(2, gcc_expr_380, op_ASSIGN_hybrid_tmp_382); + + // Rdd = ((Rdd & (~(0xffffffff << 0x0))) | ((((sextract64(((ut64) ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) + ((st64) 0x8000) >> 0x10)), 0x0, 0x20) == ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) + ((st64) 0x8000) >> 0x10)) ? ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) + ((st64) 0x8000) >> 0x10) : h_tmp325) & 0xffffffff) << 0x0)); + RzILOpPure *op_LSHIFT_203 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0)); + RzILOpPure *op_NOT_204 = LOGNOT(op_LSHIFT_203); + RzILOpPure *op_AND_205 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_204); + RzILOpPure *op_RSHIFT_290 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_292 = LOGAND(op_RSHIFT_290, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_303 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_306 = LOGAND(op_RSHIFT_303, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_314 = MUL(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_292), DUP(op_AND_292))), CAST(32, MSB(DUP(op_AND_292)), DUP(op_AND_292)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_292)), DUP(op_AND_292))), CAST(32, MSB(DUP(op_AND_292)), DUP(op_AND_292))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_292)), DUP(op_AND_292))), CAST(32, MSB(DUP(op_AND_292)), DUP(op_AND_292)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_292)), DUP(op_AND_292))), CAST(32, MSB(DUP(op_AND_292)), 
DUP(op_AND_292)))))), EXTRACT64(CAST(64, IL_FALSE, CAST(16, IL_FALSE, op_AND_306)), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_317 = SHIFTL0(CAST(64, IL_FALSE, op_MUL_314), SN(32, 0)); + RzILOpPure *op_ADD_320 = ADD(op_LSHIFT_317, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_322 = SHIFTRA(op_ADD_320, SN(32, 16)); + RzILOpPure *cond_384 = ITE(DUP(op_EQ_286), op_RSHIFT_322, VARL("h_tmp325")); + RzILOpPure *op_AND_386 = LOGAND(cond_384, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_390 = SHIFTL0(op_AND_386, SN(32, 0)); + RzILOpPure *op_OR_391 = LOGOR(op_AND_205, op_LSHIFT_390); + RzILOpEffect *op_ASSIGN_392 = WRITE_REG(bundle, Rdd_op, op_OR_391); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) ((ut64) ( ...; + RzILOpEffect *seq_393 = SEQN(2, seq_383, op_ASSIGN_392); + + RzILOpEffect *instruction_sequence = SEQN(2, seq_197, seq_393); + return instruction_sequence; +} + +// Rdd = vmpyweuh(Rss,Rtt):<<1:rnd:sat +RzILOpEffect *hex_il_op_m2_mmpyul_rs1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_129 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) + ((st64) 0x8000) >> 0x10)), 0x0, 0x20) == ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) + ((st64) 0x8000) >> 0x10))) 
{{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) + ((st64) 0x8000) >> 0x10) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_15 = SHIFTRA(Rss, SN(32, 0x20)); + RzILOpPure *op_AND_17 = LOGAND(op_RSHIFT_15, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_29 = SHIFTRA(Rtt, SN(32, 0x20)); + RzILOpPure *op_AND_32 = LOGAND(op_RSHIFT_29, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_40 = MUL(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_17), DUP(op_AND_17))), CAST(32, MSB(DUP(op_AND_17)), DUP(op_AND_17)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_17)), DUP(op_AND_17))), CAST(32, MSB(DUP(op_AND_17)), DUP(op_AND_17))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_17)), DUP(op_AND_17))), CAST(32, MSB(DUP(op_AND_17)), DUP(op_AND_17)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_17)), DUP(op_AND_17))), CAST(32, MSB(DUP(op_AND_17)), DUP(op_AND_17)))))), EXTRACT64(CAST(64, IL_FALSE, CAST(16, IL_FALSE, op_AND_32)), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_43 = SHIFTL0(CAST(64, IL_FALSE, op_MUL_40), SN(32, 1)); + RzILOpPure *op_ADD_46 = ADD(op_LSHIFT_43, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_48 = SHIFTRA(op_ADD_46, SN(32, 16)); + RzILOpPure *op_RSHIFT_57 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_59 = LOGAND(op_RSHIFT_57, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_70 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_73 = LOGAND(op_RSHIFT_70, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_81 = MUL(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_59), DUP(op_AND_59))), CAST(32, MSB(DUP(op_AND_59)), DUP(op_AND_59)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_59)), DUP(op_AND_59))), CAST(32, 
MSB(DUP(op_AND_59)), DUP(op_AND_59))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_59)), DUP(op_AND_59))), CAST(32, MSB(DUP(op_AND_59)), DUP(op_AND_59)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_59)), DUP(op_AND_59))), CAST(32, MSB(DUP(op_AND_59)), DUP(op_AND_59)))))), EXTRACT64(CAST(64, IL_FALSE, CAST(16, IL_FALSE, op_AND_73)), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_84 = SHIFTL0(CAST(64, IL_FALSE, op_MUL_81), SN(32, 1)); + RzILOpPure *op_ADD_87 = ADD(op_LSHIFT_84, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_89 = SHIFTRA(op_ADD_87, SN(32, 16)); + RzILOpPure *op_EQ_90 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_RSHIFT_48), SN(32, 0), SN(32, 0x20)), op_RSHIFT_89); + RzILOpPure *op_RSHIFT_133 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_135 = LOGAND(op_RSHIFT_133, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_146 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_149 = LOGAND(op_RSHIFT_146, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_157 = MUL(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_135), DUP(op_AND_135))), CAST(32, MSB(DUP(op_AND_135)), DUP(op_AND_135)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_135)), DUP(op_AND_135))), CAST(32, MSB(DUP(op_AND_135)), DUP(op_AND_135))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_135)), DUP(op_AND_135))), CAST(32, MSB(DUP(op_AND_135)), DUP(op_AND_135)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_135)), DUP(op_AND_135))), CAST(32, MSB(DUP(op_AND_135)), DUP(op_AND_135)))))), EXTRACT64(CAST(64, IL_FALSE, CAST(16, IL_FALSE, op_AND_149)), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_160 = SHIFTL0(CAST(64, IL_FALSE, op_MUL_157), SN(32, 1)); + RzILOpPure *op_ADD_163 = ADD(op_LSHIFT_160, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_165 = SHIFTRA(op_ADD_163, SN(32, 16)); + RzILOpPure *op_LT_168 = SLT(op_RSHIFT_165, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_173 = SHIFTL0(SN(64, 
1), SN(32, 31)); + RzILOpPure *op_NEG_174 = NEG(op_LSHIFT_173); + RzILOpPure *op_LSHIFT_179 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_182 = SUB(op_LSHIFT_179, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_183 = ITE(op_LT_168, op_NEG_174, op_SUB_182); + RzILOpEffect *gcc_expr_184 = BRANCH(op_EQ_90, EMPTY(), set_usr_field_call_129); + + // h_tmp326 = HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) + ((st64) 0x8000) >> 0x10)), 0x0, 0x20) == ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) + ((st64) 0x8000) >> 0x10))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) + ((st64) 0x8000) >> 0x10) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_186 = SETL("h_tmp326", cond_183); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) ((ut64) ((st6 ...; + RzILOpEffect *seq_187 = SEQN(2, gcc_expr_184, op_ASSIGN_hybrid_tmp_186); + + // Rdd = ((Rdd & (~(0xffffffff << 0x20))) | ((((sextract64(((ut64) ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) + ((st64) 0x8000) >> 0x10)), 0x0, 0x20) == ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) + ((st64) 0x8000) >> 0x10)) ? 
((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) + ((st64) 0x8000) >> 0x10) : h_tmp326) & 0xffffffff) << 0x20)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0x20)); + RzILOpPure *op_NOT_6 = LOGNOT(op_LSHIFT_5); + RzILOpPure *op_AND_7 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_6); + RzILOpPure *op_RSHIFT_94 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_96 = LOGAND(op_RSHIFT_94, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_107 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_110 = LOGAND(op_RSHIFT_107, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_118 = MUL(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_96), DUP(op_AND_96))), CAST(32, MSB(DUP(op_AND_96)), DUP(op_AND_96)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_96)), DUP(op_AND_96))), CAST(32, MSB(DUP(op_AND_96)), DUP(op_AND_96))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_96)), DUP(op_AND_96))), CAST(32, MSB(DUP(op_AND_96)), DUP(op_AND_96)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_96)), DUP(op_AND_96))), CAST(32, MSB(DUP(op_AND_96)), DUP(op_AND_96)))))), EXTRACT64(CAST(64, IL_FALSE, CAST(16, IL_FALSE, op_AND_110)), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_121 = SHIFTL0(CAST(64, IL_FALSE, op_MUL_118), SN(32, 1)); + RzILOpPure *op_ADD_124 = ADD(op_LSHIFT_121, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_126 = SHIFTRA(op_ADD_124, SN(32, 16)); + RzILOpPure *cond_188 = ITE(DUP(op_EQ_90), op_RSHIFT_126, VARL("h_tmp326")); + RzILOpPure *op_AND_190 = LOGAND(cond_188, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_194 = SHIFTL0(op_AND_190, SN(32, 0x20)); + RzILOpPure *op_OR_195 = LOGOR(op_AND_7, op_LSHIFT_194); + RzILOpEffect *op_ASSIGN_196 = WRITE_REG(bundle, Rdd_op, op_OR_195); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) ((ut64) ( ...; + RzILOpEffect 
*seq_197 = SEQN(2, seq_187, op_ASSIGN_196); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_325 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) + ((st64) 0x8000) >> 0x10)), 0x0, 0x20) == ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) + ((st64) 0x8000) >> 0x10))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) + ((st64) 0x8000) >> 0x10) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_212 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_214 = LOGAND(op_RSHIFT_212, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_225 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_228 = LOGAND(op_RSHIFT_225, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_236 = MUL(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_214), DUP(op_AND_214))), CAST(32, MSB(DUP(op_AND_214)), DUP(op_AND_214)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_214)), DUP(op_AND_214))), CAST(32, MSB(DUP(op_AND_214)), DUP(op_AND_214))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_214)), DUP(op_AND_214))), CAST(32, MSB(DUP(op_AND_214)), DUP(op_AND_214)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_214)), DUP(op_AND_214))), CAST(32, MSB(DUP(op_AND_214)), DUP(op_AND_214)))))), EXTRACT64(CAST(64, IL_FALSE, CAST(16, IL_FALSE, op_AND_228)), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_239 = SHIFTL0(CAST(64, IL_FALSE, 
op_MUL_236), SN(32, 1)); + RzILOpPure *op_ADD_242 = ADD(op_LSHIFT_239, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_244 = SHIFTRA(op_ADD_242, SN(32, 16)); + RzILOpPure *op_RSHIFT_253 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_255 = LOGAND(op_RSHIFT_253, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_266 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_269 = LOGAND(op_RSHIFT_266, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_277 = MUL(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_255), DUP(op_AND_255))), CAST(32, MSB(DUP(op_AND_255)), DUP(op_AND_255)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_255)), DUP(op_AND_255))), CAST(32, MSB(DUP(op_AND_255)), DUP(op_AND_255))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_255)), DUP(op_AND_255))), CAST(32, MSB(DUP(op_AND_255)), DUP(op_AND_255)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_255)), DUP(op_AND_255))), CAST(32, MSB(DUP(op_AND_255)), DUP(op_AND_255)))))), EXTRACT64(CAST(64, IL_FALSE, CAST(16, IL_FALSE, op_AND_269)), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_280 = SHIFTL0(CAST(64, IL_FALSE, op_MUL_277), SN(32, 1)); + RzILOpPure *op_ADD_283 = ADD(op_LSHIFT_280, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_285 = SHIFTRA(op_ADD_283, SN(32, 16)); + RzILOpPure *op_EQ_286 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_RSHIFT_244), SN(32, 0), SN(32, 0x20)), op_RSHIFT_285); + RzILOpPure *op_RSHIFT_329 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_331 = LOGAND(op_RSHIFT_329, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_342 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_345 = LOGAND(op_RSHIFT_342, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_353 = MUL(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_331), DUP(op_AND_331))), CAST(32, MSB(DUP(op_AND_331)), DUP(op_AND_331)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_331)), DUP(op_AND_331))), CAST(32, 
MSB(DUP(op_AND_331)), DUP(op_AND_331))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_331)), DUP(op_AND_331))), CAST(32, MSB(DUP(op_AND_331)), DUP(op_AND_331)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_331)), DUP(op_AND_331))), CAST(32, MSB(DUP(op_AND_331)), DUP(op_AND_331)))))), EXTRACT64(CAST(64, IL_FALSE, CAST(16, IL_FALSE, op_AND_345)), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_356 = SHIFTL0(CAST(64, IL_FALSE, op_MUL_353), SN(32, 1)); + RzILOpPure *op_ADD_359 = ADD(op_LSHIFT_356, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_361 = SHIFTRA(op_ADD_359, SN(32, 16)); + RzILOpPure *op_LT_364 = SLT(op_RSHIFT_361, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_369 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_370 = NEG(op_LSHIFT_369); + RzILOpPure *op_LSHIFT_375 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_378 = SUB(op_LSHIFT_375, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_379 = ITE(op_LT_364, op_NEG_370, op_SUB_378); + RzILOpEffect *gcc_expr_380 = BRANCH(op_EQ_286, EMPTY(), set_usr_field_call_325); + + // h_tmp327 = HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) + ((st64) 0x8000) >> 0x10)), 0x0, 0x20) == ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) + ((st64) 0x8000) >> 0x10))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) + ((st64) 0x8000) >> 0x10) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_382 = SETL("h_tmp327", cond_379); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) ((ut64) ((st6 ...; + RzILOpEffect *seq_383 = SEQN(2, gcc_expr_380, op_ASSIGN_hybrid_tmp_382); + + // Rdd = ((Rdd & (~(0xffffffff << 0x0))) | ((((sextract64(((ut64) ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) + ((st64) 0x8000) >> 0x10)), 0x0, 0x20) == ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) + ((st64) 0x8000) >> 0x10)) ? ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) + ((st64) 0x8000) >> 0x10) : h_tmp327) & 0xffffffff) << 0x0)); + RzILOpPure *op_LSHIFT_203 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0)); + RzILOpPure *op_NOT_204 = LOGNOT(op_LSHIFT_203); + RzILOpPure *op_AND_205 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_204); + RzILOpPure *op_RSHIFT_290 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_292 = LOGAND(op_RSHIFT_290, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_303 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_306 = LOGAND(op_RSHIFT_303, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_314 = MUL(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_292), DUP(op_AND_292))), CAST(32, MSB(DUP(op_AND_292)), DUP(op_AND_292)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_292)), DUP(op_AND_292))), CAST(32, MSB(DUP(op_AND_292)), DUP(op_AND_292))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_292)), DUP(op_AND_292))), CAST(32, MSB(DUP(op_AND_292)), DUP(op_AND_292)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_292)), DUP(op_AND_292))), CAST(32, MSB(DUP(op_AND_292)), 
DUP(op_AND_292)))))), EXTRACT64(CAST(64, IL_FALSE, CAST(16, IL_FALSE, op_AND_306)), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_317 = SHIFTL0(CAST(64, IL_FALSE, op_MUL_314), SN(32, 1)); + RzILOpPure *op_ADD_320 = ADD(op_LSHIFT_317, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_322 = SHIFTRA(op_ADD_320, SN(32, 16)); + RzILOpPure *cond_384 = ITE(DUP(op_EQ_286), op_RSHIFT_322, VARL("h_tmp327")); + RzILOpPure *op_AND_386 = LOGAND(cond_384, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_390 = SHIFTL0(op_AND_386, SN(32, 0)); + RzILOpPure *op_OR_391 = LOGOR(op_AND_205, op_LSHIFT_390); + RzILOpEffect *op_ASSIGN_392 = WRITE_REG(bundle, Rdd_op, op_OR_391); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) ((ut64) ( ...; + RzILOpEffect *seq_393 = SEQN(2, seq_383, op_ASSIGN_392); + + RzILOpEffect *instruction_sequence = SEQN(2, seq_197, seq_393); + return instruction_sequence; +} + +// Rdd = vmpyweuh(Rss,Rtt):sat +RzILOpEffect *hex_il_op_m2_mmpyul_s0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_120 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) >> 0x10)), 0x0, 0x20) == ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) >> 0x10))) {{}} else {set_usr_field(bundle, 
HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) >> 0x10) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_15 = SHIFTRA(Rss, SN(32, 0x20)); + RzILOpPure *op_AND_17 = LOGAND(op_RSHIFT_15, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_29 = SHIFTRA(Rtt, SN(32, 0x20)); + RzILOpPure *op_AND_32 = LOGAND(op_RSHIFT_29, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_40 = MUL(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_17), DUP(op_AND_17))), CAST(32, MSB(DUP(op_AND_17)), DUP(op_AND_17)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_17)), DUP(op_AND_17))), CAST(32, MSB(DUP(op_AND_17)), DUP(op_AND_17))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_17)), DUP(op_AND_17))), CAST(32, MSB(DUP(op_AND_17)), DUP(op_AND_17)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_17)), DUP(op_AND_17))), CAST(32, MSB(DUP(op_AND_17)), DUP(op_AND_17)))))), EXTRACT64(CAST(64, IL_FALSE, CAST(16, IL_FALSE, op_AND_32)), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_43 = SHIFTL0(CAST(64, IL_FALSE, op_MUL_40), SN(32, 0)); + RzILOpPure *op_RSHIFT_45 = SHIFTRA(op_LSHIFT_43, SN(32, 16)); + RzILOpPure *op_RSHIFT_54 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_56 = LOGAND(op_RSHIFT_54, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_67 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_70 = LOGAND(op_RSHIFT_67, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_78 = MUL(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_56), DUP(op_AND_56))), CAST(32, MSB(DUP(op_AND_56)), DUP(op_AND_56)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_56)), DUP(op_AND_56))), CAST(32, MSB(DUP(op_AND_56)), DUP(op_AND_56))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_56)), DUP(op_AND_56))), CAST(32, 
MSB(DUP(op_AND_56)), DUP(op_AND_56)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_56)), DUP(op_AND_56))), CAST(32, MSB(DUP(op_AND_56)), DUP(op_AND_56)))))), EXTRACT64(CAST(64, IL_FALSE, CAST(16, IL_FALSE, op_AND_70)), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_81 = SHIFTL0(CAST(64, IL_FALSE, op_MUL_78), SN(32, 0)); + RzILOpPure *op_RSHIFT_83 = SHIFTRA(op_LSHIFT_81, SN(32, 16)); + RzILOpPure *op_EQ_84 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_RSHIFT_45), SN(32, 0), SN(32, 0x20)), op_RSHIFT_83); + RzILOpPure *op_RSHIFT_124 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_126 = LOGAND(op_RSHIFT_124, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_137 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_140 = LOGAND(op_RSHIFT_137, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_148 = MUL(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_126), DUP(op_AND_126))), CAST(32, MSB(DUP(op_AND_126)), DUP(op_AND_126)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_126)), DUP(op_AND_126))), CAST(32, MSB(DUP(op_AND_126)), DUP(op_AND_126))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_126)), DUP(op_AND_126))), CAST(32, MSB(DUP(op_AND_126)), DUP(op_AND_126)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_126)), DUP(op_AND_126))), CAST(32, MSB(DUP(op_AND_126)), DUP(op_AND_126)))))), EXTRACT64(CAST(64, IL_FALSE, CAST(16, IL_FALSE, op_AND_140)), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_151 = SHIFTL0(CAST(64, IL_FALSE, op_MUL_148), SN(32, 0)); + RzILOpPure *op_RSHIFT_153 = SHIFTRA(op_LSHIFT_151, SN(32, 16)); + RzILOpPure *op_LT_156 = SLT(op_RSHIFT_153, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_161 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_162 = NEG(op_LSHIFT_161); + RzILOpPure *op_LSHIFT_167 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_170 = SUB(op_LSHIFT_167, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_171 = ITE(op_LT_156, op_NEG_162, op_SUB_170); + RzILOpEffect *gcc_expr_172 
= BRANCH(op_EQ_84, EMPTY(), set_usr_field_call_120); + + // h_tmp328 = HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) >> 0x10)), 0x0, 0x20) == ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) >> 0x10))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) >> 0x10) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_174 = SETL("h_tmp328", cond_171); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) ((ut64) ((st6 ...; + RzILOpEffect *seq_175 = SEQN(2, gcc_expr_172, op_ASSIGN_hybrid_tmp_174); + + // Rdd = ((Rdd & (~(0xffffffff << 0x20))) | ((((sextract64(((ut64) ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) >> 0x10)), 0x0, 0x20) == ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) >> 0x10)) ? 
((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) >> 0x10) : h_tmp328) & 0xffffffff) << 0x20)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0x20)); + RzILOpPure *op_NOT_6 = LOGNOT(op_LSHIFT_5); + RzILOpPure *op_AND_7 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_6); + RzILOpPure *op_RSHIFT_88 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_90 = LOGAND(op_RSHIFT_88, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_101 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_104 = LOGAND(op_RSHIFT_101, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_112 = MUL(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_90), DUP(op_AND_90))), CAST(32, MSB(DUP(op_AND_90)), DUP(op_AND_90)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_90)), DUP(op_AND_90))), CAST(32, MSB(DUP(op_AND_90)), DUP(op_AND_90))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_90)), DUP(op_AND_90))), CAST(32, MSB(DUP(op_AND_90)), DUP(op_AND_90)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_90)), DUP(op_AND_90))), CAST(32, MSB(DUP(op_AND_90)), DUP(op_AND_90)))))), EXTRACT64(CAST(64, IL_FALSE, CAST(16, IL_FALSE, op_AND_104)), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_115 = SHIFTL0(CAST(64, IL_FALSE, op_MUL_112), SN(32, 0)); + RzILOpPure *op_RSHIFT_117 = SHIFTRA(op_LSHIFT_115, SN(32, 16)); + RzILOpPure *cond_176 = ITE(DUP(op_EQ_84), op_RSHIFT_117, VARL("h_tmp328")); + RzILOpPure *op_AND_178 = LOGAND(cond_176, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_182 = SHIFTL0(op_AND_178, SN(32, 0x20)); + RzILOpPure *op_OR_183 = LOGOR(op_AND_7, op_LSHIFT_182); + RzILOpEffect *op_ASSIGN_184 = WRITE_REG(bundle, Rdd_op, op_OR_183); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) ((ut64) ( ...; + RzILOpEffect *seq_185 = SEQN(2, seq_175, op_ASSIGN_184); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + 
RzILOpEffect *set_usr_field_call_304 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) >> 0x10)), 0x0, 0x20) == ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) >> 0x10))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) >> 0x10) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_200 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_202 = LOGAND(op_RSHIFT_200, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_213 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_216 = LOGAND(op_RSHIFT_213, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_224 = MUL(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_202), DUP(op_AND_202))), CAST(32, MSB(DUP(op_AND_202)), DUP(op_AND_202)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_202)), DUP(op_AND_202))), CAST(32, MSB(DUP(op_AND_202)), DUP(op_AND_202))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_202)), DUP(op_AND_202))), CAST(32, MSB(DUP(op_AND_202)), DUP(op_AND_202)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_202)), DUP(op_AND_202))), CAST(32, MSB(DUP(op_AND_202)), DUP(op_AND_202)))))), EXTRACT64(CAST(64, IL_FALSE, CAST(16, IL_FALSE, op_AND_216)), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_227 = SHIFTL0(CAST(64, IL_FALSE, op_MUL_224), SN(32, 0)); + RzILOpPure *op_RSHIFT_229 = SHIFTRA(op_LSHIFT_227, SN(32, 16)); + RzILOpPure *op_RSHIFT_238 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure 
*op_AND_240 = LOGAND(op_RSHIFT_238, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_251 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_254 = LOGAND(op_RSHIFT_251, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_262 = MUL(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_240), DUP(op_AND_240))), CAST(32, MSB(DUP(op_AND_240)), DUP(op_AND_240)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_240)), DUP(op_AND_240))), CAST(32, MSB(DUP(op_AND_240)), DUP(op_AND_240))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_240)), DUP(op_AND_240))), CAST(32, MSB(DUP(op_AND_240)), DUP(op_AND_240)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_240)), DUP(op_AND_240))), CAST(32, MSB(DUP(op_AND_240)), DUP(op_AND_240)))))), EXTRACT64(CAST(64, IL_FALSE, CAST(16, IL_FALSE, op_AND_254)), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_265 = SHIFTL0(CAST(64, IL_FALSE, op_MUL_262), SN(32, 0)); + RzILOpPure *op_RSHIFT_267 = SHIFTRA(op_LSHIFT_265, SN(32, 16)); + RzILOpPure *op_EQ_268 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_RSHIFT_229), SN(32, 0), SN(32, 0x20)), op_RSHIFT_267); + RzILOpPure *op_RSHIFT_308 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_310 = LOGAND(op_RSHIFT_308, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_321 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_324 = LOGAND(op_RSHIFT_321, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_332 = MUL(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_310), DUP(op_AND_310))), CAST(32, MSB(DUP(op_AND_310)), DUP(op_AND_310)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_310)), DUP(op_AND_310))), CAST(32, MSB(DUP(op_AND_310)), DUP(op_AND_310))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_310)), DUP(op_AND_310))), CAST(32, MSB(DUP(op_AND_310)), DUP(op_AND_310)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_310)), DUP(op_AND_310))), CAST(32, MSB(DUP(op_AND_310)), DUP(op_AND_310)))))), EXTRACT64(CAST(64, IL_FALSE, CAST(16, IL_FALSE, 
op_AND_324)), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_335 = SHIFTL0(CAST(64, IL_FALSE, op_MUL_332), SN(32, 0)); + RzILOpPure *op_RSHIFT_337 = SHIFTRA(op_LSHIFT_335, SN(32, 16)); + RzILOpPure *op_LT_340 = SLT(op_RSHIFT_337, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_345 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_346 = NEG(op_LSHIFT_345); + RzILOpPure *op_LSHIFT_351 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_354 = SUB(op_LSHIFT_351, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_355 = ITE(op_LT_340, op_NEG_346, op_SUB_354); + RzILOpEffect *gcc_expr_356 = BRANCH(op_EQ_268, EMPTY(), set_usr_field_call_304); + + // h_tmp329 = HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) >> 0x10)), 0x0, 0x20) == ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) >> 0x10))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) >> 0x10) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_358 = SETL("h_tmp329", cond_355); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) ((ut64) ((st6 ...; + RzILOpEffect *seq_359 = SEQN(2, gcc_expr_356, op_ASSIGN_hybrid_tmp_358); + + // Rdd = ((Rdd & (~(0xffffffff << 0x0))) | ((((sextract64(((ut64) ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) >> 0x10)), 0x0, 0x20) == ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) >> 0x10)) ? ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x0) >> 0x10) : h_tmp329) & 0xffffffff) << 0x0)); + RzILOpPure *op_LSHIFT_191 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0)); + RzILOpPure *op_NOT_192 = LOGNOT(op_LSHIFT_191); + RzILOpPure *op_AND_193 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_192); + RzILOpPure *op_RSHIFT_272 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_274 = LOGAND(op_RSHIFT_272, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_285 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_288 = LOGAND(op_RSHIFT_285, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_296 = MUL(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_274), DUP(op_AND_274))), CAST(32, MSB(DUP(op_AND_274)), DUP(op_AND_274)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_274)), DUP(op_AND_274))), CAST(32, MSB(DUP(op_AND_274)), DUP(op_AND_274))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_274)), DUP(op_AND_274))), CAST(32, MSB(DUP(op_AND_274)), DUP(op_AND_274)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_274)), DUP(op_AND_274))), CAST(32, MSB(DUP(op_AND_274)), DUP(op_AND_274)))))), EXTRACT64(CAST(64, IL_FALSE, CAST(16, 
IL_FALSE, op_AND_288)), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_299 = SHIFTL0(CAST(64, IL_FALSE, op_MUL_296), SN(32, 0)); + RzILOpPure *op_RSHIFT_301 = SHIFTRA(op_LSHIFT_299, SN(32, 16)); + RzILOpPure *cond_360 = ITE(DUP(op_EQ_268), op_RSHIFT_301, VARL("h_tmp329")); + RzILOpPure *op_AND_362 = LOGAND(cond_360, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_366 = SHIFTL0(op_AND_362, SN(32, 0)); + RzILOpPure *op_OR_367 = LOGOR(op_AND_193, op_LSHIFT_366); + RzILOpEffect *op_ASSIGN_368 = WRITE_REG(bundle, Rdd_op, op_OR_367); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) ((ut64) ( ...; + RzILOpEffect *seq_369 = SEQN(2, seq_359, op_ASSIGN_368); + + RzILOpEffect *instruction_sequence = SEQN(2, seq_185, seq_369); + return instruction_sequence; +} + +// Rdd = vmpyweuh(Rss,Rtt):<<1:sat +RzILOpEffect *hex_il_op_m2_mmpyul_s1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_120 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) >> 0x10)), 0x0, 0x20) == ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) >> 0x10))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 
0x20) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) >> 0x10) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_15 = SHIFTRA(Rss, SN(32, 0x20)); + RzILOpPure *op_AND_17 = LOGAND(op_RSHIFT_15, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_29 = SHIFTRA(Rtt, SN(32, 0x20)); + RzILOpPure *op_AND_32 = LOGAND(op_RSHIFT_29, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_40 = MUL(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_17), DUP(op_AND_17))), CAST(32, MSB(DUP(op_AND_17)), DUP(op_AND_17)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_17)), DUP(op_AND_17))), CAST(32, MSB(DUP(op_AND_17)), DUP(op_AND_17))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_17)), DUP(op_AND_17))), CAST(32, MSB(DUP(op_AND_17)), DUP(op_AND_17)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_17)), DUP(op_AND_17))), CAST(32, MSB(DUP(op_AND_17)), DUP(op_AND_17)))))), EXTRACT64(CAST(64, IL_FALSE, CAST(16, IL_FALSE, op_AND_32)), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_43 = SHIFTL0(CAST(64, IL_FALSE, op_MUL_40), SN(32, 1)); + RzILOpPure *op_RSHIFT_45 = SHIFTRA(op_LSHIFT_43, SN(32, 16)); + RzILOpPure *op_RSHIFT_54 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_56 = LOGAND(op_RSHIFT_54, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_67 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_70 = LOGAND(op_RSHIFT_67, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_78 = MUL(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_56), DUP(op_AND_56))), CAST(32, MSB(DUP(op_AND_56)), DUP(op_AND_56)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_56)), DUP(op_AND_56))), CAST(32, MSB(DUP(op_AND_56)), DUP(op_AND_56))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_56)), DUP(op_AND_56))), CAST(32, MSB(DUP(op_AND_56)), DUP(op_AND_56)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_56)), DUP(op_AND_56))), CAST(32, MSB(DUP(op_AND_56)), DUP(op_AND_56)))))), EXTRACT64(CAST(64, 
IL_FALSE, CAST(16, IL_FALSE, op_AND_70)), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_81 = SHIFTL0(CAST(64, IL_FALSE, op_MUL_78), SN(32, 1)); + RzILOpPure *op_RSHIFT_83 = SHIFTRA(op_LSHIFT_81, SN(32, 16)); + RzILOpPure *op_EQ_84 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_RSHIFT_45), SN(32, 0), SN(32, 0x20)), op_RSHIFT_83); + RzILOpPure *op_RSHIFT_124 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_126 = LOGAND(op_RSHIFT_124, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_137 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_140 = LOGAND(op_RSHIFT_137, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_148 = MUL(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_126), DUP(op_AND_126))), CAST(32, MSB(DUP(op_AND_126)), DUP(op_AND_126)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_126)), DUP(op_AND_126))), CAST(32, MSB(DUP(op_AND_126)), DUP(op_AND_126))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_126)), DUP(op_AND_126))), CAST(32, MSB(DUP(op_AND_126)), DUP(op_AND_126)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_126)), DUP(op_AND_126))), CAST(32, MSB(DUP(op_AND_126)), DUP(op_AND_126)))))), EXTRACT64(CAST(64, IL_FALSE, CAST(16, IL_FALSE, op_AND_140)), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_151 = SHIFTL0(CAST(64, IL_FALSE, op_MUL_148), SN(32, 1)); + RzILOpPure *op_RSHIFT_153 = SHIFTRA(op_LSHIFT_151, SN(32, 16)); + RzILOpPure *op_LT_156 = SLT(op_RSHIFT_153, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_161 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_162 = NEG(op_LSHIFT_161); + RzILOpPure *op_LSHIFT_167 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_170 = SUB(op_LSHIFT_167, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_171 = ITE(op_LT_156, op_NEG_162, op_SUB_170); + RzILOpEffect *gcc_expr_172 = BRANCH(op_EQ_84, EMPTY(), set_usr_field_call_120); + + // h_tmp330 = HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 
0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) >> 0x10)), 0x0, 0x20) == ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) >> 0x10))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) >> 0x10) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_174 = SETL("h_tmp330", cond_171); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) ((ut64) ((st6 ...; + RzILOpEffect *seq_175 = SEQN(2, gcc_expr_172, op_ASSIGN_hybrid_tmp_174); + + // Rdd = ((Rdd & (~(0xffffffff << 0x20))) | ((((sextract64(((ut64) ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) >> 0x10)), 0x0, 0x20) == ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) >> 0x10)) ? 
((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) >> 0x10) : h_tmp330) & 0xffffffff) << 0x20)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0x20)); + RzILOpPure *op_NOT_6 = LOGNOT(op_LSHIFT_5); + RzILOpPure *op_AND_7 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_6); + RzILOpPure *op_RSHIFT_88 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_90 = LOGAND(op_RSHIFT_88, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_101 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_104 = LOGAND(op_RSHIFT_101, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_112 = MUL(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_90), DUP(op_AND_90))), CAST(32, MSB(DUP(op_AND_90)), DUP(op_AND_90)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_90)), DUP(op_AND_90))), CAST(32, MSB(DUP(op_AND_90)), DUP(op_AND_90))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_90)), DUP(op_AND_90))), CAST(32, MSB(DUP(op_AND_90)), DUP(op_AND_90)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_90)), DUP(op_AND_90))), CAST(32, MSB(DUP(op_AND_90)), DUP(op_AND_90)))))), EXTRACT64(CAST(64, IL_FALSE, CAST(16, IL_FALSE, op_AND_104)), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_115 = SHIFTL0(CAST(64, IL_FALSE, op_MUL_112), SN(32, 1)); + RzILOpPure *op_RSHIFT_117 = SHIFTRA(op_LSHIFT_115, SN(32, 16)); + RzILOpPure *cond_176 = ITE(DUP(op_EQ_84), op_RSHIFT_117, VARL("h_tmp330")); + RzILOpPure *op_AND_178 = LOGAND(cond_176, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_182 = SHIFTL0(op_AND_178, SN(32, 0x20)); + RzILOpPure *op_OR_183 = LOGOR(op_AND_7, op_LSHIFT_182); + RzILOpEffect *op_ASSIGN_184 = WRITE_REG(bundle, Rdd_op, op_OR_183); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) ((ut64) ( ...; + RzILOpEffect *seq_185 = SEQN(2, seq_175, op_ASSIGN_184); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + 
RzILOpEffect *set_usr_field_call_304 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) >> 0x10)), 0x0, 0x20) == ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) >> 0x10))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) >> 0x10) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_200 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_202 = LOGAND(op_RSHIFT_200, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_213 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_216 = LOGAND(op_RSHIFT_213, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_224 = MUL(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_202), DUP(op_AND_202))), CAST(32, MSB(DUP(op_AND_202)), DUP(op_AND_202)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_202)), DUP(op_AND_202))), CAST(32, MSB(DUP(op_AND_202)), DUP(op_AND_202))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_202)), DUP(op_AND_202))), CAST(32, MSB(DUP(op_AND_202)), DUP(op_AND_202)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_202)), DUP(op_AND_202))), CAST(32, MSB(DUP(op_AND_202)), DUP(op_AND_202)))))), EXTRACT64(CAST(64, IL_FALSE, CAST(16, IL_FALSE, op_AND_216)), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_227 = SHIFTL0(CAST(64, IL_FALSE, op_MUL_224), SN(32, 1)); + RzILOpPure *op_RSHIFT_229 = SHIFTRA(op_LSHIFT_227, SN(32, 16)); + RzILOpPure *op_RSHIFT_238 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure 
*op_AND_240 = LOGAND(op_RSHIFT_238, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_251 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_254 = LOGAND(op_RSHIFT_251, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_262 = MUL(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_240), DUP(op_AND_240))), CAST(32, MSB(DUP(op_AND_240)), DUP(op_AND_240)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_240)), DUP(op_AND_240))), CAST(32, MSB(DUP(op_AND_240)), DUP(op_AND_240))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_240)), DUP(op_AND_240))), CAST(32, MSB(DUP(op_AND_240)), DUP(op_AND_240)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_240)), DUP(op_AND_240))), CAST(32, MSB(DUP(op_AND_240)), DUP(op_AND_240)))))), EXTRACT64(CAST(64, IL_FALSE, CAST(16, IL_FALSE, op_AND_254)), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_265 = SHIFTL0(CAST(64, IL_FALSE, op_MUL_262), SN(32, 1)); + RzILOpPure *op_RSHIFT_267 = SHIFTRA(op_LSHIFT_265, SN(32, 16)); + RzILOpPure *op_EQ_268 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_RSHIFT_229), SN(32, 0), SN(32, 0x20)), op_RSHIFT_267); + RzILOpPure *op_RSHIFT_308 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_310 = LOGAND(op_RSHIFT_308, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_321 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_324 = LOGAND(op_RSHIFT_321, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_332 = MUL(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_310), DUP(op_AND_310))), CAST(32, MSB(DUP(op_AND_310)), DUP(op_AND_310)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_310)), DUP(op_AND_310))), CAST(32, MSB(DUP(op_AND_310)), DUP(op_AND_310))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_310)), DUP(op_AND_310))), CAST(32, MSB(DUP(op_AND_310)), DUP(op_AND_310)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_310)), DUP(op_AND_310))), CAST(32, MSB(DUP(op_AND_310)), DUP(op_AND_310)))))), EXTRACT64(CAST(64, IL_FALSE, CAST(16, IL_FALSE, 
op_AND_324)), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_335 = SHIFTL0(CAST(64, IL_FALSE, op_MUL_332), SN(32, 1)); + RzILOpPure *op_RSHIFT_337 = SHIFTRA(op_LSHIFT_335, SN(32, 16)); + RzILOpPure *op_LT_340 = SLT(op_RSHIFT_337, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_345 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_346 = NEG(op_LSHIFT_345); + RzILOpPure *op_LSHIFT_351 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_354 = SUB(op_LSHIFT_351, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_355 = ITE(op_LT_340, op_NEG_346, op_SUB_354); + RzILOpEffect *gcc_expr_356 = BRANCH(op_EQ_268, EMPTY(), set_usr_field_call_304); + + // h_tmp331 = HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) >> 0x10)), 0x0, 0x20) == ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) >> 0x10))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) >> 0x10) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_358 = SETL("h_tmp331", cond_355); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) ((ut64) ((st6 ...; + RzILOpEffect *seq_359 = SEQN(2, gcc_expr_356, op_ASSIGN_hybrid_tmp_358); + + // Rdd = ((Rdd & (~(0xffffffff << 0x0))) | ((((sextract64(((ut64) ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) >> 0x10)), 0x0, 0x20) == ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) >> 0x10)) ? ((((st64) ((ut64) ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff)))))) * extract64(((ut64) ((ut16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10)) << 0x1) >> 0x10) : h_tmp331) & 0xffffffff) << 0x0)); + RzILOpPure *op_LSHIFT_191 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0)); + RzILOpPure *op_NOT_192 = LOGNOT(op_LSHIFT_191); + RzILOpPure *op_AND_193 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_192); + RzILOpPure *op_RSHIFT_272 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_274 = LOGAND(op_RSHIFT_272, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_285 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_288 = LOGAND(op_RSHIFT_285, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_296 = MUL(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_274), DUP(op_AND_274))), CAST(32, MSB(DUP(op_AND_274)), DUP(op_AND_274)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_274)), DUP(op_AND_274))), CAST(32, MSB(DUP(op_AND_274)), DUP(op_AND_274))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_274)), DUP(op_AND_274))), CAST(32, MSB(DUP(op_AND_274)), DUP(op_AND_274)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_274)), DUP(op_AND_274))), CAST(32, MSB(DUP(op_AND_274)), DUP(op_AND_274)))))), EXTRACT64(CAST(64, IL_FALSE, CAST(16, 
IL_FALSE, op_AND_288)), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_299 = SHIFTL0(CAST(64, IL_FALSE, op_MUL_296), SN(32, 1)); + RzILOpPure *op_RSHIFT_301 = SHIFTRA(op_LSHIFT_299, SN(32, 16)); + RzILOpPure *cond_360 = ITE(DUP(op_EQ_268), op_RSHIFT_301, VARL("h_tmp331")); + RzILOpPure *op_AND_362 = LOGAND(cond_360, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_366 = SHIFTL0(op_AND_362, SN(32, 0)); + RzILOpPure *op_OR_367 = LOGOR(op_AND_193, op_LSHIFT_366); + RzILOpEffect *op_ASSIGN_368 = WRITE_REG(bundle, Rdd_op, op_OR_367); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) ((ut64) ( ...; + RzILOpEffect *seq_369 = SEQN(2, seq_359, op_ASSIGN_368); + + RzILOpEffect *instruction_sequence = SEQN(2, seq_185, seq_369); + return instruction_sequence; +} + +// Rx -= mpyi(Rs,Rt) +RzILOpEffect *hex_il_op_m2_mnaci(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rx = Rx - Rs * Rt; + RzILOpPure *op_MUL_3 = MUL(Rs, Rt); + RzILOpPure *op_SUB_4 = SUB(READ_REG(pkt, Rx_op, false), op_MUL_3); + RzILOpEffect *op_ASSIGN_5 = WRITE_REG(bundle, Rx_op, op_SUB_4); + + RzILOpEffect *instruction_sequence = op_ASSIGN_5; + return instruction_sequence; +} + +// Rx += mpy(Rs.h,Rt.h) +RzILOpEffect *hex_il_op_m2_mpy_acc_hh_s0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rx = ((st32) ((st64) Rx) + ((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 
0xffff))))); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rs, SN(32, 16)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(Rt, SN(32, 16)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0xffff)); + RzILOpPure *op_MUL_19 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_7), DUP(op_AND_7))), CAST(16, MSB(DUP(op_AND_7)), DUP(op_AND_7))), CAST(32, MSB(CAST(16, MSB(op_AND_16), DUP(op_AND_16))), CAST(16, MSB(DUP(op_AND_16)), DUP(op_AND_16)))); + RzILOpPure *op_ADD_22 = ADD(CAST(64, MSB(READ_REG(pkt, Rx_op, false)), READ_REG(pkt, Rx_op, false)), CAST(64, MSB(op_MUL_19), DUP(op_MUL_19))); + RzILOpEffect *op_ASSIGN_24 = WRITE_REG(bundle, Rx_op, CAST(32, MSB(op_ADD_22), DUP(op_ADD_22))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_24; + return instruction_sequence; +} + +// Rx += mpy(Rs.h,Rt.h):<<1 +RzILOpEffect *hex_il_op_m2_mpy_acc_hh_s1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rx = ((st32) ((st64) Rx) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1)); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rs, SN(32, 16)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(Rt, SN(32, 16)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0xffff)); + RzILOpPure *op_MUL_19 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_7), DUP(op_AND_7))), CAST(16, MSB(DUP(op_AND_7)), DUP(op_AND_7))), CAST(32, MSB(CAST(16, MSB(op_AND_16), DUP(op_AND_16))), CAST(16, MSB(DUP(op_AND_16)), DUP(op_AND_16)))); + RzILOpPure *op_LSHIFT_22 = SHIFTL0(CAST(64, MSB(op_MUL_19), DUP(op_MUL_19)), SN(32, 1)); + RzILOpPure *op_ADD_24 = ADD(CAST(64, MSB(READ_REG(pkt, Rx_op, 
false)), READ_REG(pkt, Rx_op, false)), op_LSHIFT_22); + RzILOpEffect *op_ASSIGN_26 = WRITE_REG(bundle, Rx_op, CAST(32, MSB(op_ADD_24), DUP(op_ADD_24))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_26; + return instruction_sequence; +} + +// Rx += mpy(Rs.h,Rt.l) +RzILOpEffect *hex_il_op_m2_mpy_acc_hl_s0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rx = ((st32) ((st64) Rx) + ((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff))))); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rs, SN(32, 16)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0xffff)); + RzILOpPure *op_MUL_19 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_7), DUP(op_AND_7))), CAST(16, MSB(DUP(op_AND_7)), DUP(op_AND_7))), CAST(32, MSB(CAST(16, MSB(op_AND_16), DUP(op_AND_16))), CAST(16, MSB(DUP(op_AND_16)), DUP(op_AND_16)))); + RzILOpPure *op_ADD_22 = ADD(CAST(64, MSB(READ_REG(pkt, Rx_op, false)), READ_REG(pkt, Rx_op, false)), CAST(64, MSB(op_MUL_19), DUP(op_MUL_19))); + RzILOpEffect *op_ASSIGN_24 = WRITE_REG(bundle, Rx_op, CAST(32, MSB(op_ADD_22), DUP(op_ADD_22))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_24; + return instruction_sequence; +} + +// Rx += mpy(Rs.h,Rt.l):<<1 +RzILOpEffect *hex_il_op_m2_mpy_acc_hl_s1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, 
false); + + // Rx = ((st32) ((st64) Rx) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1)); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rs, SN(32, 16)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0xffff)); + RzILOpPure *op_MUL_19 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_7), DUP(op_AND_7))), CAST(16, MSB(DUP(op_AND_7)), DUP(op_AND_7))), CAST(32, MSB(CAST(16, MSB(op_AND_16), DUP(op_AND_16))), CAST(16, MSB(DUP(op_AND_16)), DUP(op_AND_16)))); + RzILOpPure *op_LSHIFT_22 = SHIFTL0(CAST(64, MSB(op_MUL_19), DUP(op_MUL_19)), SN(32, 1)); + RzILOpPure *op_ADD_24 = ADD(CAST(64, MSB(READ_REG(pkt, Rx_op, false)), READ_REG(pkt, Rx_op, false)), op_LSHIFT_22); + RzILOpEffect *op_ASSIGN_26 = WRITE_REG(bundle, Rx_op, CAST(32, MSB(op_ADD_24), DUP(op_ADD_24))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_26; + return instruction_sequence; +} + +// Rx += mpy(Rs.l,Rt.h) +RzILOpEffect *hex_il_op_m2_mpy_acc_lh_s0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rx = ((st32) ((st64) Rx) + ((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff))))); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rs, SN(32, 0)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(Rt, SN(32, 16)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0xffff)); + RzILOpPure *op_MUL_19 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_7), DUP(op_AND_7))), CAST(16, MSB(DUP(op_AND_7)), DUP(op_AND_7))), CAST(32, MSB(CAST(16, MSB(op_AND_16), DUP(op_AND_16))), CAST(16, 
MSB(DUP(op_AND_16)), DUP(op_AND_16)))); + RzILOpPure *op_ADD_22 = ADD(CAST(64, MSB(READ_REG(pkt, Rx_op, false)), READ_REG(pkt, Rx_op, false)), CAST(64, MSB(op_MUL_19), DUP(op_MUL_19))); + RzILOpEffect *op_ASSIGN_24 = WRITE_REG(bundle, Rx_op, CAST(32, MSB(op_ADD_22), DUP(op_ADD_22))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_24; + return instruction_sequence; +} + +// Rx += mpy(Rs.l,Rt.h):<<1 +RzILOpEffect *hex_il_op_m2_mpy_acc_lh_s1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rx = ((st32) ((st64) Rx) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1)); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rs, SN(32, 0)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(Rt, SN(32, 16)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0xffff)); + RzILOpPure *op_MUL_19 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_7), DUP(op_AND_7))), CAST(16, MSB(DUP(op_AND_7)), DUP(op_AND_7))), CAST(32, MSB(CAST(16, MSB(op_AND_16), DUP(op_AND_16))), CAST(16, MSB(DUP(op_AND_16)), DUP(op_AND_16)))); + RzILOpPure *op_LSHIFT_22 = SHIFTL0(CAST(64, MSB(op_MUL_19), DUP(op_MUL_19)), SN(32, 1)); + RzILOpPure *op_ADD_24 = ADD(CAST(64, MSB(READ_REG(pkt, Rx_op, false)), READ_REG(pkt, Rx_op, false)), op_LSHIFT_22); + RzILOpEffect *op_ASSIGN_26 = WRITE_REG(bundle, Rx_op, CAST(32, MSB(op_ADD_24), DUP(op_ADD_24))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_26; + return instruction_sequence; +} + +// Rx += mpy(Rs.l,Rt.l) +RzILOpEffect *hex_il_op_m2_mpy_acc_ll_s0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rx_op = 
ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rx = ((st32) ((st64) Rx) + ((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff))))); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rs, SN(32, 0)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0xffff)); + RzILOpPure *op_MUL_19 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_7), DUP(op_AND_7))), CAST(16, MSB(DUP(op_AND_7)), DUP(op_AND_7))), CAST(32, MSB(CAST(16, MSB(op_AND_16), DUP(op_AND_16))), CAST(16, MSB(DUP(op_AND_16)), DUP(op_AND_16)))); + RzILOpPure *op_ADD_22 = ADD(CAST(64, MSB(READ_REG(pkt, Rx_op, false)), READ_REG(pkt, Rx_op, false)), CAST(64, MSB(op_MUL_19), DUP(op_MUL_19))); + RzILOpEffect *op_ASSIGN_24 = WRITE_REG(bundle, Rx_op, CAST(32, MSB(op_ADD_22), DUP(op_ADD_22))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_24; + return instruction_sequence; +} + +// Rx += mpy(Rs.l,Rt.l):<<1 +RzILOpEffect *hex_il_op_m2_mpy_acc_ll_s1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rx = ((st32) ((st64) Rx) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1)); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rs, SN(32, 0)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0xffff)); + RzILOpPure *op_MUL_19 = MUL(CAST(32, MSB(CAST(16, 
MSB(op_AND_7), DUP(op_AND_7))), CAST(16, MSB(DUP(op_AND_7)), DUP(op_AND_7))), CAST(32, MSB(CAST(16, MSB(op_AND_16), DUP(op_AND_16))), CAST(16, MSB(DUP(op_AND_16)), DUP(op_AND_16)))); + RzILOpPure *op_LSHIFT_22 = SHIFTL0(CAST(64, MSB(op_MUL_19), DUP(op_MUL_19)), SN(32, 1)); + RzILOpPure *op_ADD_24 = ADD(CAST(64, MSB(READ_REG(pkt, Rx_op, false)), READ_REG(pkt, Rx_op, false)), op_LSHIFT_22); + RzILOpEffect *op_ASSIGN_26 = WRITE_REG(bundle, Rx_op, CAST(32, MSB(op_ADD_24), DUP(op_ADD_24))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_26; + return instruction_sequence; +} + +// Rx += mpy(Rs.h,Rt.h):sat +RzILOpEffect *hex_il_op_m2_mpy_acc_sat_hh_s0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_74 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((st64) Rx) + ((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff))))), 0x0, 0x20) == ((st64) Rx) + ((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) Rx) + ((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_8 = SHIFTRA(Rs, SN(32, 16)); + RzILOpPure *op_AND_10 = LOGAND(op_RSHIFT_8, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_17 = SHIFTRA(Rt, SN(32, 16)); + RzILOpPure *op_AND_19 = LOGAND(op_RSHIFT_17, SN(32, 0xffff)); + RzILOpPure *op_MUL_22 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_10), DUP(op_AND_10))), CAST(16, MSB(DUP(op_AND_10)), DUP(op_AND_10))), CAST(32, MSB(CAST(16, MSB(op_AND_19), DUP(op_AND_19))), CAST(16, MSB(DUP(op_AND_19)), DUP(op_AND_19)))); + RzILOpPure *op_ADD_25 = ADD(CAST(64, MSB(READ_REG(pkt, Rx_op, false)), READ_REG(pkt, Rx_op, false)), CAST(64, MSB(op_MUL_22), DUP(op_MUL_22))); + RzILOpPure *op_RSHIFT_34 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_36 = LOGAND(op_RSHIFT_34, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_42 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_44 = LOGAND(op_RSHIFT_42, SN(32, 0xffff)); + RzILOpPure *op_MUL_47 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_36), DUP(op_AND_36))), CAST(16, MSB(DUP(op_AND_36)), DUP(op_AND_36))), CAST(32, MSB(CAST(16, MSB(op_AND_44), DUP(op_AND_44))), CAST(16, MSB(DUP(op_AND_44)), DUP(op_AND_44)))); + RzILOpPure *op_ADD_50 = ADD(CAST(64, MSB(READ_REG(pkt, Rx_op, false)), READ_REG(pkt, Rx_op, false)), CAST(64, MSB(op_MUL_47), DUP(op_MUL_47))); + RzILOpPure *op_EQ_51 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_ADD_25), SN(32, 0), SN(32, 0x20)), op_ADD_50); + RzILOpPure *op_RSHIFT_78 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_80 = LOGAND(op_RSHIFT_78, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_86 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_88 = LOGAND(op_RSHIFT_86, SN(32, 0xffff)); + RzILOpPure *op_MUL_91 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_80), DUP(op_AND_80))), CAST(16, MSB(DUP(op_AND_80)), DUP(op_AND_80))), CAST(32, MSB(CAST(16, MSB(op_AND_88), DUP(op_AND_88))), CAST(16, MSB(DUP(op_AND_88)), DUP(op_AND_88)))); + RzILOpPure *op_ADD_94 = ADD(CAST(64, MSB(READ_REG(pkt, Rx_op, false)), READ_REG(pkt, Rx_op, false)), 
CAST(64, MSB(op_MUL_91), DUP(op_MUL_91))); + RzILOpPure *op_LT_97 = SLT(op_ADD_94, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_102 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_103 = NEG(op_LSHIFT_102); + RzILOpPure *op_LSHIFT_108 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_111 = SUB(op_LSHIFT_108, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_112 = ITE(op_LT_97, op_NEG_103, op_SUB_111); + RzILOpEffect *gcc_expr_113 = BRANCH(op_EQ_51, EMPTY(), set_usr_field_call_74); + + // h_tmp332 = HYB(gcc_expr_if ((sextract64(((ut64) ((st64) Rx) + ((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff))))), 0x0, 0x20) == ((st64) Rx) + ((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) Rx) + ((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_115 = SETL("h_tmp332", cond_112); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) Rx) + ((st64) ( ...; + RzILOpEffect *seq_116 = SEQN(2, gcc_expr_113, op_ASSIGN_hybrid_tmp_115); + + // Rx = ((st32) ((sextract64(((ut64) ((st64) Rx) + ((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff))))), 0x0, 0x20) == ((st64) Rx) + ((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff))))) ? 
((st64) Rx) + ((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) : h_tmp332)); + RzILOpPure *op_RSHIFT_55 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_57 = LOGAND(op_RSHIFT_55, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_63 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_65 = LOGAND(op_RSHIFT_63, SN(32, 0xffff)); + RzILOpPure *op_MUL_68 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_57), DUP(op_AND_57))), CAST(16, MSB(DUP(op_AND_57)), DUP(op_AND_57))), CAST(32, MSB(CAST(16, MSB(op_AND_65), DUP(op_AND_65))), CAST(16, MSB(DUP(op_AND_65)), DUP(op_AND_65)))); + RzILOpPure *op_ADD_71 = ADD(CAST(64, MSB(READ_REG(pkt, Rx_op, false)), READ_REG(pkt, Rx_op, false)), CAST(64, MSB(op_MUL_68), DUP(op_MUL_68))); + RzILOpPure *cond_117 = ITE(DUP(op_EQ_51), op_ADD_71, VARL("h_tmp332")); + RzILOpEffect *op_ASSIGN_119 = WRITE_REG(bundle, Rx_op, CAST(32, MSB(cond_117), DUP(cond_117))); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) Rx) + ((st6 ...; + RzILOpEffect *seq_120 = SEQN(2, seq_116, op_ASSIGN_119); + + RzILOpEffect *instruction_sequence = seq_120; + return instruction_sequence; +} + +// Rx += mpy(Rs.h,Rt.h):<<1:sat +RzILOpEffect *hex_il_op_m2_mpy_acc_sat_hh_s1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_80 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((st64) Rx) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1)), 0x0, 0x20) == ((st64) Rx) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 
0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) Rx) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_8 = SHIFTRA(Rs, SN(32, 16)); + RzILOpPure *op_AND_10 = LOGAND(op_RSHIFT_8, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_17 = SHIFTRA(Rt, SN(32, 16)); + RzILOpPure *op_AND_19 = LOGAND(op_RSHIFT_17, SN(32, 0xffff)); + RzILOpPure *op_MUL_22 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_10), DUP(op_AND_10))), CAST(16, MSB(DUP(op_AND_10)), DUP(op_AND_10))), CAST(32, MSB(CAST(16, MSB(op_AND_19), DUP(op_AND_19))), CAST(16, MSB(DUP(op_AND_19)), DUP(op_AND_19)))); + RzILOpPure *op_LSHIFT_25 = SHIFTL0(CAST(64, MSB(op_MUL_22), DUP(op_MUL_22)), SN(32, 1)); + RzILOpPure *op_ADD_27 = ADD(CAST(64, MSB(READ_REG(pkt, Rx_op, false)), READ_REG(pkt, Rx_op, false)), op_LSHIFT_25); + RzILOpPure *op_RSHIFT_36 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_38 = LOGAND(op_RSHIFT_36, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_44 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_46 = LOGAND(op_RSHIFT_44, SN(32, 0xffff)); + RzILOpPure *op_MUL_49 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_38), DUP(op_AND_38))), CAST(16, MSB(DUP(op_AND_38)), DUP(op_AND_38))), CAST(32, MSB(CAST(16, MSB(op_AND_46), DUP(op_AND_46))), CAST(16, MSB(DUP(op_AND_46)), DUP(op_AND_46)))); + RzILOpPure *op_LSHIFT_52 = SHIFTL0(CAST(64, MSB(op_MUL_49), DUP(op_MUL_49)), SN(32, 1)); + RzILOpPure *op_ADD_54 = ADD(CAST(64, MSB(READ_REG(pkt, Rx_op, false)), READ_REG(pkt, Rx_op, false)), op_LSHIFT_52); + RzILOpPure *op_EQ_55 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_ADD_27), SN(32, 0), SN(32, 0x20)), op_ADD_54); + RzILOpPure *op_RSHIFT_84 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_86 = LOGAND(op_RSHIFT_84, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_92 = SHIFTRA(DUP(Rt), SN(32, 16)); + 
RzILOpPure *op_AND_94 = LOGAND(op_RSHIFT_92, SN(32, 0xffff)); + RzILOpPure *op_MUL_97 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_86), DUP(op_AND_86))), CAST(16, MSB(DUP(op_AND_86)), DUP(op_AND_86))), CAST(32, MSB(CAST(16, MSB(op_AND_94), DUP(op_AND_94))), CAST(16, MSB(DUP(op_AND_94)), DUP(op_AND_94)))); + RzILOpPure *op_LSHIFT_100 = SHIFTL0(CAST(64, MSB(op_MUL_97), DUP(op_MUL_97)), SN(32, 1)); + RzILOpPure *op_ADD_102 = ADD(CAST(64, MSB(READ_REG(pkt, Rx_op, false)), READ_REG(pkt, Rx_op, false)), op_LSHIFT_100); + RzILOpPure *op_LT_105 = SLT(op_ADD_102, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_110 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_111 = NEG(op_LSHIFT_110); + RzILOpPure *op_LSHIFT_116 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_119 = SUB(op_LSHIFT_116, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_120 = ITE(op_LT_105, op_NEG_111, op_SUB_119); + RzILOpEffect *gcc_expr_121 = BRANCH(op_EQ_55, EMPTY(), set_usr_field_call_80); + + // h_tmp333 = HYB(gcc_expr_if ((sextract64(((ut64) ((st64) Rx) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1)), 0x0, 0x20) == ((st64) Rx) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) Rx) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_123 = SETL("h_tmp333", cond_120); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) Rx) + (((st64) ...; + RzILOpEffect *seq_124 = SEQN(2, gcc_expr_121, op_ASSIGN_hybrid_tmp_123); + + // Rx = ((st32) ((sextract64(((ut64) ((st64) Rx) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1)), 0x0, 0x20) == ((st64) Rx) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1)) ? ((st64) Rx) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1) : h_tmp333)); + RzILOpPure *op_RSHIFT_59 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_61 = LOGAND(op_RSHIFT_59, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_67 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_69 = LOGAND(op_RSHIFT_67, SN(32, 0xffff)); + RzILOpPure *op_MUL_72 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_61), DUP(op_AND_61))), CAST(16, MSB(DUP(op_AND_61)), DUP(op_AND_61))), CAST(32, MSB(CAST(16, MSB(op_AND_69), DUP(op_AND_69))), CAST(16, MSB(DUP(op_AND_69)), DUP(op_AND_69)))); + RzILOpPure *op_LSHIFT_75 = SHIFTL0(CAST(64, MSB(op_MUL_72), DUP(op_MUL_72)), SN(32, 1)); + RzILOpPure *op_ADD_77 = ADD(CAST(64, MSB(READ_REG(pkt, Rx_op, false)), READ_REG(pkt, Rx_op, false)), op_LSHIFT_75); + RzILOpPure *cond_125 = ITE(DUP(op_EQ_55), op_ADD_77, VARL("h_tmp333")); + RzILOpEffect *op_ASSIGN_127 = WRITE_REG(bundle, Rx_op, CAST(32, MSB(cond_125), DUP(cond_125))); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) Rx) + (((st ...; + RzILOpEffect *seq_128 = SEQN(2, seq_124, op_ASSIGN_127); + + RzILOpEffect *instruction_sequence = seq_128; + return instruction_sequence; +} + +// Rx += mpy(Rs.h,Rt.l):sat +RzILOpEffect *hex_il_op_m2_mpy_acc_sat_hl_s0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rx_op = 
ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_74 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((st64) Rx) + ((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff))))), 0x0, 0x20) == ((st64) Rx) + ((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) Rx) + ((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_8 = SHIFTRA(Rs, SN(32, 16)); + RzILOpPure *op_AND_10 = LOGAND(op_RSHIFT_8, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_17 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_19 = LOGAND(op_RSHIFT_17, SN(32, 0xffff)); + RzILOpPure *op_MUL_22 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_10), DUP(op_AND_10))), CAST(16, MSB(DUP(op_AND_10)), DUP(op_AND_10))), CAST(32, MSB(CAST(16, MSB(op_AND_19), DUP(op_AND_19))), CAST(16, MSB(DUP(op_AND_19)), DUP(op_AND_19)))); + RzILOpPure *op_ADD_25 = ADD(CAST(64, MSB(READ_REG(pkt, Rx_op, false)), READ_REG(pkt, Rx_op, false)), CAST(64, MSB(op_MUL_22), DUP(op_MUL_22))); + RzILOpPure *op_RSHIFT_34 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_36 = LOGAND(op_RSHIFT_34, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_42 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_44 = LOGAND(op_RSHIFT_42, SN(32, 0xffff)); + RzILOpPure *op_MUL_47 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_36), DUP(op_AND_36))), CAST(16, MSB(DUP(op_AND_36)), DUP(op_AND_36))), CAST(32, MSB(CAST(16, MSB(op_AND_44), DUP(op_AND_44))), CAST(16, 
MSB(DUP(op_AND_44)), DUP(op_AND_44)))); + RzILOpPure *op_ADD_50 = ADD(CAST(64, MSB(READ_REG(pkt, Rx_op, false)), READ_REG(pkt, Rx_op, false)), CAST(64, MSB(op_MUL_47), DUP(op_MUL_47))); + RzILOpPure *op_EQ_51 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_ADD_25), SN(32, 0), SN(32, 0x20)), op_ADD_50); + RzILOpPure *op_RSHIFT_78 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_80 = LOGAND(op_RSHIFT_78, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_86 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_88 = LOGAND(op_RSHIFT_86, SN(32, 0xffff)); + RzILOpPure *op_MUL_91 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_80), DUP(op_AND_80))), CAST(16, MSB(DUP(op_AND_80)), DUP(op_AND_80))), CAST(32, MSB(CAST(16, MSB(op_AND_88), DUP(op_AND_88))), CAST(16, MSB(DUP(op_AND_88)), DUP(op_AND_88)))); + RzILOpPure *op_ADD_94 = ADD(CAST(64, MSB(READ_REG(pkt, Rx_op, false)), READ_REG(pkt, Rx_op, false)), CAST(64, MSB(op_MUL_91), DUP(op_MUL_91))); + RzILOpPure *op_LT_97 = SLT(op_ADD_94, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_102 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_103 = NEG(op_LSHIFT_102); + RzILOpPure *op_LSHIFT_108 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_111 = SUB(op_LSHIFT_108, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_112 = ITE(op_LT_97, op_NEG_103, op_SUB_111); + RzILOpEffect *gcc_expr_113 = BRANCH(op_EQ_51, EMPTY(), set_usr_field_call_74); + + // h_tmp334 = HYB(gcc_expr_if ((sextract64(((ut64) ((st64) Rx) + ((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff))))), 0x0, 0x20) == ((st64) Rx) + ((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) Rx) + ((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_115 = SETL("h_tmp334", cond_112); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) Rx) + ((st64) ( ...; + RzILOpEffect *seq_116 = SEQN(2, gcc_expr_113, op_ASSIGN_hybrid_tmp_115); + + // Rx = ((st32) ((sextract64(((ut64) ((st64) Rx) + ((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff))))), 0x0, 0x20) == ((st64) Rx) + ((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff))))) ? ((st64) Rx) + ((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) : h_tmp334)); + RzILOpPure *op_RSHIFT_55 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_57 = LOGAND(op_RSHIFT_55, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_63 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_65 = LOGAND(op_RSHIFT_63, SN(32, 0xffff)); + RzILOpPure *op_MUL_68 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_57), DUP(op_AND_57))), CAST(16, MSB(DUP(op_AND_57)), DUP(op_AND_57))), CAST(32, MSB(CAST(16, MSB(op_AND_65), DUP(op_AND_65))), CAST(16, MSB(DUP(op_AND_65)), DUP(op_AND_65)))); + RzILOpPure *op_ADD_71 = ADD(CAST(64, MSB(READ_REG(pkt, Rx_op, false)), READ_REG(pkt, Rx_op, false)), CAST(64, MSB(op_MUL_68), DUP(op_MUL_68))); + RzILOpPure *cond_117 = ITE(DUP(op_EQ_51), op_ADD_71, VARL("h_tmp334")); + RzILOpEffect *op_ASSIGN_119 = WRITE_REG(bundle, Rx_op, CAST(32, MSB(cond_117), DUP(cond_117))); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) Rx) + ((st6 ...; + RzILOpEffect *seq_120 = SEQN(2, seq_116, op_ASSIGN_119); + + RzILOpEffect *instruction_sequence = seq_120; + return instruction_sequence; +} + +// Rx += mpy(Rs.h,Rt.l):<<1:sat +RzILOpEffect *hex_il_op_m2_mpy_acc_sat_hl_s1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = 
READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_80 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((st64) Rx) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1)), 0x0, 0x20) == ((st64) Rx) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) Rx) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_8 = SHIFTRA(Rs, SN(32, 16)); + RzILOpPure *op_AND_10 = LOGAND(op_RSHIFT_8, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_17 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_19 = LOGAND(op_RSHIFT_17, SN(32, 0xffff)); + RzILOpPure *op_MUL_22 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_10), DUP(op_AND_10))), CAST(16, MSB(DUP(op_AND_10)), DUP(op_AND_10))), CAST(32, MSB(CAST(16, MSB(op_AND_19), DUP(op_AND_19))), CAST(16, MSB(DUP(op_AND_19)), DUP(op_AND_19)))); + RzILOpPure *op_LSHIFT_25 = SHIFTL0(CAST(64, MSB(op_MUL_22), DUP(op_MUL_22)), SN(32, 1)); + RzILOpPure *op_ADD_27 = ADD(CAST(64, MSB(READ_REG(pkt, Rx_op, false)), READ_REG(pkt, Rx_op, false)), op_LSHIFT_25); + RzILOpPure *op_RSHIFT_36 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_38 = LOGAND(op_RSHIFT_36, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_44 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_46 = LOGAND(op_RSHIFT_44, SN(32, 0xffff)); + RzILOpPure *op_MUL_49 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_38), DUP(op_AND_38))), CAST(16, MSB(DUP(op_AND_38)), DUP(op_AND_38))), CAST(32, MSB(CAST(16, MSB(op_AND_46), DUP(op_AND_46))), CAST(16, 
MSB(DUP(op_AND_46)), DUP(op_AND_46)))); + RzILOpPure *op_LSHIFT_52 = SHIFTL0(CAST(64, MSB(op_MUL_49), DUP(op_MUL_49)), SN(32, 1)); + RzILOpPure *op_ADD_54 = ADD(CAST(64, MSB(READ_REG(pkt, Rx_op, false)), READ_REG(pkt, Rx_op, false)), op_LSHIFT_52); + RzILOpPure *op_EQ_55 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_ADD_27), SN(32, 0), SN(32, 0x20)), op_ADD_54); + RzILOpPure *op_RSHIFT_84 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_86 = LOGAND(op_RSHIFT_84, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_92 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_94 = LOGAND(op_RSHIFT_92, SN(32, 0xffff)); + RzILOpPure *op_MUL_97 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_86), DUP(op_AND_86))), CAST(16, MSB(DUP(op_AND_86)), DUP(op_AND_86))), CAST(32, MSB(CAST(16, MSB(op_AND_94), DUP(op_AND_94))), CAST(16, MSB(DUP(op_AND_94)), DUP(op_AND_94)))); + RzILOpPure *op_LSHIFT_100 = SHIFTL0(CAST(64, MSB(op_MUL_97), DUP(op_MUL_97)), SN(32, 1)); + RzILOpPure *op_ADD_102 = ADD(CAST(64, MSB(READ_REG(pkt, Rx_op, false)), READ_REG(pkt, Rx_op, false)), op_LSHIFT_100); + RzILOpPure *op_LT_105 = SLT(op_ADD_102, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_110 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_111 = NEG(op_LSHIFT_110); + RzILOpPure *op_LSHIFT_116 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_119 = SUB(op_LSHIFT_116, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_120 = ITE(op_LT_105, op_NEG_111, op_SUB_119); + RzILOpEffect *gcc_expr_121 = BRANCH(op_EQ_55, EMPTY(), set_usr_field_call_80); + + // h_tmp335 = HYB(gcc_expr_if ((sextract64(((ut64) ((st64) Rx) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1)), 0x0, 0x20) == ((st64) Rx) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) Rx) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) 
((Rt >> 0x0) & 0xffff)))) << 0x1) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_123 = SETL("h_tmp335", cond_120); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) Rx) + (((st64) ...; + RzILOpEffect *seq_124 = SEQN(2, gcc_expr_121, op_ASSIGN_hybrid_tmp_123); + + // Rx = ((st32) ((sextract64(((ut64) ((st64) Rx) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1)), 0x0, 0x20) == ((st64) Rx) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1)) ? ((st64) Rx) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) : h_tmp335)); + RzILOpPure *op_RSHIFT_59 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_61 = LOGAND(op_RSHIFT_59, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_67 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_69 = LOGAND(op_RSHIFT_67, SN(32, 0xffff)); + RzILOpPure *op_MUL_72 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_61), DUP(op_AND_61))), CAST(16, MSB(DUP(op_AND_61)), DUP(op_AND_61))), CAST(32, MSB(CAST(16, MSB(op_AND_69), DUP(op_AND_69))), CAST(16, MSB(DUP(op_AND_69)), DUP(op_AND_69)))); + RzILOpPure *op_LSHIFT_75 = SHIFTL0(CAST(64, MSB(op_MUL_72), DUP(op_MUL_72)), SN(32, 1)); + RzILOpPure *op_ADD_77 = ADD(CAST(64, MSB(READ_REG(pkt, Rx_op, false)), READ_REG(pkt, Rx_op, false)), op_LSHIFT_75); + RzILOpPure *cond_125 = ITE(DUP(op_EQ_55), op_ADD_77, VARL("h_tmp335")); + RzILOpEffect *op_ASSIGN_127 = WRITE_REG(bundle, Rx_op, CAST(32, MSB(cond_125), DUP(cond_125))); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) Rx) + (((st ...; + RzILOpEffect *seq_128 = SEQN(2, seq_124, op_ASSIGN_127); + + RzILOpEffect *instruction_sequence = seq_128; + return instruction_sequence; +} + +// Rx += mpy(Rs.l,Rt.h):sat +RzILOpEffect *hex_il_op_m2_mpy_acc_sat_lh_s0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = 
bundle->pkt; + // READ + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_74 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((st64) Rx) + ((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff))))), 0x0, 0x20) == ((st64) Rx) + ((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) Rx) + ((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_8 = SHIFTRA(Rs, SN(32, 0)); + RzILOpPure *op_AND_10 = LOGAND(op_RSHIFT_8, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_17 = SHIFTRA(Rt, SN(32, 16)); + RzILOpPure *op_AND_19 = LOGAND(op_RSHIFT_17, SN(32, 0xffff)); + RzILOpPure *op_MUL_22 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_10), DUP(op_AND_10))), CAST(16, MSB(DUP(op_AND_10)), DUP(op_AND_10))), CAST(32, MSB(CAST(16, MSB(op_AND_19), DUP(op_AND_19))), CAST(16, MSB(DUP(op_AND_19)), DUP(op_AND_19)))); + RzILOpPure *op_ADD_25 = ADD(CAST(64, MSB(READ_REG(pkt, Rx_op, false)), READ_REG(pkt, Rx_op, false)), CAST(64, MSB(op_MUL_22), DUP(op_MUL_22))); + RzILOpPure *op_RSHIFT_34 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_36 = LOGAND(op_RSHIFT_34, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_42 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_44 = LOGAND(op_RSHIFT_42, SN(32, 0xffff)); + RzILOpPure *op_MUL_47 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_36), DUP(op_AND_36))), CAST(16, MSB(DUP(op_AND_36)), DUP(op_AND_36))), CAST(32, 
MSB(CAST(16, MSB(op_AND_44), DUP(op_AND_44))), CAST(16, MSB(DUP(op_AND_44)), DUP(op_AND_44)))); + RzILOpPure *op_ADD_50 = ADD(CAST(64, MSB(READ_REG(pkt, Rx_op, false)), READ_REG(pkt, Rx_op, false)), CAST(64, MSB(op_MUL_47), DUP(op_MUL_47))); + RzILOpPure *op_EQ_51 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_ADD_25), SN(32, 0), SN(32, 0x20)), op_ADD_50); + RzILOpPure *op_RSHIFT_78 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_80 = LOGAND(op_RSHIFT_78, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_86 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_88 = LOGAND(op_RSHIFT_86, SN(32, 0xffff)); + RzILOpPure *op_MUL_91 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_80), DUP(op_AND_80))), CAST(16, MSB(DUP(op_AND_80)), DUP(op_AND_80))), CAST(32, MSB(CAST(16, MSB(op_AND_88), DUP(op_AND_88))), CAST(16, MSB(DUP(op_AND_88)), DUP(op_AND_88)))); + RzILOpPure *op_ADD_94 = ADD(CAST(64, MSB(READ_REG(pkt, Rx_op, false)), READ_REG(pkt, Rx_op, false)), CAST(64, MSB(op_MUL_91), DUP(op_MUL_91))); + RzILOpPure *op_LT_97 = SLT(op_ADD_94, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_102 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_103 = NEG(op_LSHIFT_102); + RzILOpPure *op_LSHIFT_108 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_111 = SUB(op_LSHIFT_108, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_112 = ITE(op_LT_97, op_NEG_103, op_SUB_111); + RzILOpEffect *gcc_expr_113 = BRANCH(op_EQ_51, EMPTY(), set_usr_field_call_74); + + // h_tmp336 = HYB(gcc_expr_if ((sextract64(((ut64) ((st64) Rx) + ((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff))))), 0x0, 0x20) == ((st64) Rx) + ((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) Rx) + ((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_115 = SETL("h_tmp336", cond_112); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) Rx) + ((st64) ( ...; + RzILOpEffect *seq_116 = SEQN(2, gcc_expr_113, op_ASSIGN_hybrid_tmp_115); + + // Rx = ((st32) ((sextract64(((ut64) ((st64) Rx) + ((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff))))), 0x0, 0x20) == ((st64) Rx) + ((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff))))) ? ((st64) Rx) + ((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) : h_tmp336)); + RzILOpPure *op_RSHIFT_55 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_57 = LOGAND(op_RSHIFT_55, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_63 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_65 = LOGAND(op_RSHIFT_63, SN(32, 0xffff)); + RzILOpPure *op_MUL_68 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_57), DUP(op_AND_57))), CAST(16, MSB(DUP(op_AND_57)), DUP(op_AND_57))), CAST(32, MSB(CAST(16, MSB(op_AND_65), DUP(op_AND_65))), CAST(16, MSB(DUP(op_AND_65)), DUP(op_AND_65)))); + RzILOpPure *op_ADD_71 = ADD(CAST(64, MSB(READ_REG(pkt, Rx_op, false)), READ_REG(pkt, Rx_op, false)), CAST(64, MSB(op_MUL_68), DUP(op_MUL_68))); + RzILOpPure *cond_117 = ITE(DUP(op_EQ_51), op_ADD_71, VARL("h_tmp336")); + RzILOpEffect *op_ASSIGN_119 = WRITE_REG(bundle, Rx_op, CAST(32, MSB(cond_117), DUP(cond_117))); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) Rx) + ((st6 ...; + RzILOpEffect *seq_120 = SEQN(2, seq_116, op_ASSIGN_119); + + RzILOpEffect *instruction_sequence = seq_120; + return instruction_sequence; +} + +// Rx += mpy(Rs.l,Rt.h):<<1:sat +RzILOpEffect *hex_il_op_m2_mpy_acc_sat_lh_s1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = 
READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_80 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((st64) Rx) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1)), 0x0, 0x20) == ((st64) Rx) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) Rx) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_8 = SHIFTRA(Rs, SN(32, 0)); + RzILOpPure *op_AND_10 = LOGAND(op_RSHIFT_8, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_17 = SHIFTRA(Rt, SN(32, 16)); + RzILOpPure *op_AND_19 = LOGAND(op_RSHIFT_17, SN(32, 0xffff)); + RzILOpPure *op_MUL_22 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_10), DUP(op_AND_10))), CAST(16, MSB(DUP(op_AND_10)), DUP(op_AND_10))), CAST(32, MSB(CAST(16, MSB(op_AND_19), DUP(op_AND_19))), CAST(16, MSB(DUP(op_AND_19)), DUP(op_AND_19)))); + RzILOpPure *op_LSHIFT_25 = SHIFTL0(CAST(64, MSB(op_MUL_22), DUP(op_MUL_22)), SN(32, 1)); + RzILOpPure *op_ADD_27 = ADD(CAST(64, MSB(READ_REG(pkt, Rx_op, false)), READ_REG(pkt, Rx_op, false)), op_LSHIFT_25); + RzILOpPure *op_RSHIFT_36 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_38 = LOGAND(op_RSHIFT_36, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_44 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_46 = LOGAND(op_RSHIFT_44, SN(32, 0xffff)); + RzILOpPure *op_MUL_49 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_38), DUP(op_AND_38))), CAST(16, MSB(DUP(op_AND_38)), DUP(op_AND_38))), CAST(32, MSB(CAST(16, MSB(op_AND_46), DUP(op_AND_46))), CAST(16, 
MSB(DUP(op_AND_46)), DUP(op_AND_46)))); + RzILOpPure *op_LSHIFT_52 = SHIFTL0(CAST(64, MSB(op_MUL_49), DUP(op_MUL_49)), SN(32, 1)); + RzILOpPure *op_ADD_54 = ADD(CAST(64, MSB(READ_REG(pkt, Rx_op, false)), READ_REG(pkt, Rx_op, false)), op_LSHIFT_52); + RzILOpPure *op_EQ_55 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_ADD_27), SN(32, 0), SN(32, 0x20)), op_ADD_54); + RzILOpPure *op_RSHIFT_84 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_86 = LOGAND(op_RSHIFT_84, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_92 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_94 = LOGAND(op_RSHIFT_92, SN(32, 0xffff)); + RzILOpPure *op_MUL_97 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_86), DUP(op_AND_86))), CAST(16, MSB(DUP(op_AND_86)), DUP(op_AND_86))), CAST(32, MSB(CAST(16, MSB(op_AND_94), DUP(op_AND_94))), CAST(16, MSB(DUP(op_AND_94)), DUP(op_AND_94)))); + RzILOpPure *op_LSHIFT_100 = SHIFTL0(CAST(64, MSB(op_MUL_97), DUP(op_MUL_97)), SN(32, 1)); + RzILOpPure *op_ADD_102 = ADD(CAST(64, MSB(READ_REG(pkt, Rx_op, false)), READ_REG(pkt, Rx_op, false)), op_LSHIFT_100); + RzILOpPure *op_LT_105 = SLT(op_ADD_102, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_110 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_111 = NEG(op_LSHIFT_110); + RzILOpPure *op_LSHIFT_116 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_119 = SUB(op_LSHIFT_116, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_120 = ITE(op_LT_105, op_NEG_111, op_SUB_119); + RzILOpEffect *gcc_expr_121 = BRANCH(op_EQ_55, EMPTY(), set_usr_field_call_80); + + // h_tmp337 = HYB(gcc_expr_if ((sextract64(((ut64) ((st64) Rx) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1)), 0x0, 0x20) == ((st64) Rx) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) Rx) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) 
((Rt >> 0x10) & 0xffff)))) << 0x1) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_123 = SETL("h_tmp337", cond_120); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) Rx) + (((st64) ...; + RzILOpEffect *seq_124 = SEQN(2, gcc_expr_121, op_ASSIGN_hybrid_tmp_123); + + // Rx = ((st32) ((sextract64(((ut64) ((st64) Rx) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1)), 0x0, 0x20) == ((st64) Rx) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1)) ? ((st64) Rx) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1) : h_tmp337)); + RzILOpPure *op_RSHIFT_59 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_61 = LOGAND(op_RSHIFT_59, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_67 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_69 = LOGAND(op_RSHIFT_67, SN(32, 0xffff)); + RzILOpPure *op_MUL_72 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_61), DUP(op_AND_61))), CAST(16, MSB(DUP(op_AND_61)), DUP(op_AND_61))), CAST(32, MSB(CAST(16, MSB(op_AND_69), DUP(op_AND_69))), CAST(16, MSB(DUP(op_AND_69)), DUP(op_AND_69)))); + RzILOpPure *op_LSHIFT_75 = SHIFTL0(CAST(64, MSB(op_MUL_72), DUP(op_MUL_72)), SN(32, 1)); + RzILOpPure *op_ADD_77 = ADD(CAST(64, MSB(READ_REG(pkt, Rx_op, false)), READ_REG(pkt, Rx_op, false)), op_LSHIFT_75); + RzILOpPure *cond_125 = ITE(DUP(op_EQ_55), op_ADD_77, VARL("h_tmp337")); + RzILOpEffect *op_ASSIGN_127 = WRITE_REG(bundle, Rx_op, CAST(32, MSB(cond_125), DUP(cond_125))); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) Rx) + (((st ...; + RzILOpEffect *seq_128 = SEQN(2, seq_124, op_ASSIGN_127); + + RzILOpEffect *instruction_sequence = seq_128; + return instruction_sequence; +} + +// Rx += mpy(Rs.l,Rt.l):sat +RzILOpEffect *hex_il_op_m2_mpy_acc_sat_ll_s0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = 
bundle->pkt; + // READ + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_74 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((st64) Rx) + ((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff))))), 0x0, 0x20) == ((st64) Rx) + ((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) Rx) + ((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_8 = SHIFTRA(Rs, SN(32, 0)); + RzILOpPure *op_AND_10 = LOGAND(op_RSHIFT_8, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_17 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_19 = LOGAND(op_RSHIFT_17, SN(32, 0xffff)); + RzILOpPure *op_MUL_22 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_10), DUP(op_AND_10))), CAST(16, MSB(DUP(op_AND_10)), DUP(op_AND_10))), CAST(32, MSB(CAST(16, MSB(op_AND_19), DUP(op_AND_19))), CAST(16, MSB(DUP(op_AND_19)), DUP(op_AND_19)))); + RzILOpPure *op_ADD_25 = ADD(CAST(64, MSB(READ_REG(pkt, Rx_op, false)), READ_REG(pkt, Rx_op, false)), CAST(64, MSB(op_MUL_22), DUP(op_MUL_22))); + RzILOpPure *op_RSHIFT_34 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_36 = LOGAND(op_RSHIFT_34, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_42 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_44 = LOGAND(op_RSHIFT_42, SN(32, 0xffff)); + RzILOpPure *op_MUL_47 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_36), DUP(op_AND_36))), CAST(16, MSB(DUP(op_AND_36)), DUP(op_AND_36))), CAST(32, MSB(CAST(16, 
MSB(op_AND_44), DUP(op_AND_44))), CAST(16, MSB(DUP(op_AND_44)), DUP(op_AND_44)))); + RzILOpPure *op_ADD_50 = ADD(CAST(64, MSB(READ_REG(pkt, Rx_op, false)), READ_REG(pkt, Rx_op, false)), CAST(64, MSB(op_MUL_47), DUP(op_MUL_47))); + RzILOpPure *op_EQ_51 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_ADD_25), SN(32, 0), SN(32, 0x20)), op_ADD_50); + RzILOpPure *op_RSHIFT_78 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_80 = LOGAND(op_RSHIFT_78, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_86 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_88 = LOGAND(op_RSHIFT_86, SN(32, 0xffff)); + RzILOpPure *op_MUL_91 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_80), DUP(op_AND_80))), CAST(16, MSB(DUP(op_AND_80)), DUP(op_AND_80))), CAST(32, MSB(CAST(16, MSB(op_AND_88), DUP(op_AND_88))), CAST(16, MSB(DUP(op_AND_88)), DUP(op_AND_88)))); + RzILOpPure *op_ADD_94 = ADD(CAST(64, MSB(READ_REG(pkt, Rx_op, false)), READ_REG(pkt, Rx_op, false)), CAST(64, MSB(op_MUL_91), DUP(op_MUL_91))); + RzILOpPure *op_LT_97 = SLT(op_ADD_94, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_102 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_103 = NEG(op_LSHIFT_102); + RzILOpPure *op_LSHIFT_108 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_111 = SUB(op_LSHIFT_108, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_112 = ITE(op_LT_97, op_NEG_103, op_SUB_111); + RzILOpEffect *gcc_expr_113 = BRANCH(op_EQ_51, EMPTY(), set_usr_field_call_74); + + // h_tmp338 = HYB(gcc_expr_if ((sextract64(((ut64) ((st64) Rx) + ((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff))))), 0x0, 0x20) == ((st64) Rx) + ((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) Rx) + ((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_115 = SETL("h_tmp338", cond_112); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) Rx) + ((st64) ( ...; + RzILOpEffect *seq_116 = SEQN(2, gcc_expr_113, op_ASSIGN_hybrid_tmp_115); + + // Rx = ((st32) ((sextract64(((ut64) ((st64) Rx) + ((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff))))), 0x0, 0x20) == ((st64) Rx) + ((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff))))) ? ((st64) Rx) + ((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) : h_tmp338)); + RzILOpPure *op_RSHIFT_55 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_57 = LOGAND(op_RSHIFT_55, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_63 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_65 = LOGAND(op_RSHIFT_63, SN(32, 0xffff)); + RzILOpPure *op_MUL_68 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_57), DUP(op_AND_57))), CAST(16, MSB(DUP(op_AND_57)), DUP(op_AND_57))), CAST(32, MSB(CAST(16, MSB(op_AND_65), DUP(op_AND_65))), CAST(16, MSB(DUP(op_AND_65)), DUP(op_AND_65)))); + RzILOpPure *op_ADD_71 = ADD(CAST(64, MSB(READ_REG(pkt, Rx_op, false)), READ_REG(pkt, Rx_op, false)), CAST(64, MSB(op_MUL_68), DUP(op_MUL_68))); + RzILOpPure *cond_117 = ITE(DUP(op_EQ_51), op_ADD_71, VARL("h_tmp338")); + RzILOpEffect *op_ASSIGN_119 = WRITE_REG(bundle, Rx_op, CAST(32, MSB(cond_117), DUP(cond_117))); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) Rx) + ((st6 ...; + RzILOpEffect *seq_120 = SEQN(2, seq_116, op_ASSIGN_119); + + RzILOpEffect *instruction_sequence = seq_120; + return instruction_sequence; +} + +// Rx += mpy(Rs.l,Rt.l):<<1:sat +RzILOpEffect *hex_il_op_m2_mpy_acc_sat_ll_s1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = 
READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_80 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((st64) Rx) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1)), 0x0, 0x20) == ((st64) Rx) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) Rx) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_8 = SHIFTRA(Rs, SN(32, 0)); + RzILOpPure *op_AND_10 = LOGAND(op_RSHIFT_8, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_17 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_19 = LOGAND(op_RSHIFT_17, SN(32, 0xffff)); + RzILOpPure *op_MUL_22 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_10), DUP(op_AND_10))), CAST(16, MSB(DUP(op_AND_10)), DUP(op_AND_10))), CAST(32, MSB(CAST(16, MSB(op_AND_19), DUP(op_AND_19))), CAST(16, MSB(DUP(op_AND_19)), DUP(op_AND_19)))); + RzILOpPure *op_LSHIFT_25 = SHIFTL0(CAST(64, MSB(op_MUL_22), DUP(op_MUL_22)), SN(32, 1)); + RzILOpPure *op_ADD_27 = ADD(CAST(64, MSB(READ_REG(pkt, Rx_op, false)), READ_REG(pkt, Rx_op, false)), op_LSHIFT_25); + RzILOpPure *op_RSHIFT_36 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_38 = LOGAND(op_RSHIFT_36, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_44 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_46 = LOGAND(op_RSHIFT_44, SN(32, 0xffff)); + RzILOpPure *op_MUL_49 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_38), DUP(op_AND_38))), CAST(16, MSB(DUP(op_AND_38)), DUP(op_AND_38))), CAST(32, MSB(CAST(16, MSB(op_AND_46), DUP(op_AND_46))), CAST(16, 
MSB(DUP(op_AND_46)), DUP(op_AND_46)))); + RzILOpPure *op_LSHIFT_52 = SHIFTL0(CAST(64, MSB(op_MUL_49), DUP(op_MUL_49)), SN(32, 1)); + RzILOpPure *op_ADD_54 = ADD(CAST(64, MSB(READ_REG(pkt, Rx_op, false)), READ_REG(pkt, Rx_op, false)), op_LSHIFT_52); + RzILOpPure *op_EQ_55 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_ADD_27), SN(32, 0), SN(32, 0x20)), op_ADD_54); + RzILOpPure *op_RSHIFT_84 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_86 = LOGAND(op_RSHIFT_84, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_92 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_94 = LOGAND(op_RSHIFT_92, SN(32, 0xffff)); + RzILOpPure *op_MUL_97 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_86), DUP(op_AND_86))), CAST(16, MSB(DUP(op_AND_86)), DUP(op_AND_86))), CAST(32, MSB(CAST(16, MSB(op_AND_94), DUP(op_AND_94))), CAST(16, MSB(DUP(op_AND_94)), DUP(op_AND_94)))); + RzILOpPure *op_LSHIFT_100 = SHIFTL0(CAST(64, MSB(op_MUL_97), DUP(op_MUL_97)), SN(32, 1)); + RzILOpPure *op_ADD_102 = ADD(CAST(64, MSB(READ_REG(pkt, Rx_op, false)), READ_REG(pkt, Rx_op, false)), op_LSHIFT_100); + RzILOpPure *op_LT_105 = SLT(op_ADD_102, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_110 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_111 = NEG(op_LSHIFT_110); + RzILOpPure *op_LSHIFT_116 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_119 = SUB(op_LSHIFT_116, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_120 = ITE(op_LT_105, op_NEG_111, op_SUB_119); + RzILOpEffect *gcc_expr_121 = BRANCH(op_EQ_55, EMPTY(), set_usr_field_call_80); + + // h_tmp339 = HYB(gcc_expr_if ((sextract64(((ut64) ((st64) Rx) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1)), 0x0, 0x20) == ((st64) Rx) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) Rx) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt 
>> 0x0) & 0xffff)))) << 0x1) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_123 = SETL("h_tmp339", cond_120); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) Rx) + (((st64) ...; + RzILOpEffect *seq_124 = SEQN(2, gcc_expr_121, op_ASSIGN_hybrid_tmp_123); + + // Rx = ((st32) ((sextract64(((ut64) ((st64) Rx) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1)), 0x0, 0x20) == ((st64) Rx) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1)) ? ((st64) Rx) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) : h_tmp339)); + RzILOpPure *op_RSHIFT_59 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_61 = LOGAND(op_RSHIFT_59, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_67 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_69 = LOGAND(op_RSHIFT_67, SN(32, 0xffff)); + RzILOpPure *op_MUL_72 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_61), DUP(op_AND_61))), CAST(16, MSB(DUP(op_AND_61)), DUP(op_AND_61))), CAST(32, MSB(CAST(16, MSB(op_AND_69), DUP(op_AND_69))), CAST(16, MSB(DUP(op_AND_69)), DUP(op_AND_69)))); + RzILOpPure *op_LSHIFT_75 = SHIFTL0(CAST(64, MSB(op_MUL_72), DUP(op_MUL_72)), SN(32, 1)); + RzILOpPure *op_ADD_77 = ADD(CAST(64, MSB(READ_REG(pkt, Rx_op, false)), READ_REG(pkt, Rx_op, false)), op_LSHIFT_75); + RzILOpPure *cond_125 = ITE(DUP(op_EQ_55), op_ADD_77, VARL("h_tmp339")); + RzILOpEffect *op_ASSIGN_127 = WRITE_REG(bundle, Rx_op, CAST(32, MSB(cond_125), DUP(cond_125))); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) Rx) + (((st ...; + RzILOpEffect *seq_128 = SEQN(2, seq_124, op_ASSIGN_127); + + RzILOpEffect *instruction_sequence = seq_128; + return instruction_sequence; +} + +// Rd = mpy(Rs.h,Rt.h) +RzILOpEffect *hex_il_op_m2_mpy_hh_s0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + 
const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rd = ((st32) ((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff))))); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rs, SN(32, 16)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(Rt, SN(32, 16)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0xffff)); + RzILOpPure *op_MUL_19 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_7), DUP(op_AND_7))), CAST(16, MSB(DUP(op_AND_7)), DUP(op_AND_7))), CAST(32, MSB(CAST(16, MSB(op_AND_16), DUP(op_AND_16))), CAST(16, MSB(DUP(op_AND_16)), DUP(op_AND_16)))); + RzILOpEffect *op_ASSIGN_22 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(CAST(64, MSB(op_MUL_19), DUP(op_MUL_19))), CAST(64, MSB(DUP(op_MUL_19)), DUP(op_MUL_19)))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_22; + return instruction_sequence; +} + +// Rd = mpy(Rs.h,Rt.h):<<1 +RzILOpEffect *hex_il_op_m2_mpy_hh_s1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rd = ((st32) (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1)); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rs, SN(32, 16)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(Rt, SN(32, 16)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0xffff)); + RzILOpPure *op_MUL_19 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_7), DUP(op_AND_7))), CAST(16, MSB(DUP(op_AND_7)), DUP(op_AND_7))), CAST(32, MSB(CAST(16, 
MSB(op_AND_16), DUP(op_AND_16))), CAST(16, MSB(DUP(op_AND_16)), DUP(op_AND_16)))); + RzILOpPure *op_LSHIFT_22 = SHIFTL0(CAST(64, MSB(op_MUL_19), DUP(op_MUL_19)), SN(32, 1)); + RzILOpEffect *op_ASSIGN_24 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(op_LSHIFT_22), DUP(op_LSHIFT_22))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_24; + return instruction_sequence; +} + +// Rd = mpy(Rs.h,Rt.l) +RzILOpEffect *hex_il_op_m2_mpy_hl_s0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rd = ((st32) ((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff))))); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rs, SN(32, 16)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0xffff)); + RzILOpPure *op_MUL_19 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_7), DUP(op_AND_7))), CAST(16, MSB(DUP(op_AND_7)), DUP(op_AND_7))), CAST(32, MSB(CAST(16, MSB(op_AND_16), DUP(op_AND_16))), CAST(16, MSB(DUP(op_AND_16)), DUP(op_AND_16)))); + RzILOpEffect *op_ASSIGN_22 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(CAST(64, MSB(op_MUL_19), DUP(op_MUL_19))), CAST(64, MSB(DUP(op_MUL_19)), DUP(op_MUL_19)))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_22; + return instruction_sequence; +} + +// Rd = mpy(Rs.h,Rt.l):<<1 +RzILOpEffect *hex_il_op_m2_mpy_hl_s1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = 
READ_REG(pkt, Rt_op, false); + + // Rd = ((st32) (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1)); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rs, SN(32, 16)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0xffff)); + RzILOpPure *op_MUL_19 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_7), DUP(op_AND_7))), CAST(16, MSB(DUP(op_AND_7)), DUP(op_AND_7))), CAST(32, MSB(CAST(16, MSB(op_AND_16), DUP(op_AND_16))), CAST(16, MSB(DUP(op_AND_16)), DUP(op_AND_16)))); + RzILOpPure *op_LSHIFT_22 = SHIFTL0(CAST(64, MSB(op_MUL_19), DUP(op_MUL_19)), SN(32, 1)); + RzILOpEffect *op_ASSIGN_24 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(op_LSHIFT_22), DUP(op_LSHIFT_22))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_24; + return instruction_sequence; +} + +// Rd = mpy(Rs.l,Rt.h) +RzILOpEffect *hex_il_op_m2_mpy_lh_s0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rd = ((st32) ((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff))))); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rs, SN(32, 0)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(Rt, SN(32, 16)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0xffff)); + RzILOpPure *op_MUL_19 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_7), DUP(op_AND_7))), CAST(16, MSB(DUP(op_AND_7)), DUP(op_AND_7))), CAST(32, MSB(CAST(16, MSB(op_AND_16), DUP(op_AND_16))), CAST(16, MSB(DUP(op_AND_16)), DUP(op_AND_16)))); + RzILOpEffect *op_ASSIGN_22 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(CAST(64, MSB(op_MUL_19), 
DUP(op_MUL_19))), CAST(64, MSB(DUP(op_MUL_19)), DUP(op_MUL_19)))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_22; + return instruction_sequence; +} + +// Rd = mpy(Rs.l,Rt.h):<<1 +RzILOpEffect *hex_il_op_m2_mpy_lh_s1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rd = ((st32) (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1)); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rs, SN(32, 0)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(Rt, SN(32, 16)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0xffff)); + RzILOpPure *op_MUL_19 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_7), DUP(op_AND_7))), CAST(16, MSB(DUP(op_AND_7)), DUP(op_AND_7))), CAST(32, MSB(CAST(16, MSB(op_AND_16), DUP(op_AND_16))), CAST(16, MSB(DUP(op_AND_16)), DUP(op_AND_16)))); + RzILOpPure *op_LSHIFT_22 = SHIFTL0(CAST(64, MSB(op_MUL_19), DUP(op_MUL_19)), SN(32, 1)); + RzILOpEffect *op_ASSIGN_24 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(op_LSHIFT_22), DUP(op_LSHIFT_22))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_24; + return instruction_sequence; +} + +// Rd = mpy(Rs.l,Rt.l) +RzILOpEffect *hex_il_op_m2_mpy_ll_s0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rd = ((st32) ((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff))))); + RzILOpPure *op_RSHIFT_5 = 
SHIFTRA(Rs, SN(32, 0)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0xffff)); + RzILOpPure *op_MUL_19 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_7), DUP(op_AND_7))), CAST(16, MSB(DUP(op_AND_7)), DUP(op_AND_7))), CAST(32, MSB(CAST(16, MSB(op_AND_16), DUP(op_AND_16))), CAST(16, MSB(DUP(op_AND_16)), DUP(op_AND_16)))); + RzILOpEffect *op_ASSIGN_22 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(CAST(64, MSB(op_MUL_19), DUP(op_MUL_19))), CAST(64, MSB(DUP(op_MUL_19)), DUP(op_MUL_19)))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_22; + return instruction_sequence; +} + +// Rd = mpy(Rs.l,Rt.l):<<1 +RzILOpEffect *hex_il_op_m2_mpy_ll_s1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rd = ((st32) (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1)); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rs, SN(32, 0)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0xffff)); + RzILOpPure *op_MUL_19 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_7), DUP(op_AND_7))), CAST(16, MSB(DUP(op_AND_7)), DUP(op_AND_7))), CAST(32, MSB(CAST(16, MSB(op_AND_16), DUP(op_AND_16))), CAST(16, MSB(DUP(op_AND_16)), DUP(op_AND_16)))); + RzILOpPure *op_LSHIFT_22 = SHIFTL0(CAST(64, MSB(op_MUL_19), DUP(op_MUL_19)), SN(32, 1)); + RzILOpEffect *op_ASSIGN_24 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(op_LSHIFT_22), DUP(op_LSHIFT_22))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_24; + return instruction_sequence; +} + +// Rx -= 
mpy(Rs.h,Rt.h) +RzILOpEffect *hex_il_op_m2_mpy_nac_hh_s0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rx = ((st32) ((st64) Rx) - ((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff))))); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rs, SN(32, 16)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(Rt, SN(32, 16)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0xffff)); + RzILOpPure *op_MUL_19 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_7), DUP(op_AND_7))), CAST(16, MSB(DUP(op_AND_7)), DUP(op_AND_7))), CAST(32, MSB(CAST(16, MSB(op_AND_16), DUP(op_AND_16))), CAST(16, MSB(DUP(op_AND_16)), DUP(op_AND_16)))); + RzILOpPure *op_SUB_22 = SUB(CAST(64, MSB(READ_REG(pkt, Rx_op, false)), READ_REG(pkt, Rx_op, false)), CAST(64, MSB(op_MUL_19), DUP(op_MUL_19))); + RzILOpEffect *op_ASSIGN_24 = WRITE_REG(bundle, Rx_op, CAST(32, MSB(op_SUB_22), DUP(op_SUB_22))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_24; + return instruction_sequence; +} + +// Rx -= mpy(Rs.h,Rt.h):<<1 +RzILOpEffect *hex_il_op_m2_mpy_nac_hh_s1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rx = ((st32) ((st64) Rx) - (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1)); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rs, SN(32, 16)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(32, 
0xffff)); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(Rt, SN(32, 16)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0xffff)); + RzILOpPure *op_MUL_19 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_7), DUP(op_AND_7))), CAST(16, MSB(DUP(op_AND_7)), DUP(op_AND_7))), CAST(32, MSB(CAST(16, MSB(op_AND_16), DUP(op_AND_16))), CAST(16, MSB(DUP(op_AND_16)), DUP(op_AND_16)))); + RzILOpPure *op_LSHIFT_22 = SHIFTL0(CAST(64, MSB(op_MUL_19), DUP(op_MUL_19)), SN(32, 1)); + RzILOpPure *op_SUB_24 = SUB(CAST(64, MSB(READ_REG(pkt, Rx_op, false)), READ_REG(pkt, Rx_op, false)), op_LSHIFT_22); + RzILOpEffect *op_ASSIGN_26 = WRITE_REG(bundle, Rx_op, CAST(32, MSB(op_SUB_24), DUP(op_SUB_24))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_26; + return instruction_sequence; +} + +// Rx -= mpy(Rs.h,Rt.l) +RzILOpEffect *hex_il_op_m2_mpy_nac_hl_s0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rx = ((st32) ((st64) Rx) - ((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff))))); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rs, SN(32, 16)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0xffff)); + RzILOpPure *op_MUL_19 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_7), DUP(op_AND_7))), CAST(16, MSB(DUP(op_AND_7)), DUP(op_AND_7))), CAST(32, MSB(CAST(16, MSB(op_AND_16), DUP(op_AND_16))), CAST(16, MSB(DUP(op_AND_16)), DUP(op_AND_16)))); + RzILOpPure *op_SUB_22 = SUB(CAST(64, MSB(READ_REG(pkt, Rx_op, false)), READ_REG(pkt, Rx_op, false)), CAST(64, MSB(op_MUL_19), DUP(op_MUL_19))); + RzILOpEffect *op_ASSIGN_24 = WRITE_REG(bundle, Rx_op, CAST(32, MSB(op_SUB_22), 
DUP(op_SUB_22))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_24; + return instruction_sequence; +} + +// Rx -= mpy(Rs.h,Rt.l):<<1 +RzILOpEffect *hex_il_op_m2_mpy_nac_hl_s1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rx = ((st32) ((st64) Rx) - (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1)); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rs, SN(32, 16)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0xffff)); + RzILOpPure *op_MUL_19 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_7), DUP(op_AND_7))), CAST(16, MSB(DUP(op_AND_7)), DUP(op_AND_7))), CAST(32, MSB(CAST(16, MSB(op_AND_16), DUP(op_AND_16))), CAST(16, MSB(DUP(op_AND_16)), DUP(op_AND_16)))); + RzILOpPure *op_LSHIFT_22 = SHIFTL0(CAST(64, MSB(op_MUL_19), DUP(op_MUL_19)), SN(32, 1)); + RzILOpPure *op_SUB_24 = SUB(CAST(64, MSB(READ_REG(pkt, Rx_op, false)), READ_REG(pkt, Rx_op, false)), op_LSHIFT_22); + RzILOpEffect *op_ASSIGN_26 = WRITE_REG(bundle, Rx_op, CAST(32, MSB(op_SUB_24), DUP(op_SUB_24))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_26; + return instruction_sequence; +} + +// Rx -= mpy(Rs.l,Rt.h) +RzILOpEffect *hex_il_op_m2_mpy_nac_lh_s0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rx = ((st32) ((st64) Rx) - ((st64) ((st32) ((st16) 
((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff))))); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rs, SN(32, 0)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(Rt, SN(32, 16)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0xffff)); + RzILOpPure *op_MUL_19 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_7), DUP(op_AND_7))), CAST(16, MSB(DUP(op_AND_7)), DUP(op_AND_7))), CAST(32, MSB(CAST(16, MSB(op_AND_16), DUP(op_AND_16))), CAST(16, MSB(DUP(op_AND_16)), DUP(op_AND_16)))); + RzILOpPure *op_SUB_22 = SUB(CAST(64, MSB(READ_REG(pkt, Rx_op, false)), READ_REG(pkt, Rx_op, false)), CAST(64, MSB(op_MUL_19), DUP(op_MUL_19))); + RzILOpEffect *op_ASSIGN_24 = WRITE_REG(bundle, Rx_op, CAST(32, MSB(op_SUB_22), DUP(op_SUB_22))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_24; + return instruction_sequence; +} + +// Rx -= mpy(Rs.l,Rt.h):<<1 +RzILOpEffect *hex_il_op_m2_mpy_nac_lh_s1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rx = ((st32) ((st64) Rx) - (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1)); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rs, SN(32, 0)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(Rt, SN(32, 16)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0xffff)); + RzILOpPure *op_MUL_19 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_7), DUP(op_AND_7))), CAST(16, MSB(DUP(op_AND_7)), DUP(op_AND_7))), CAST(32, MSB(CAST(16, MSB(op_AND_16), DUP(op_AND_16))), CAST(16, MSB(DUP(op_AND_16)), DUP(op_AND_16)))); + RzILOpPure *op_LSHIFT_22 = SHIFTL0(CAST(64, MSB(op_MUL_19), DUP(op_MUL_19)), SN(32, 1)); + 
RzILOpPure *op_SUB_24 = SUB(CAST(64, MSB(READ_REG(pkt, Rx_op, false)), READ_REG(pkt, Rx_op, false)), op_LSHIFT_22); + RzILOpEffect *op_ASSIGN_26 = WRITE_REG(bundle, Rx_op, CAST(32, MSB(op_SUB_24), DUP(op_SUB_24))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_26; + return instruction_sequence; +} + +// Rx -= mpy(Rs.l,Rt.l) +RzILOpEffect *hex_il_op_m2_mpy_nac_ll_s0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rx = ((st32) ((st64) Rx) - ((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff))))); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rs, SN(32, 0)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0xffff)); + RzILOpPure *op_MUL_19 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_7), DUP(op_AND_7))), CAST(16, MSB(DUP(op_AND_7)), DUP(op_AND_7))), CAST(32, MSB(CAST(16, MSB(op_AND_16), DUP(op_AND_16))), CAST(16, MSB(DUP(op_AND_16)), DUP(op_AND_16)))); + RzILOpPure *op_SUB_22 = SUB(CAST(64, MSB(READ_REG(pkt, Rx_op, false)), READ_REG(pkt, Rx_op, false)), CAST(64, MSB(op_MUL_19), DUP(op_MUL_19))); + RzILOpEffect *op_ASSIGN_24 = WRITE_REG(bundle, Rx_op, CAST(32, MSB(op_SUB_22), DUP(op_SUB_22))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_24; + return instruction_sequence; +} + +// Rx -= mpy(Rs.l,Rt.l):<<1 +RzILOpEffect *hex_il_op_m2_mpy_nac_ll_s1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = 
ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rx = ((st32) ((st64) Rx) - (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1)); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rs, SN(32, 0)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0xffff)); + RzILOpPure *op_MUL_19 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_7), DUP(op_AND_7))), CAST(16, MSB(DUP(op_AND_7)), DUP(op_AND_7))), CAST(32, MSB(CAST(16, MSB(op_AND_16), DUP(op_AND_16))), CAST(16, MSB(DUP(op_AND_16)), DUP(op_AND_16)))); + RzILOpPure *op_LSHIFT_22 = SHIFTL0(CAST(64, MSB(op_MUL_19), DUP(op_MUL_19)), SN(32, 1)); + RzILOpPure *op_SUB_24 = SUB(CAST(64, MSB(READ_REG(pkt, Rx_op, false)), READ_REG(pkt, Rx_op, false)), op_LSHIFT_22); + RzILOpEffect *op_ASSIGN_26 = WRITE_REG(bundle, Rx_op, CAST(32, MSB(op_SUB_24), DUP(op_SUB_24))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_26; + return instruction_sequence; +} + +// Rx -= mpy(Rs.h,Rt.h):sat +RzILOpEffect *hex_il_op_m2_mpy_nac_sat_hh_s0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_74 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((st64) Rx) - ((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff))))), 0x0, 0x20) == ((st64) Rx) - ((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))))) {{}} else {set_usr_field(bundle, 
HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) Rx) - ((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_8 = SHIFTRA(Rs, SN(32, 16)); + RzILOpPure *op_AND_10 = LOGAND(op_RSHIFT_8, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_17 = SHIFTRA(Rt, SN(32, 16)); + RzILOpPure *op_AND_19 = LOGAND(op_RSHIFT_17, SN(32, 0xffff)); + RzILOpPure *op_MUL_22 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_10), DUP(op_AND_10))), CAST(16, MSB(DUP(op_AND_10)), DUP(op_AND_10))), CAST(32, MSB(CAST(16, MSB(op_AND_19), DUP(op_AND_19))), CAST(16, MSB(DUP(op_AND_19)), DUP(op_AND_19)))); + RzILOpPure *op_SUB_25 = SUB(CAST(64, MSB(READ_REG(pkt, Rx_op, false)), READ_REG(pkt, Rx_op, false)), CAST(64, MSB(op_MUL_22), DUP(op_MUL_22))); + RzILOpPure *op_RSHIFT_34 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_36 = LOGAND(op_RSHIFT_34, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_42 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_44 = LOGAND(op_RSHIFT_42, SN(32, 0xffff)); + RzILOpPure *op_MUL_47 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_36), DUP(op_AND_36))), CAST(16, MSB(DUP(op_AND_36)), DUP(op_AND_36))), CAST(32, MSB(CAST(16, MSB(op_AND_44), DUP(op_AND_44))), CAST(16, MSB(DUP(op_AND_44)), DUP(op_AND_44)))); + RzILOpPure *op_SUB_50 = SUB(CAST(64, MSB(READ_REG(pkt, Rx_op, false)), READ_REG(pkt, Rx_op, false)), CAST(64, MSB(op_MUL_47), DUP(op_MUL_47))); + RzILOpPure *op_EQ_51 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_SUB_25), SN(32, 0), SN(32, 0x20)), op_SUB_50); + RzILOpPure *op_RSHIFT_78 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_80 = LOGAND(op_RSHIFT_78, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_86 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_88 = LOGAND(op_RSHIFT_86, SN(32, 0xffff)); + RzILOpPure *op_MUL_91 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_80), DUP(op_AND_80))), CAST(16, MSB(DUP(op_AND_80)), DUP(op_AND_80))), CAST(32, MSB(CAST(16, MSB(op_AND_88), 
DUP(op_AND_88))), CAST(16, MSB(DUP(op_AND_88)), DUP(op_AND_88)))); + RzILOpPure *op_SUB_94 = SUB(CAST(64, MSB(READ_REG(pkt, Rx_op, false)), READ_REG(pkt, Rx_op, false)), CAST(64, MSB(op_MUL_91), DUP(op_MUL_91))); + RzILOpPure *op_LT_97 = SLT(op_SUB_94, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_102 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_103 = NEG(op_LSHIFT_102); + RzILOpPure *op_LSHIFT_108 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_111 = SUB(op_LSHIFT_108, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_112 = ITE(op_LT_97, op_NEG_103, op_SUB_111); + RzILOpEffect *gcc_expr_113 = BRANCH(op_EQ_51, EMPTY(), set_usr_field_call_74); + + // h_tmp340 = HYB(gcc_expr_if ((sextract64(((ut64) ((st64) Rx) - ((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff))))), 0x0, 0x20) == ((st64) Rx) - ((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) Rx) - ((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_115 = SETL("h_tmp340", cond_112); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) Rx) - ((st64) ( ...; + RzILOpEffect *seq_116 = SEQN(2, gcc_expr_113, op_ASSIGN_hybrid_tmp_115); + + // Rx = ((st32) ((sextract64(((ut64) ((st64) Rx) - ((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff))))), 0x0, 0x20) == ((st64) Rx) - ((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff))))) ? 
((st64) Rx) - ((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) : h_tmp340)); + RzILOpPure *op_RSHIFT_55 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_57 = LOGAND(op_RSHIFT_55, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_63 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_65 = LOGAND(op_RSHIFT_63, SN(32, 0xffff)); + RzILOpPure *op_MUL_68 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_57), DUP(op_AND_57))), CAST(16, MSB(DUP(op_AND_57)), DUP(op_AND_57))), CAST(32, MSB(CAST(16, MSB(op_AND_65), DUP(op_AND_65))), CAST(16, MSB(DUP(op_AND_65)), DUP(op_AND_65)))); + RzILOpPure *op_SUB_71 = SUB(CAST(64, MSB(READ_REG(pkt, Rx_op, false)), READ_REG(pkt, Rx_op, false)), CAST(64, MSB(op_MUL_68), DUP(op_MUL_68))); + RzILOpPure *cond_117 = ITE(DUP(op_EQ_51), op_SUB_71, VARL("h_tmp340")); + RzILOpEffect *op_ASSIGN_119 = WRITE_REG(bundle, Rx_op, CAST(32, MSB(cond_117), DUP(cond_117))); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) Rx) - ((st6 ...; + RzILOpEffect *seq_120 = SEQN(2, seq_116, op_ASSIGN_119); + + RzILOpEffect *instruction_sequence = seq_120; + return instruction_sequence; +} + +// Rx -= mpy(Rs.h,Rt.h):<<1:sat +RzILOpEffect *hex_il_op_m2_mpy_nac_sat_hh_s1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_80 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((st64) Rx) - (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1)), 0x0, 0x20) == ((st64) Rx) - (((st64) ((st32) ((st16) ((Rs >> 0x10) & 
0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) Rx) - (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_8 = SHIFTRA(Rs, SN(32, 16)); + RzILOpPure *op_AND_10 = LOGAND(op_RSHIFT_8, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_17 = SHIFTRA(Rt, SN(32, 16)); + RzILOpPure *op_AND_19 = LOGAND(op_RSHIFT_17, SN(32, 0xffff)); + RzILOpPure *op_MUL_22 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_10), DUP(op_AND_10))), CAST(16, MSB(DUP(op_AND_10)), DUP(op_AND_10))), CAST(32, MSB(CAST(16, MSB(op_AND_19), DUP(op_AND_19))), CAST(16, MSB(DUP(op_AND_19)), DUP(op_AND_19)))); + RzILOpPure *op_LSHIFT_25 = SHIFTL0(CAST(64, MSB(op_MUL_22), DUP(op_MUL_22)), SN(32, 1)); + RzILOpPure *op_SUB_27 = SUB(CAST(64, MSB(READ_REG(pkt, Rx_op, false)), READ_REG(pkt, Rx_op, false)), op_LSHIFT_25); + RzILOpPure *op_RSHIFT_36 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_38 = LOGAND(op_RSHIFT_36, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_44 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_46 = LOGAND(op_RSHIFT_44, SN(32, 0xffff)); + RzILOpPure *op_MUL_49 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_38), DUP(op_AND_38))), CAST(16, MSB(DUP(op_AND_38)), DUP(op_AND_38))), CAST(32, MSB(CAST(16, MSB(op_AND_46), DUP(op_AND_46))), CAST(16, MSB(DUP(op_AND_46)), DUP(op_AND_46)))); + RzILOpPure *op_LSHIFT_52 = SHIFTL0(CAST(64, MSB(op_MUL_49), DUP(op_MUL_49)), SN(32, 1)); + RzILOpPure *op_SUB_54 = SUB(CAST(64, MSB(READ_REG(pkt, Rx_op, false)), READ_REG(pkt, Rx_op, false)), op_LSHIFT_52); + RzILOpPure *op_EQ_55 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_SUB_27), SN(32, 0), SN(32, 0x20)), op_SUB_54); + RzILOpPure *op_RSHIFT_84 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_86 = LOGAND(op_RSHIFT_84, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_92 = SHIFTRA(DUP(Rt), SN(32, 16)); + 
RzILOpPure *op_AND_94 = LOGAND(op_RSHIFT_92, SN(32, 0xffff)); + RzILOpPure *op_MUL_97 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_86), DUP(op_AND_86))), CAST(16, MSB(DUP(op_AND_86)), DUP(op_AND_86))), CAST(32, MSB(CAST(16, MSB(op_AND_94), DUP(op_AND_94))), CAST(16, MSB(DUP(op_AND_94)), DUP(op_AND_94)))); + RzILOpPure *op_LSHIFT_100 = SHIFTL0(CAST(64, MSB(op_MUL_97), DUP(op_MUL_97)), SN(32, 1)); + RzILOpPure *op_SUB_102 = SUB(CAST(64, MSB(READ_REG(pkt, Rx_op, false)), READ_REG(pkt, Rx_op, false)), op_LSHIFT_100); + RzILOpPure *op_LT_105 = SLT(op_SUB_102, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_110 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_111 = NEG(op_LSHIFT_110); + RzILOpPure *op_LSHIFT_116 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_119 = SUB(op_LSHIFT_116, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_120 = ITE(op_LT_105, op_NEG_111, op_SUB_119); + RzILOpEffect *gcc_expr_121 = BRANCH(op_EQ_55, EMPTY(), set_usr_field_call_80); + + // h_tmp341 = HYB(gcc_expr_if ((sextract64(((ut64) ((st64) Rx) - (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1)), 0x0, 0x20) == ((st64) Rx) - (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) Rx) - (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_123 = SETL("h_tmp341", cond_120); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) Rx) - (((st64) ...; + RzILOpEffect *seq_124 = SEQN(2, gcc_expr_121, op_ASSIGN_hybrid_tmp_123); + + // Rx = ((st32) ((sextract64(((ut64) ((st64) Rx) - (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1)), 0x0, 0x20) == ((st64) Rx) - (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1)) ? ((st64) Rx) - (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1) : h_tmp341)); + RzILOpPure *op_RSHIFT_59 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_61 = LOGAND(op_RSHIFT_59, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_67 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_69 = LOGAND(op_RSHIFT_67, SN(32, 0xffff)); + RzILOpPure *op_MUL_72 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_61), DUP(op_AND_61))), CAST(16, MSB(DUP(op_AND_61)), DUP(op_AND_61))), CAST(32, MSB(CAST(16, MSB(op_AND_69), DUP(op_AND_69))), CAST(16, MSB(DUP(op_AND_69)), DUP(op_AND_69)))); + RzILOpPure *op_LSHIFT_75 = SHIFTL0(CAST(64, MSB(op_MUL_72), DUP(op_MUL_72)), SN(32, 1)); + RzILOpPure *op_SUB_77 = SUB(CAST(64, MSB(READ_REG(pkt, Rx_op, false)), READ_REG(pkt, Rx_op, false)), op_LSHIFT_75); + RzILOpPure *cond_125 = ITE(DUP(op_EQ_55), op_SUB_77, VARL("h_tmp341")); + RzILOpEffect *op_ASSIGN_127 = WRITE_REG(bundle, Rx_op, CAST(32, MSB(cond_125), DUP(cond_125))); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) Rx) - (((st ...; + RzILOpEffect *seq_128 = SEQN(2, seq_124, op_ASSIGN_127); + + RzILOpEffect *instruction_sequence = seq_128; + return instruction_sequence; +} + +// Rx -= mpy(Rs.h,Rt.l):sat +RzILOpEffect *hex_il_op_m2_mpy_nac_sat_hl_s0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rx_op = 
ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_74 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((st64) Rx) - ((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff))))), 0x0, 0x20) == ((st64) Rx) - ((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) Rx) - ((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_8 = SHIFTRA(Rs, SN(32, 16)); + RzILOpPure *op_AND_10 = LOGAND(op_RSHIFT_8, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_17 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_19 = LOGAND(op_RSHIFT_17, SN(32, 0xffff)); + RzILOpPure *op_MUL_22 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_10), DUP(op_AND_10))), CAST(16, MSB(DUP(op_AND_10)), DUP(op_AND_10))), CAST(32, MSB(CAST(16, MSB(op_AND_19), DUP(op_AND_19))), CAST(16, MSB(DUP(op_AND_19)), DUP(op_AND_19)))); + RzILOpPure *op_SUB_25 = SUB(CAST(64, MSB(READ_REG(pkt, Rx_op, false)), READ_REG(pkt, Rx_op, false)), CAST(64, MSB(op_MUL_22), DUP(op_MUL_22))); + RzILOpPure *op_RSHIFT_34 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_36 = LOGAND(op_RSHIFT_34, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_42 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_44 = LOGAND(op_RSHIFT_42, SN(32, 0xffff)); + RzILOpPure *op_MUL_47 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_36), DUP(op_AND_36))), CAST(16, MSB(DUP(op_AND_36)), DUP(op_AND_36))), CAST(32, MSB(CAST(16, MSB(op_AND_44), DUP(op_AND_44))), CAST(16, 
MSB(DUP(op_AND_44)), DUP(op_AND_44)))); + RzILOpPure *op_SUB_50 = SUB(CAST(64, MSB(READ_REG(pkt, Rx_op, false)), READ_REG(pkt, Rx_op, false)), CAST(64, MSB(op_MUL_47), DUP(op_MUL_47))); + RzILOpPure *op_EQ_51 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_SUB_25), SN(32, 0), SN(32, 0x20)), op_SUB_50); + RzILOpPure *op_RSHIFT_78 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_80 = LOGAND(op_RSHIFT_78, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_86 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_88 = LOGAND(op_RSHIFT_86, SN(32, 0xffff)); + RzILOpPure *op_MUL_91 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_80), DUP(op_AND_80))), CAST(16, MSB(DUP(op_AND_80)), DUP(op_AND_80))), CAST(32, MSB(CAST(16, MSB(op_AND_88), DUP(op_AND_88))), CAST(16, MSB(DUP(op_AND_88)), DUP(op_AND_88)))); + RzILOpPure *op_SUB_94 = SUB(CAST(64, MSB(READ_REG(pkt, Rx_op, false)), READ_REG(pkt, Rx_op, false)), CAST(64, MSB(op_MUL_91), DUP(op_MUL_91))); + RzILOpPure *op_LT_97 = SLT(op_SUB_94, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_102 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_103 = NEG(op_LSHIFT_102); + RzILOpPure *op_LSHIFT_108 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_111 = SUB(op_LSHIFT_108, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_112 = ITE(op_LT_97, op_NEG_103, op_SUB_111); + RzILOpEffect *gcc_expr_113 = BRANCH(op_EQ_51, EMPTY(), set_usr_field_call_74); + + // h_tmp342 = HYB(gcc_expr_if ((sextract64(((ut64) ((st64) Rx) - ((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff))))), 0x0, 0x20) == ((st64) Rx) - ((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) Rx) - ((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_115 = SETL("h_tmp342", cond_112); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) Rx) - ((st64) ( ...; + RzILOpEffect *seq_116 = SEQN(2, gcc_expr_113, op_ASSIGN_hybrid_tmp_115); + + // Rx = ((st32) ((sextract64(((ut64) ((st64) Rx) - ((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff))))), 0x0, 0x20) == ((st64) Rx) - ((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff))))) ? ((st64) Rx) - ((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) : h_tmp342)); + RzILOpPure *op_RSHIFT_55 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_57 = LOGAND(op_RSHIFT_55, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_63 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_65 = LOGAND(op_RSHIFT_63, SN(32, 0xffff)); + RzILOpPure *op_MUL_68 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_57), DUP(op_AND_57))), CAST(16, MSB(DUP(op_AND_57)), DUP(op_AND_57))), CAST(32, MSB(CAST(16, MSB(op_AND_65), DUP(op_AND_65))), CAST(16, MSB(DUP(op_AND_65)), DUP(op_AND_65)))); + RzILOpPure *op_SUB_71 = SUB(CAST(64, MSB(READ_REG(pkt, Rx_op, false)), READ_REG(pkt, Rx_op, false)), CAST(64, MSB(op_MUL_68), DUP(op_MUL_68))); + RzILOpPure *cond_117 = ITE(DUP(op_EQ_51), op_SUB_71, VARL("h_tmp342")); + RzILOpEffect *op_ASSIGN_119 = WRITE_REG(bundle, Rx_op, CAST(32, MSB(cond_117), DUP(cond_117))); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) Rx) - ((st6 ...; + RzILOpEffect *seq_120 = SEQN(2, seq_116, op_ASSIGN_119); + + RzILOpEffect *instruction_sequence = seq_120; + return instruction_sequence; +} + +// Rx -= mpy(Rs.h,Rt.l):<<1:sat +RzILOpEffect *hex_il_op_m2_mpy_nac_sat_hl_s1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = 
READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_80 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((st64) Rx) - (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1)), 0x0, 0x20) == ((st64) Rx) - (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) Rx) - (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_8 = SHIFTRA(Rs, SN(32, 16)); + RzILOpPure *op_AND_10 = LOGAND(op_RSHIFT_8, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_17 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_19 = LOGAND(op_RSHIFT_17, SN(32, 0xffff)); + RzILOpPure *op_MUL_22 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_10), DUP(op_AND_10))), CAST(16, MSB(DUP(op_AND_10)), DUP(op_AND_10))), CAST(32, MSB(CAST(16, MSB(op_AND_19), DUP(op_AND_19))), CAST(16, MSB(DUP(op_AND_19)), DUP(op_AND_19)))); + RzILOpPure *op_LSHIFT_25 = SHIFTL0(CAST(64, MSB(op_MUL_22), DUP(op_MUL_22)), SN(32, 1)); + RzILOpPure *op_SUB_27 = SUB(CAST(64, MSB(READ_REG(pkt, Rx_op, false)), READ_REG(pkt, Rx_op, false)), op_LSHIFT_25); + RzILOpPure *op_RSHIFT_36 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_38 = LOGAND(op_RSHIFT_36, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_44 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_46 = LOGAND(op_RSHIFT_44, SN(32, 0xffff)); + RzILOpPure *op_MUL_49 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_38), DUP(op_AND_38))), CAST(16, MSB(DUP(op_AND_38)), DUP(op_AND_38))), CAST(32, MSB(CAST(16, MSB(op_AND_46), DUP(op_AND_46))), CAST(16, 
MSB(DUP(op_AND_46)), DUP(op_AND_46)))); + RzILOpPure *op_LSHIFT_52 = SHIFTL0(CAST(64, MSB(op_MUL_49), DUP(op_MUL_49)), SN(32, 1)); + RzILOpPure *op_SUB_54 = SUB(CAST(64, MSB(READ_REG(pkt, Rx_op, false)), READ_REG(pkt, Rx_op, false)), op_LSHIFT_52); + RzILOpPure *op_EQ_55 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_SUB_27), SN(32, 0), SN(32, 0x20)), op_SUB_54); + RzILOpPure *op_RSHIFT_84 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_86 = LOGAND(op_RSHIFT_84, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_92 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_94 = LOGAND(op_RSHIFT_92, SN(32, 0xffff)); + RzILOpPure *op_MUL_97 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_86), DUP(op_AND_86))), CAST(16, MSB(DUP(op_AND_86)), DUP(op_AND_86))), CAST(32, MSB(CAST(16, MSB(op_AND_94), DUP(op_AND_94))), CAST(16, MSB(DUP(op_AND_94)), DUP(op_AND_94)))); + RzILOpPure *op_LSHIFT_100 = SHIFTL0(CAST(64, MSB(op_MUL_97), DUP(op_MUL_97)), SN(32, 1)); + RzILOpPure *op_SUB_102 = SUB(CAST(64, MSB(READ_REG(pkt, Rx_op, false)), READ_REG(pkt, Rx_op, false)), op_LSHIFT_100); + RzILOpPure *op_LT_105 = SLT(op_SUB_102, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_110 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_111 = NEG(op_LSHIFT_110); + RzILOpPure *op_LSHIFT_116 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_119 = SUB(op_LSHIFT_116, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_120 = ITE(op_LT_105, op_NEG_111, op_SUB_119); + RzILOpEffect *gcc_expr_121 = BRANCH(op_EQ_55, EMPTY(), set_usr_field_call_80); + + // h_tmp343 = HYB(gcc_expr_if ((sextract64(((ut64) ((st64) Rx) - (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1)), 0x0, 0x20) == ((st64) Rx) - (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) Rx) - (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) 
((Rt >> 0x0) & 0xffff)))) << 0x1) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_123 = SETL("h_tmp343", cond_120); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) Rx) - (((st64) ...; + RzILOpEffect *seq_124 = SEQN(2, gcc_expr_121, op_ASSIGN_hybrid_tmp_123); + + // Rx = ((st32) ((sextract64(((ut64) ((st64) Rx) - (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1)), 0x0, 0x20) == ((st64) Rx) - (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1)) ? ((st64) Rx) - (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) : h_tmp343)); + RzILOpPure *op_RSHIFT_59 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_61 = LOGAND(op_RSHIFT_59, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_67 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_69 = LOGAND(op_RSHIFT_67, SN(32, 0xffff)); + RzILOpPure *op_MUL_72 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_61), DUP(op_AND_61))), CAST(16, MSB(DUP(op_AND_61)), DUP(op_AND_61))), CAST(32, MSB(CAST(16, MSB(op_AND_69), DUP(op_AND_69))), CAST(16, MSB(DUP(op_AND_69)), DUP(op_AND_69)))); + RzILOpPure *op_LSHIFT_75 = SHIFTL0(CAST(64, MSB(op_MUL_72), DUP(op_MUL_72)), SN(32, 1)); + RzILOpPure *op_SUB_77 = SUB(CAST(64, MSB(READ_REG(pkt, Rx_op, false)), READ_REG(pkt, Rx_op, false)), op_LSHIFT_75); + RzILOpPure *cond_125 = ITE(DUP(op_EQ_55), op_SUB_77, VARL("h_tmp343")); + RzILOpEffect *op_ASSIGN_127 = WRITE_REG(bundle, Rx_op, CAST(32, MSB(cond_125), DUP(cond_125))); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) Rx) - (((st ...; + RzILOpEffect *seq_128 = SEQN(2, seq_124, op_ASSIGN_127); + + RzILOpEffect *instruction_sequence = seq_128; + return instruction_sequence; +} + +// Rx -= mpy(Rs.l,Rt.h):sat +RzILOpEffect *hex_il_op_m2_mpy_nac_sat_lh_s0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = 
bundle->pkt; + // READ + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_74 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((st64) Rx) - ((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff))))), 0x0, 0x20) == ((st64) Rx) - ((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) Rx) - ((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_8 = SHIFTRA(Rs, SN(32, 0)); + RzILOpPure *op_AND_10 = LOGAND(op_RSHIFT_8, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_17 = SHIFTRA(Rt, SN(32, 16)); + RzILOpPure *op_AND_19 = LOGAND(op_RSHIFT_17, SN(32, 0xffff)); + RzILOpPure *op_MUL_22 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_10), DUP(op_AND_10))), CAST(16, MSB(DUP(op_AND_10)), DUP(op_AND_10))), CAST(32, MSB(CAST(16, MSB(op_AND_19), DUP(op_AND_19))), CAST(16, MSB(DUP(op_AND_19)), DUP(op_AND_19)))); + RzILOpPure *op_SUB_25 = SUB(CAST(64, MSB(READ_REG(pkt, Rx_op, false)), READ_REG(pkt, Rx_op, false)), CAST(64, MSB(op_MUL_22), DUP(op_MUL_22))); + RzILOpPure *op_RSHIFT_34 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_36 = LOGAND(op_RSHIFT_34, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_42 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_44 = LOGAND(op_RSHIFT_42, SN(32, 0xffff)); + RzILOpPure *op_MUL_47 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_36), DUP(op_AND_36))), CAST(16, MSB(DUP(op_AND_36)), DUP(op_AND_36))), CAST(32, 
MSB(CAST(16, MSB(op_AND_44), DUP(op_AND_44))), CAST(16, MSB(DUP(op_AND_44)), DUP(op_AND_44)))); + RzILOpPure *op_SUB_50 = SUB(CAST(64, MSB(READ_REG(pkt, Rx_op, false)), READ_REG(pkt, Rx_op, false)), CAST(64, MSB(op_MUL_47), DUP(op_MUL_47))); + RzILOpPure *op_EQ_51 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_SUB_25), SN(32, 0), SN(32, 0x20)), op_SUB_50); + RzILOpPure *op_RSHIFT_78 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_80 = LOGAND(op_RSHIFT_78, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_86 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_88 = LOGAND(op_RSHIFT_86, SN(32, 0xffff)); + RzILOpPure *op_MUL_91 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_80), DUP(op_AND_80))), CAST(16, MSB(DUP(op_AND_80)), DUP(op_AND_80))), CAST(32, MSB(CAST(16, MSB(op_AND_88), DUP(op_AND_88))), CAST(16, MSB(DUP(op_AND_88)), DUP(op_AND_88)))); + RzILOpPure *op_SUB_94 = SUB(CAST(64, MSB(READ_REG(pkt, Rx_op, false)), READ_REG(pkt, Rx_op, false)), CAST(64, MSB(op_MUL_91), DUP(op_MUL_91))); + RzILOpPure *op_LT_97 = SLT(op_SUB_94, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_102 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_103 = NEG(op_LSHIFT_102); + RzILOpPure *op_LSHIFT_108 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_111 = SUB(op_LSHIFT_108, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_112 = ITE(op_LT_97, op_NEG_103, op_SUB_111); + RzILOpEffect *gcc_expr_113 = BRANCH(op_EQ_51, EMPTY(), set_usr_field_call_74); + + // h_tmp344 = HYB(gcc_expr_if ((sextract64(((ut64) ((st64) Rx) - ((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff))))), 0x0, 0x20) == ((st64) Rx) - ((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) Rx) - ((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_115 = SETL("h_tmp344", cond_112); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) Rx) - ((st64) ( ...; + RzILOpEffect *seq_116 = SEQN(2, gcc_expr_113, op_ASSIGN_hybrid_tmp_115); + + // Rx = ((st32) ((sextract64(((ut64) ((st64) Rx) - ((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff))))), 0x0, 0x20) == ((st64) Rx) - ((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff))))) ? ((st64) Rx) - ((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) : h_tmp344)); + RzILOpPure *op_RSHIFT_55 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_57 = LOGAND(op_RSHIFT_55, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_63 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_65 = LOGAND(op_RSHIFT_63, SN(32, 0xffff)); + RzILOpPure *op_MUL_68 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_57), DUP(op_AND_57))), CAST(16, MSB(DUP(op_AND_57)), DUP(op_AND_57))), CAST(32, MSB(CAST(16, MSB(op_AND_65), DUP(op_AND_65))), CAST(16, MSB(DUP(op_AND_65)), DUP(op_AND_65)))); + RzILOpPure *op_SUB_71 = SUB(CAST(64, MSB(READ_REG(pkt, Rx_op, false)), READ_REG(pkt, Rx_op, false)), CAST(64, MSB(op_MUL_68), DUP(op_MUL_68))); + RzILOpPure *cond_117 = ITE(DUP(op_EQ_51), op_SUB_71, VARL("h_tmp344")); + RzILOpEffect *op_ASSIGN_119 = WRITE_REG(bundle, Rx_op, CAST(32, MSB(cond_117), DUP(cond_117))); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) Rx) - ((st6 ...; + RzILOpEffect *seq_120 = SEQN(2, seq_116, op_ASSIGN_119); + + RzILOpEffect *instruction_sequence = seq_120; + return instruction_sequence; +} + +// Rx -= mpy(Rs.l,Rt.h):<<1:sat +RzILOpEffect *hex_il_op_m2_mpy_nac_sat_lh_s1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = 
READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_80 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((st64) Rx) - (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1)), 0x0, 0x20) == ((st64) Rx) - (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) Rx) - (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_8 = SHIFTRA(Rs, SN(32, 0)); + RzILOpPure *op_AND_10 = LOGAND(op_RSHIFT_8, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_17 = SHIFTRA(Rt, SN(32, 16)); + RzILOpPure *op_AND_19 = LOGAND(op_RSHIFT_17, SN(32, 0xffff)); + RzILOpPure *op_MUL_22 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_10), DUP(op_AND_10))), CAST(16, MSB(DUP(op_AND_10)), DUP(op_AND_10))), CAST(32, MSB(CAST(16, MSB(op_AND_19), DUP(op_AND_19))), CAST(16, MSB(DUP(op_AND_19)), DUP(op_AND_19)))); + RzILOpPure *op_LSHIFT_25 = SHIFTL0(CAST(64, MSB(op_MUL_22), DUP(op_MUL_22)), SN(32, 1)); + RzILOpPure *op_SUB_27 = SUB(CAST(64, MSB(READ_REG(pkt, Rx_op, false)), READ_REG(pkt, Rx_op, false)), op_LSHIFT_25); + RzILOpPure *op_RSHIFT_36 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_38 = LOGAND(op_RSHIFT_36, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_44 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_46 = LOGAND(op_RSHIFT_44, SN(32, 0xffff)); + RzILOpPure *op_MUL_49 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_38), DUP(op_AND_38))), CAST(16, MSB(DUP(op_AND_38)), DUP(op_AND_38))), CAST(32, MSB(CAST(16, MSB(op_AND_46), DUP(op_AND_46))), CAST(16, 
MSB(DUP(op_AND_46)), DUP(op_AND_46)))); + RzILOpPure *op_LSHIFT_52 = SHIFTL0(CAST(64, MSB(op_MUL_49), DUP(op_MUL_49)), SN(32, 1)); + RzILOpPure *op_SUB_54 = SUB(CAST(64, MSB(READ_REG(pkt, Rx_op, false)), READ_REG(pkt, Rx_op, false)), op_LSHIFT_52); + RzILOpPure *op_EQ_55 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_SUB_27), SN(32, 0), SN(32, 0x20)), op_SUB_54); + RzILOpPure *op_RSHIFT_84 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_86 = LOGAND(op_RSHIFT_84, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_92 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_94 = LOGAND(op_RSHIFT_92, SN(32, 0xffff)); + RzILOpPure *op_MUL_97 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_86), DUP(op_AND_86))), CAST(16, MSB(DUP(op_AND_86)), DUP(op_AND_86))), CAST(32, MSB(CAST(16, MSB(op_AND_94), DUP(op_AND_94))), CAST(16, MSB(DUP(op_AND_94)), DUP(op_AND_94)))); + RzILOpPure *op_LSHIFT_100 = SHIFTL0(CAST(64, MSB(op_MUL_97), DUP(op_MUL_97)), SN(32, 1)); + RzILOpPure *op_SUB_102 = SUB(CAST(64, MSB(READ_REG(pkt, Rx_op, false)), READ_REG(pkt, Rx_op, false)), op_LSHIFT_100); + RzILOpPure *op_LT_105 = SLT(op_SUB_102, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_110 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_111 = NEG(op_LSHIFT_110); + RzILOpPure *op_LSHIFT_116 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_119 = SUB(op_LSHIFT_116, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_120 = ITE(op_LT_105, op_NEG_111, op_SUB_119); + RzILOpEffect *gcc_expr_121 = BRANCH(op_EQ_55, EMPTY(), set_usr_field_call_80); + + // h_tmp345 = HYB(gcc_expr_if ((sextract64(((ut64) ((st64) Rx) - (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1)), 0x0, 0x20) == ((st64) Rx) - (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) Rx) - (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) 
((Rt >> 0x10) & 0xffff)))) << 0x1) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_123 = SETL("h_tmp345", cond_120); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) Rx) - (((st64) ...; + RzILOpEffect *seq_124 = SEQN(2, gcc_expr_121, op_ASSIGN_hybrid_tmp_123); + + // Rx = ((st32) ((sextract64(((ut64) ((st64) Rx) - (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1)), 0x0, 0x20) == ((st64) Rx) - (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1)) ? ((st64) Rx) - (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1) : h_tmp345)); + RzILOpPure *op_RSHIFT_59 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_61 = LOGAND(op_RSHIFT_59, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_67 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_69 = LOGAND(op_RSHIFT_67, SN(32, 0xffff)); + RzILOpPure *op_MUL_72 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_61), DUP(op_AND_61))), CAST(16, MSB(DUP(op_AND_61)), DUP(op_AND_61))), CAST(32, MSB(CAST(16, MSB(op_AND_69), DUP(op_AND_69))), CAST(16, MSB(DUP(op_AND_69)), DUP(op_AND_69)))); + RzILOpPure *op_LSHIFT_75 = SHIFTL0(CAST(64, MSB(op_MUL_72), DUP(op_MUL_72)), SN(32, 1)); + RzILOpPure *op_SUB_77 = SUB(CAST(64, MSB(READ_REG(pkt, Rx_op, false)), READ_REG(pkt, Rx_op, false)), op_LSHIFT_75); + RzILOpPure *cond_125 = ITE(DUP(op_EQ_55), op_SUB_77, VARL("h_tmp345")); + RzILOpEffect *op_ASSIGN_127 = WRITE_REG(bundle, Rx_op, CAST(32, MSB(cond_125), DUP(cond_125))); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) Rx) - (((st ...; + RzILOpEffect *seq_128 = SEQN(2, seq_124, op_ASSIGN_127); + + RzILOpEffect *instruction_sequence = seq_128; + return instruction_sequence; +} + +// Rx -= mpy(Rs.l,Rt.l):sat +RzILOpEffect *hex_il_op_m2_mpy_nac_sat_ll_s0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = 
bundle->pkt; + // READ + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_74 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((st64) Rx) - ((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff))))), 0x0, 0x20) == ((st64) Rx) - ((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) Rx) - ((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_8 = SHIFTRA(Rs, SN(32, 0)); + RzILOpPure *op_AND_10 = LOGAND(op_RSHIFT_8, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_17 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_19 = LOGAND(op_RSHIFT_17, SN(32, 0xffff)); + RzILOpPure *op_MUL_22 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_10), DUP(op_AND_10))), CAST(16, MSB(DUP(op_AND_10)), DUP(op_AND_10))), CAST(32, MSB(CAST(16, MSB(op_AND_19), DUP(op_AND_19))), CAST(16, MSB(DUP(op_AND_19)), DUP(op_AND_19)))); + RzILOpPure *op_SUB_25 = SUB(CAST(64, MSB(READ_REG(pkt, Rx_op, false)), READ_REG(pkt, Rx_op, false)), CAST(64, MSB(op_MUL_22), DUP(op_MUL_22))); + RzILOpPure *op_RSHIFT_34 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_36 = LOGAND(op_RSHIFT_34, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_42 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_44 = LOGAND(op_RSHIFT_42, SN(32, 0xffff)); + RzILOpPure *op_MUL_47 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_36), DUP(op_AND_36))), CAST(16, MSB(DUP(op_AND_36)), DUP(op_AND_36))), CAST(32, MSB(CAST(16, 
MSB(op_AND_44), DUP(op_AND_44))), CAST(16, MSB(DUP(op_AND_44)), DUP(op_AND_44)))); + RzILOpPure *op_SUB_50 = SUB(CAST(64, MSB(READ_REG(pkt, Rx_op, false)), READ_REG(pkt, Rx_op, false)), CAST(64, MSB(op_MUL_47), DUP(op_MUL_47))); + RzILOpPure *op_EQ_51 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_SUB_25), SN(32, 0), SN(32, 0x20)), op_SUB_50); + RzILOpPure *op_RSHIFT_78 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_80 = LOGAND(op_RSHIFT_78, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_86 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_88 = LOGAND(op_RSHIFT_86, SN(32, 0xffff)); + RzILOpPure *op_MUL_91 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_80), DUP(op_AND_80))), CAST(16, MSB(DUP(op_AND_80)), DUP(op_AND_80))), CAST(32, MSB(CAST(16, MSB(op_AND_88), DUP(op_AND_88))), CAST(16, MSB(DUP(op_AND_88)), DUP(op_AND_88)))); + RzILOpPure *op_SUB_94 = SUB(CAST(64, MSB(READ_REG(pkt, Rx_op, false)), READ_REG(pkt, Rx_op, false)), CAST(64, MSB(op_MUL_91), DUP(op_MUL_91))); + RzILOpPure *op_LT_97 = SLT(op_SUB_94, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_102 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_103 = NEG(op_LSHIFT_102); + RzILOpPure *op_LSHIFT_108 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_111 = SUB(op_LSHIFT_108, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_112 = ITE(op_LT_97, op_NEG_103, op_SUB_111); + RzILOpEffect *gcc_expr_113 = BRANCH(op_EQ_51, EMPTY(), set_usr_field_call_74); + + // h_tmp346 = HYB(gcc_expr_if ((sextract64(((ut64) ((st64) Rx) - ((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff))))), 0x0, 0x20) == ((st64) Rx) - ((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) Rx) - ((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_115 = SETL("h_tmp346", cond_112); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) Rx) - ((st64) ( ...; + RzILOpEffect *seq_116 = SEQN(2, gcc_expr_113, op_ASSIGN_hybrid_tmp_115); + + // Rx = ((st32) ((sextract64(((ut64) ((st64) Rx) - ((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff))))), 0x0, 0x20) == ((st64) Rx) - ((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff))))) ? ((st64) Rx) - ((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) : h_tmp346)); + RzILOpPure *op_RSHIFT_55 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_57 = LOGAND(op_RSHIFT_55, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_63 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_65 = LOGAND(op_RSHIFT_63, SN(32, 0xffff)); + RzILOpPure *op_MUL_68 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_57), DUP(op_AND_57))), CAST(16, MSB(DUP(op_AND_57)), DUP(op_AND_57))), CAST(32, MSB(CAST(16, MSB(op_AND_65), DUP(op_AND_65))), CAST(16, MSB(DUP(op_AND_65)), DUP(op_AND_65)))); + RzILOpPure *op_SUB_71 = SUB(CAST(64, MSB(READ_REG(pkt, Rx_op, false)), READ_REG(pkt, Rx_op, false)), CAST(64, MSB(op_MUL_68), DUP(op_MUL_68))); + RzILOpPure *cond_117 = ITE(DUP(op_EQ_51), op_SUB_71, VARL("h_tmp346")); + RzILOpEffect *op_ASSIGN_119 = WRITE_REG(bundle, Rx_op, CAST(32, MSB(cond_117), DUP(cond_117))); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) Rx) - ((st6 ...; + RzILOpEffect *seq_120 = SEQN(2, seq_116, op_ASSIGN_119); + + RzILOpEffect *instruction_sequence = seq_120; + return instruction_sequence; +} + +// Rx -= mpy(Rs.l,Rt.l):<<1:sat +RzILOpEffect *hex_il_op_m2_mpy_nac_sat_ll_s1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = 
READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_80 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((st64) Rx) - (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1)), 0x0, 0x20) == ((st64) Rx) - (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) Rx) - (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_8 = SHIFTRA(Rs, SN(32, 0)); + RzILOpPure *op_AND_10 = LOGAND(op_RSHIFT_8, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_17 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_19 = LOGAND(op_RSHIFT_17, SN(32, 0xffff)); + RzILOpPure *op_MUL_22 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_10), DUP(op_AND_10))), CAST(16, MSB(DUP(op_AND_10)), DUP(op_AND_10))), CAST(32, MSB(CAST(16, MSB(op_AND_19), DUP(op_AND_19))), CAST(16, MSB(DUP(op_AND_19)), DUP(op_AND_19)))); + RzILOpPure *op_LSHIFT_25 = SHIFTL0(CAST(64, MSB(op_MUL_22), DUP(op_MUL_22)), SN(32, 1)); + RzILOpPure *op_SUB_27 = SUB(CAST(64, MSB(READ_REG(pkt, Rx_op, false)), READ_REG(pkt, Rx_op, false)), op_LSHIFT_25); + RzILOpPure *op_RSHIFT_36 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_38 = LOGAND(op_RSHIFT_36, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_44 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_46 = LOGAND(op_RSHIFT_44, SN(32, 0xffff)); + RzILOpPure *op_MUL_49 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_38), DUP(op_AND_38))), CAST(16, MSB(DUP(op_AND_38)), DUP(op_AND_38))), CAST(32, MSB(CAST(16, MSB(op_AND_46), DUP(op_AND_46))), CAST(16, 
MSB(DUP(op_AND_46)), DUP(op_AND_46)))); + RzILOpPure *op_LSHIFT_52 = SHIFTL0(CAST(64, MSB(op_MUL_49), DUP(op_MUL_49)), SN(32, 1)); + RzILOpPure *op_SUB_54 = SUB(CAST(64, MSB(READ_REG(pkt, Rx_op, false)), READ_REG(pkt, Rx_op, false)), op_LSHIFT_52); + RzILOpPure *op_EQ_55 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_SUB_27), SN(32, 0), SN(32, 0x20)), op_SUB_54); + RzILOpPure *op_RSHIFT_84 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_86 = LOGAND(op_RSHIFT_84, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_92 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_94 = LOGAND(op_RSHIFT_92, SN(32, 0xffff)); + RzILOpPure *op_MUL_97 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_86), DUP(op_AND_86))), CAST(16, MSB(DUP(op_AND_86)), DUP(op_AND_86))), CAST(32, MSB(CAST(16, MSB(op_AND_94), DUP(op_AND_94))), CAST(16, MSB(DUP(op_AND_94)), DUP(op_AND_94)))); + RzILOpPure *op_LSHIFT_100 = SHIFTL0(CAST(64, MSB(op_MUL_97), DUP(op_MUL_97)), SN(32, 1)); + RzILOpPure *op_SUB_102 = SUB(CAST(64, MSB(READ_REG(pkt, Rx_op, false)), READ_REG(pkt, Rx_op, false)), op_LSHIFT_100); + RzILOpPure *op_LT_105 = SLT(op_SUB_102, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_110 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_111 = NEG(op_LSHIFT_110); + RzILOpPure *op_LSHIFT_116 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_119 = SUB(op_LSHIFT_116, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_120 = ITE(op_LT_105, op_NEG_111, op_SUB_119); + RzILOpEffect *gcc_expr_121 = BRANCH(op_EQ_55, EMPTY(), set_usr_field_call_80); + + // h_tmp347 = HYB(gcc_expr_if ((sextract64(((ut64) ((st64) Rx) - (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1)), 0x0, 0x20) == ((st64) Rx) - (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) Rx) - (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt 
>> 0x0) & 0xffff)))) << 0x1) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_123 = SETL("h_tmp347", cond_120); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) Rx) - (((st64) ...; + RzILOpEffect *seq_124 = SEQN(2, gcc_expr_121, op_ASSIGN_hybrid_tmp_123); + + // Rx = ((st32) ((sextract64(((ut64) ((st64) Rx) - (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1)), 0x0, 0x20) == ((st64) Rx) - (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1)) ? ((st64) Rx) - (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) : h_tmp347)); + RzILOpPure *op_RSHIFT_59 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_61 = LOGAND(op_RSHIFT_59, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_67 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_69 = LOGAND(op_RSHIFT_67, SN(32, 0xffff)); + RzILOpPure *op_MUL_72 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_61), DUP(op_AND_61))), CAST(16, MSB(DUP(op_AND_61)), DUP(op_AND_61))), CAST(32, MSB(CAST(16, MSB(op_AND_69), DUP(op_AND_69))), CAST(16, MSB(DUP(op_AND_69)), DUP(op_AND_69)))); + RzILOpPure *op_LSHIFT_75 = SHIFTL0(CAST(64, MSB(op_MUL_72), DUP(op_MUL_72)), SN(32, 1)); + RzILOpPure *op_SUB_77 = SUB(CAST(64, MSB(READ_REG(pkt, Rx_op, false)), READ_REG(pkt, Rx_op, false)), op_LSHIFT_75); + RzILOpPure *cond_125 = ITE(DUP(op_EQ_55), op_SUB_77, VARL("h_tmp347")); + RzILOpEffect *op_ASSIGN_127 = WRITE_REG(bundle, Rx_op, CAST(32, MSB(cond_125), DUP(cond_125))); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) Rx) - (((st ...; + RzILOpEffect *seq_128 = SEQN(2, seq_124, op_ASSIGN_127); + + RzILOpEffect *instruction_sequence = seq_128; + return instruction_sequence; +} + +// Rd = mpy(Rs.h,Rt.h):rnd +RzILOpEffect *hex_il_op_m2_mpy_rnd_hh_s0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // 
// NOTE(review): This span is a newline-collapsed unified-diff hunk (the " + "
// separators are the patch's '+' line prefixes) of AUTO-GENERATED code from the
// rizin Hexagon RzIL emitter; the file header states "The following code is
// generated. Do not edit." Do not hand-edit these functions — regenerate them
// from the code generator instead.
//
// The functions below lift Hexagon M2 16x16-bit signed multiply instructions
// (Rd = mpy(Rs.{h,l}, Rt.{h,l})) to RzIL effects:
//   - each operand half is extracted with SHIFTRA by 0x10 (high) or 0x0 (low)
//     and masked with 0xffff, then sign-extended via the st16 -> st32 casts;
//   - ":<<1" variants scale the 64-bit product with SHIFTL0(..., SN(32, 1));
//   - ":rnd" variants add the rounding constant 0x8000 before writing back;
//   - ":sat" variants compare the 64-bit result with its 32-bit sign extension
//     (SEXTRACT64(..., 0x0, 0x20) == result); on mismatch they set USR.OVF via
//     hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, 1) and clamp to
//     INT32_MIN / INT32_MAX, i.e. -(0x1 << 0x1f) and (0x1 << 0x1f) - 0x1.
// h_tmpNNN names are generator-assigned hybrid temporaries; DUP() re-uses a
// pure value that would otherwise be consumed twice.
READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rd = ((st32) ((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) + ((st64) 0x8000)); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rs, SN(32, 16)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(Rt, SN(32, 16)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0xffff)); + RzILOpPure *op_MUL_19 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_7), DUP(op_AND_7))), CAST(16, MSB(DUP(op_AND_7)), DUP(op_AND_7))), CAST(32, MSB(CAST(16, MSB(op_AND_16), DUP(op_AND_16))), CAST(16, MSB(DUP(op_AND_16)), DUP(op_AND_16)))); + RzILOpPure *op_ADD_23 = ADD(CAST(64, MSB(op_MUL_19), DUP(op_MUL_19)), CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpEffect *op_ASSIGN_25 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(op_ADD_23), DUP(op_ADD_23))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_25; + return instruction_sequence; +} + +// Rd = mpy(Rs.h,Rt.h):<<1:rnd +RzILOpEffect *hex_il_op_m2_mpy_rnd_hh_s1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rd = ((st32) (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1) + ((st64) 0x8000)); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rs, SN(32, 16)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(Rt, SN(32, 16)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0xffff)); + RzILOpPure *op_MUL_19 = MUL(CAST(32, 
MSB(CAST(16, MSB(op_AND_7), DUP(op_AND_7))), CAST(16, MSB(DUP(op_AND_7)), DUP(op_AND_7))), CAST(32, MSB(CAST(16, MSB(op_AND_16), DUP(op_AND_16))), CAST(16, MSB(DUP(op_AND_16)), DUP(op_AND_16)))); + RzILOpPure *op_LSHIFT_22 = SHIFTL0(CAST(64, MSB(op_MUL_19), DUP(op_MUL_19)), SN(32, 1)); + RzILOpPure *op_ADD_25 = ADD(op_LSHIFT_22, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpEffect *op_ASSIGN_27 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(op_ADD_25), DUP(op_ADD_25))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_27; + return instruction_sequence; +} + +// Rd = mpy(Rs.h,Rt.l):rnd +RzILOpEffect *hex_il_op_m2_mpy_rnd_hl_s0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rd = ((st32) ((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) + ((st64) 0x8000)); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rs, SN(32, 16)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0xffff)); + RzILOpPure *op_MUL_19 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_7), DUP(op_AND_7))), CAST(16, MSB(DUP(op_AND_7)), DUP(op_AND_7))), CAST(32, MSB(CAST(16, MSB(op_AND_16), DUP(op_AND_16))), CAST(16, MSB(DUP(op_AND_16)), DUP(op_AND_16)))); + RzILOpPure *op_ADD_23 = ADD(CAST(64, MSB(op_MUL_19), DUP(op_MUL_19)), CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpEffect *op_ASSIGN_25 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(op_ADD_23), DUP(op_ADD_23))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_25; + return instruction_sequence; +} + +// Rd = mpy(Rs.h,Rt.l):<<1:rnd +RzILOpEffect *hex_il_op_m2_mpy_rnd_hl_s1(HexInsnPktBundle *bundle) { + 
const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rd = ((st32) (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) + ((st64) 0x8000)); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rs, SN(32, 16)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0xffff)); + RzILOpPure *op_MUL_19 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_7), DUP(op_AND_7))), CAST(16, MSB(DUP(op_AND_7)), DUP(op_AND_7))), CAST(32, MSB(CAST(16, MSB(op_AND_16), DUP(op_AND_16))), CAST(16, MSB(DUP(op_AND_16)), DUP(op_AND_16)))); + RzILOpPure *op_LSHIFT_22 = SHIFTL0(CAST(64, MSB(op_MUL_19), DUP(op_MUL_19)), SN(32, 1)); + RzILOpPure *op_ADD_25 = ADD(op_LSHIFT_22, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpEffect *op_ASSIGN_27 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(op_ADD_25), DUP(op_ADD_25))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_27; + return instruction_sequence; +} + +// Rd = mpy(Rs.l,Rt.h):rnd +RzILOpEffect *hex_il_op_m2_mpy_rnd_lh_s0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rd = ((st32) ((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) + ((st64) 0x8000)); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rs, SN(32, 0)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(Rt, 
SN(32, 16)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0xffff)); + RzILOpPure *op_MUL_19 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_7), DUP(op_AND_7))), CAST(16, MSB(DUP(op_AND_7)), DUP(op_AND_7))), CAST(32, MSB(CAST(16, MSB(op_AND_16), DUP(op_AND_16))), CAST(16, MSB(DUP(op_AND_16)), DUP(op_AND_16)))); + RzILOpPure *op_ADD_23 = ADD(CAST(64, MSB(op_MUL_19), DUP(op_MUL_19)), CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpEffect *op_ASSIGN_25 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(op_ADD_23), DUP(op_ADD_23))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_25; + return instruction_sequence; +} + +// Rd = mpy(Rs.l,Rt.h):<<1:rnd +RzILOpEffect *hex_il_op_m2_mpy_rnd_lh_s1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rd = ((st32) (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1) + ((st64) 0x8000)); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rs, SN(32, 0)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(Rt, SN(32, 16)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0xffff)); + RzILOpPure *op_MUL_19 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_7), DUP(op_AND_7))), CAST(16, MSB(DUP(op_AND_7)), DUP(op_AND_7))), CAST(32, MSB(CAST(16, MSB(op_AND_16), DUP(op_AND_16))), CAST(16, MSB(DUP(op_AND_16)), DUP(op_AND_16)))); + RzILOpPure *op_LSHIFT_22 = SHIFTL0(CAST(64, MSB(op_MUL_19), DUP(op_MUL_19)), SN(32, 1)); + RzILOpPure *op_ADD_25 = ADD(op_LSHIFT_22, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpEffect *op_ASSIGN_27 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(op_ADD_25), DUP(op_ADD_25))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_27; + return 
instruction_sequence; +} + +// Rd = mpy(Rs.l,Rt.l):rnd +RzILOpEffect *hex_il_op_m2_mpy_rnd_ll_s0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rd = ((st32) ((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) + ((st64) 0x8000)); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rs, SN(32, 0)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0xffff)); + RzILOpPure *op_MUL_19 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_7), DUP(op_AND_7))), CAST(16, MSB(DUP(op_AND_7)), DUP(op_AND_7))), CAST(32, MSB(CAST(16, MSB(op_AND_16), DUP(op_AND_16))), CAST(16, MSB(DUP(op_AND_16)), DUP(op_AND_16)))); + RzILOpPure *op_ADD_23 = ADD(CAST(64, MSB(op_MUL_19), DUP(op_MUL_19)), CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpEffect *op_ASSIGN_25 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(op_ADD_23), DUP(op_ADD_23))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_25; + return instruction_sequence; +} + +// Rd = mpy(Rs.l,Rt.l):<<1:rnd +RzILOpEffect *hex_il_op_m2_mpy_rnd_ll_s1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rd = ((st32) (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) + ((st64) 0x8000)); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rs, SN(32, 0)); + RzILOpPure *op_AND_7 = 
LOGAND(op_RSHIFT_5, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0xffff)); + RzILOpPure *op_MUL_19 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_7), DUP(op_AND_7))), CAST(16, MSB(DUP(op_AND_7)), DUP(op_AND_7))), CAST(32, MSB(CAST(16, MSB(op_AND_16), DUP(op_AND_16))), CAST(16, MSB(DUP(op_AND_16)), DUP(op_AND_16)))); + RzILOpPure *op_LSHIFT_22 = SHIFTL0(CAST(64, MSB(op_MUL_19), DUP(op_MUL_19)), SN(32, 1)); + RzILOpPure *op_ADD_25 = ADD(op_LSHIFT_22, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpEffect *op_ASSIGN_27 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(op_ADD_25), DUP(op_ADD_25))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_27; + return instruction_sequence; +} + +// Rd = mpy(Rs.h,Rt.h):sat +RzILOpEffect *hex_il_op_m2_mpy_sat_hh_s0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_68 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff))))), 0x0, 0x20) == ((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_8 = SHIFTRA(Rs, SN(32, 16)); + RzILOpPure *op_AND_10 = LOGAND(op_RSHIFT_8, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_17 = SHIFTRA(Rt, SN(32, 16)); + RzILOpPure *op_AND_19 = LOGAND(op_RSHIFT_17, SN(32, 0xffff)); + RzILOpPure *op_MUL_22 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_10), DUP(op_AND_10))), CAST(16, MSB(DUP(op_AND_10)), DUP(op_AND_10))), CAST(32, MSB(CAST(16, MSB(op_AND_19), DUP(op_AND_19))), CAST(16, MSB(DUP(op_AND_19)), DUP(op_AND_19)))); + RzILOpPure *op_RSHIFT_32 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_34 = LOGAND(op_RSHIFT_32, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_40 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_42 = LOGAND(op_RSHIFT_40, SN(32, 0xffff)); + RzILOpPure *op_MUL_45 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_34), DUP(op_AND_34))), CAST(16, MSB(DUP(op_AND_34)), DUP(op_AND_34))), CAST(32, MSB(CAST(16, MSB(op_AND_42), DUP(op_AND_42))), CAST(16, MSB(DUP(op_AND_42)), DUP(op_AND_42)))); + RzILOpPure *op_EQ_47 = EQ(SEXTRACT64(CAST(64, IL_FALSE, CAST(64, MSB(op_MUL_22), DUP(op_MUL_22))), SN(32, 0), SN(32, 0x20)), CAST(64, MSB(op_MUL_45), DUP(op_MUL_45))); + RzILOpPure *op_RSHIFT_72 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_74 = LOGAND(op_RSHIFT_72, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_80 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_82 = LOGAND(op_RSHIFT_80, SN(32, 0xffff)); + RzILOpPure *op_MUL_85 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_74), DUP(op_AND_74))), CAST(16, MSB(DUP(op_AND_74)), DUP(op_AND_74))), CAST(32, MSB(CAST(16, MSB(op_AND_82), DUP(op_AND_82))), CAST(16, MSB(DUP(op_AND_82)), DUP(op_AND_82)))); + RzILOpPure *op_LT_89 = SLT(CAST(64, MSB(op_MUL_85), DUP(op_MUL_85)), CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_94 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_95 = NEG(op_LSHIFT_94); + RzILOpPure *op_LSHIFT_100 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_103 = SUB(op_LSHIFT_100, CAST(64, 
MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_104 = ITE(op_LT_89, op_NEG_95, op_SUB_103); + RzILOpEffect *gcc_expr_105 = BRANCH(op_EQ_47, EMPTY(), set_usr_field_call_68); + + // h_tmp348 = HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff))))), 0x0, 0x20) == ((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_107 = SETL("h_tmp348", cond_104); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((st16) ...; + RzILOpEffect *seq_108 = SEQN(2, gcc_expr_105, op_ASSIGN_hybrid_tmp_107); + + // Rd = ((st32) ((sextract64(((ut64) ((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff))))), 0x0, 0x20) == ((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff))))) ? 
((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) : h_tmp348)); + RzILOpPure *op_RSHIFT_51 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_53 = LOGAND(op_RSHIFT_51, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_59 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_61 = LOGAND(op_RSHIFT_59, SN(32, 0xffff)); + RzILOpPure *op_MUL_64 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_53), DUP(op_AND_53))), CAST(16, MSB(DUP(op_AND_53)), DUP(op_AND_53))), CAST(32, MSB(CAST(16, MSB(op_AND_61), DUP(op_AND_61))), CAST(16, MSB(DUP(op_AND_61)), DUP(op_AND_61)))); + RzILOpPure *cond_109 = ITE(DUP(op_EQ_47), CAST(64, MSB(op_MUL_64), DUP(op_MUL_64)), VARL("h_tmp348")); + RzILOpEffect *op_ASSIGN_111 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(cond_109), DUP(cond_109))); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((s ...; + RzILOpEffect *seq_112 = SEQN(2, seq_108, op_ASSIGN_111); + + RzILOpEffect *instruction_sequence = seq_112; + return instruction_sequence; +} + +// Rd = mpy(Rs.h,Rt.h):<<1:sat +RzILOpEffect *hex_il_op_m2_mpy_sat_hh_s1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_74 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) ((st32) ((st16) ((Rs 
>> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_8 = SHIFTRA(Rs, SN(32, 16)); + RzILOpPure *op_AND_10 = LOGAND(op_RSHIFT_8, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_17 = SHIFTRA(Rt, SN(32, 16)); + RzILOpPure *op_AND_19 = LOGAND(op_RSHIFT_17, SN(32, 0xffff)); + RzILOpPure *op_MUL_22 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_10), DUP(op_AND_10))), CAST(16, MSB(DUP(op_AND_10)), DUP(op_AND_10))), CAST(32, MSB(CAST(16, MSB(op_AND_19), DUP(op_AND_19))), CAST(16, MSB(DUP(op_AND_19)), DUP(op_AND_19)))); + RzILOpPure *op_LSHIFT_25 = SHIFTL0(CAST(64, MSB(op_MUL_22), DUP(op_MUL_22)), SN(32, 1)); + RzILOpPure *op_RSHIFT_34 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_36 = LOGAND(op_RSHIFT_34, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_42 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_44 = LOGAND(op_RSHIFT_42, SN(32, 0xffff)); + RzILOpPure *op_MUL_47 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_36), DUP(op_AND_36))), CAST(16, MSB(DUP(op_AND_36)), DUP(op_AND_36))), CAST(32, MSB(CAST(16, MSB(op_AND_44), DUP(op_AND_44))), CAST(16, MSB(DUP(op_AND_44)), DUP(op_AND_44)))); + RzILOpPure *op_LSHIFT_50 = SHIFTL0(CAST(64, MSB(op_MUL_47), DUP(op_MUL_47)), SN(32, 1)); + RzILOpPure *op_EQ_51 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_LSHIFT_25), SN(32, 0), SN(32, 0x20)), op_LSHIFT_50); + RzILOpPure *op_RSHIFT_78 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_80 = LOGAND(op_RSHIFT_78, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_86 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_88 = LOGAND(op_RSHIFT_86, SN(32, 0xffff)); + RzILOpPure *op_MUL_91 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_80), DUP(op_AND_80))), CAST(16, MSB(DUP(op_AND_80)), DUP(op_AND_80))), CAST(32, MSB(CAST(16, MSB(op_AND_88), DUP(op_AND_88))), CAST(16, MSB(DUP(op_AND_88)), DUP(op_AND_88)))); + RzILOpPure *op_LSHIFT_94 = SHIFTL0(CAST(64, MSB(op_MUL_91), DUP(op_MUL_91)), SN(32, 1)); + RzILOpPure *op_LT_97 = 
SLT(op_LSHIFT_94, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_102 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_103 = NEG(op_LSHIFT_102); + RzILOpPure *op_LSHIFT_108 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_111 = SUB(op_LSHIFT_108, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_112 = ITE(op_LT_97, op_NEG_103, op_SUB_111); + RzILOpEffect *gcc_expr_113 = BRANCH(op_EQ_51, EMPTY(), set_usr_field_call_74); + + // h_tmp349 = HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_115 = SETL("h_tmp349", cond_112); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16 ...; + RzILOpEffect *seq_116 = SEQN(2, gcc_expr_113, op_ASSIGN_hybrid_tmp_115); + + // Rd = ((st32) ((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1)) ? 
(((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1) : h_tmp349)); + RzILOpPure *op_RSHIFT_55 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_57 = LOGAND(op_RSHIFT_55, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_63 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_65 = LOGAND(op_RSHIFT_63, SN(32, 0xffff)); + RzILOpPure *op_MUL_68 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_57), DUP(op_AND_57))), CAST(16, MSB(DUP(op_AND_57)), DUP(op_AND_57))), CAST(32, MSB(CAST(16, MSB(op_AND_65), DUP(op_AND_65))), CAST(16, MSB(DUP(op_AND_65)), DUP(op_AND_65)))); + RzILOpPure *op_LSHIFT_71 = SHIFTL0(CAST(64, MSB(op_MUL_68), DUP(op_MUL_68)), SN(32, 1)); + RzILOpPure *cond_117 = ITE(DUP(op_EQ_51), op_LSHIFT_71, VARL("h_tmp349")); + RzILOpEffect *op_ASSIGN_119 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(cond_117), DUP(cond_117))); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) (( ...; + RzILOpEffect *seq_120 = SEQN(2, seq_116, op_ASSIGN_119); + + RzILOpEffect *instruction_sequence = seq_120; + return instruction_sequence; +} + +// Rd = mpy(Rs.h,Rt.l):sat +RzILOpEffect *hex_il_op_m2_mpy_sat_hl_s0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_68 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff))))), 0x0, 0x20) == ((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, 
((ut32) 0x1))}, ((((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_8 = SHIFTRA(Rs, SN(32, 16)); + RzILOpPure *op_AND_10 = LOGAND(op_RSHIFT_8, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_17 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_19 = LOGAND(op_RSHIFT_17, SN(32, 0xffff)); + RzILOpPure *op_MUL_22 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_10), DUP(op_AND_10))), CAST(16, MSB(DUP(op_AND_10)), DUP(op_AND_10))), CAST(32, MSB(CAST(16, MSB(op_AND_19), DUP(op_AND_19))), CAST(16, MSB(DUP(op_AND_19)), DUP(op_AND_19)))); + RzILOpPure *op_RSHIFT_32 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_34 = LOGAND(op_RSHIFT_32, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_40 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_42 = LOGAND(op_RSHIFT_40, SN(32, 0xffff)); + RzILOpPure *op_MUL_45 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_34), DUP(op_AND_34))), CAST(16, MSB(DUP(op_AND_34)), DUP(op_AND_34))), CAST(32, MSB(CAST(16, MSB(op_AND_42), DUP(op_AND_42))), CAST(16, MSB(DUP(op_AND_42)), DUP(op_AND_42)))); + RzILOpPure *op_EQ_47 = EQ(SEXTRACT64(CAST(64, IL_FALSE, CAST(64, MSB(op_MUL_22), DUP(op_MUL_22))), SN(32, 0), SN(32, 0x20)), CAST(64, MSB(op_MUL_45), DUP(op_MUL_45))); + RzILOpPure *op_RSHIFT_72 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_74 = LOGAND(op_RSHIFT_72, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_80 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_82 = LOGAND(op_RSHIFT_80, SN(32, 0xffff)); + RzILOpPure *op_MUL_85 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_74), DUP(op_AND_74))), CAST(16, MSB(DUP(op_AND_74)), DUP(op_AND_74))), CAST(32, MSB(CAST(16, MSB(op_AND_82), DUP(op_AND_82))), CAST(16, MSB(DUP(op_AND_82)), DUP(op_AND_82)))); + RzILOpPure *op_LT_89 = SLT(CAST(64, MSB(op_MUL_85), DUP(op_MUL_85)), CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_94 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_95 = 
NEG(op_LSHIFT_94); + RzILOpPure *op_LSHIFT_100 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_103 = SUB(op_LSHIFT_100, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_104 = ITE(op_LT_89, op_NEG_95, op_SUB_103); + RzILOpEffect *gcc_expr_105 = BRANCH(op_EQ_47, EMPTY(), set_usr_field_call_68); + + // h_tmp350 = HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff))))), 0x0, 0x20) == ((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_107 = SETL("h_tmp350", cond_104); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((st16) ...; + RzILOpEffect *seq_108 = SEQN(2, gcc_expr_105, op_ASSIGN_hybrid_tmp_107); + + // Rd = ((st32) ((sextract64(((ut64) ((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff))))), 0x0, 0x20) == ((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff))))) ? 
((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) : h_tmp350)); + RzILOpPure *op_RSHIFT_51 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_53 = LOGAND(op_RSHIFT_51, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_59 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_61 = LOGAND(op_RSHIFT_59, SN(32, 0xffff)); + RzILOpPure *op_MUL_64 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_53), DUP(op_AND_53))), CAST(16, MSB(DUP(op_AND_53)), DUP(op_AND_53))), CAST(32, MSB(CAST(16, MSB(op_AND_61), DUP(op_AND_61))), CAST(16, MSB(DUP(op_AND_61)), DUP(op_AND_61)))); + RzILOpPure *cond_109 = ITE(DUP(op_EQ_47), CAST(64, MSB(op_MUL_64), DUP(op_MUL_64)), VARL("h_tmp350")); + RzILOpEffect *op_ASSIGN_111 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(cond_109), DUP(cond_109))); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((s ...; + RzILOpEffect *seq_112 = SEQN(2, seq_108, op_ASSIGN_111); + + RzILOpEffect *instruction_sequence = seq_112; + return instruction_sequence; +} + +// Rd = mpy(Rs.h,Rt.l):<<1:sat +RzILOpEffect *hex_il_op_m2_mpy_sat_hl_s1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_74 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) ((st32) ((st16) ((Rs >> 
0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_8 = SHIFTRA(Rs, SN(32, 16)); + RzILOpPure *op_AND_10 = LOGAND(op_RSHIFT_8, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_17 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_19 = LOGAND(op_RSHIFT_17, SN(32, 0xffff)); + RzILOpPure *op_MUL_22 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_10), DUP(op_AND_10))), CAST(16, MSB(DUP(op_AND_10)), DUP(op_AND_10))), CAST(32, MSB(CAST(16, MSB(op_AND_19), DUP(op_AND_19))), CAST(16, MSB(DUP(op_AND_19)), DUP(op_AND_19)))); + RzILOpPure *op_LSHIFT_25 = SHIFTL0(CAST(64, MSB(op_MUL_22), DUP(op_MUL_22)), SN(32, 1)); + RzILOpPure *op_RSHIFT_34 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_36 = LOGAND(op_RSHIFT_34, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_42 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_44 = LOGAND(op_RSHIFT_42, SN(32, 0xffff)); + RzILOpPure *op_MUL_47 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_36), DUP(op_AND_36))), CAST(16, MSB(DUP(op_AND_36)), DUP(op_AND_36))), CAST(32, MSB(CAST(16, MSB(op_AND_44), DUP(op_AND_44))), CAST(16, MSB(DUP(op_AND_44)), DUP(op_AND_44)))); + RzILOpPure *op_LSHIFT_50 = SHIFTL0(CAST(64, MSB(op_MUL_47), DUP(op_MUL_47)), SN(32, 1)); + RzILOpPure *op_EQ_51 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_LSHIFT_25), SN(32, 0), SN(32, 0x20)), op_LSHIFT_50); + RzILOpPure *op_RSHIFT_78 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_80 = LOGAND(op_RSHIFT_78, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_86 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_88 = LOGAND(op_RSHIFT_86, SN(32, 0xffff)); + RzILOpPure *op_MUL_91 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_80), DUP(op_AND_80))), CAST(16, MSB(DUP(op_AND_80)), DUP(op_AND_80))), CAST(32, MSB(CAST(16, MSB(op_AND_88), DUP(op_AND_88))), CAST(16, MSB(DUP(op_AND_88)), DUP(op_AND_88)))); + RzILOpPure *op_LSHIFT_94 = SHIFTL0(CAST(64, MSB(op_MUL_91), DUP(op_MUL_91)), SN(32, 1)); + RzILOpPure *op_LT_97 = 
SLT(op_LSHIFT_94, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_102 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_103 = NEG(op_LSHIFT_102); + RzILOpPure *op_LSHIFT_108 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_111 = SUB(op_LSHIFT_108, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_112 = ITE(op_LT_97, op_NEG_103, op_SUB_111); + RzILOpEffect *gcc_expr_113 = BRANCH(op_EQ_51, EMPTY(), set_usr_field_call_74); + + // h_tmp351 = HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_115 = SETL("h_tmp351", cond_112); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16 ...; + RzILOpEffect *seq_116 = SEQN(2, gcc_expr_113, op_ASSIGN_hybrid_tmp_115); + + // Rd = ((st32) ((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1)) ? 
(((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) : h_tmp351)); + RzILOpPure *op_RSHIFT_55 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_57 = LOGAND(op_RSHIFT_55, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_63 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_65 = LOGAND(op_RSHIFT_63, SN(32, 0xffff)); + RzILOpPure *op_MUL_68 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_57), DUP(op_AND_57))), CAST(16, MSB(DUP(op_AND_57)), DUP(op_AND_57))), CAST(32, MSB(CAST(16, MSB(op_AND_65), DUP(op_AND_65))), CAST(16, MSB(DUP(op_AND_65)), DUP(op_AND_65)))); + RzILOpPure *op_LSHIFT_71 = SHIFTL0(CAST(64, MSB(op_MUL_68), DUP(op_MUL_68)), SN(32, 1)); + RzILOpPure *cond_117 = ITE(DUP(op_EQ_51), op_LSHIFT_71, VARL("h_tmp351")); + RzILOpEffect *op_ASSIGN_119 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(cond_117), DUP(cond_117))); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) (( ...; + RzILOpEffect *seq_120 = SEQN(2, seq_116, op_ASSIGN_119); + + RzILOpEffect *instruction_sequence = seq_120; + return instruction_sequence; +} + +// Rd = mpy(Rs.l,Rt.h):sat +RzILOpEffect *hex_il_op_m2_mpy_sat_lh_s0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_68 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff))))), 0x0, 0x20) == ((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, 
((ut32) 0x1))}, ((((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_8 = SHIFTRA(Rs, SN(32, 0)); + RzILOpPure *op_AND_10 = LOGAND(op_RSHIFT_8, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_17 = SHIFTRA(Rt, SN(32, 16)); + RzILOpPure *op_AND_19 = LOGAND(op_RSHIFT_17, SN(32, 0xffff)); + RzILOpPure *op_MUL_22 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_10), DUP(op_AND_10))), CAST(16, MSB(DUP(op_AND_10)), DUP(op_AND_10))), CAST(32, MSB(CAST(16, MSB(op_AND_19), DUP(op_AND_19))), CAST(16, MSB(DUP(op_AND_19)), DUP(op_AND_19)))); + RzILOpPure *op_RSHIFT_32 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_34 = LOGAND(op_RSHIFT_32, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_40 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_42 = LOGAND(op_RSHIFT_40, SN(32, 0xffff)); + RzILOpPure *op_MUL_45 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_34), DUP(op_AND_34))), CAST(16, MSB(DUP(op_AND_34)), DUP(op_AND_34))), CAST(32, MSB(CAST(16, MSB(op_AND_42), DUP(op_AND_42))), CAST(16, MSB(DUP(op_AND_42)), DUP(op_AND_42)))); + RzILOpPure *op_EQ_47 = EQ(SEXTRACT64(CAST(64, IL_FALSE, CAST(64, MSB(op_MUL_22), DUP(op_MUL_22))), SN(32, 0), SN(32, 0x20)), CAST(64, MSB(op_MUL_45), DUP(op_MUL_45))); + RzILOpPure *op_RSHIFT_72 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_74 = LOGAND(op_RSHIFT_72, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_80 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_82 = LOGAND(op_RSHIFT_80, SN(32, 0xffff)); + RzILOpPure *op_MUL_85 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_74), DUP(op_AND_74))), CAST(16, MSB(DUP(op_AND_74)), DUP(op_AND_74))), CAST(32, MSB(CAST(16, MSB(op_AND_82), DUP(op_AND_82))), CAST(16, MSB(DUP(op_AND_82)), DUP(op_AND_82)))); + RzILOpPure *op_LT_89 = SLT(CAST(64, MSB(op_MUL_85), DUP(op_MUL_85)), CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_94 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_95 = 
NEG(op_LSHIFT_94); + RzILOpPure *op_LSHIFT_100 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_103 = SUB(op_LSHIFT_100, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_104 = ITE(op_LT_89, op_NEG_95, op_SUB_103); + RzILOpEffect *gcc_expr_105 = BRANCH(op_EQ_47, EMPTY(), set_usr_field_call_68); + + // h_tmp352 = HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff))))), 0x0, 0x20) == ((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_107 = SETL("h_tmp352", cond_104); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((st16) ...; + RzILOpEffect *seq_108 = SEQN(2, gcc_expr_105, op_ASSIGN_hybrid_tmp_107); + + // Rd = ((st32) ((sextract64(((ut64) ((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff))))), 0x0, 0x20) == ((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff))))) ? 
((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) : h_tmp352)); + RzILOpPure *op_RSHIFT_51 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_53 = LOGAND(op_RSHIFT_51, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_59 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_61 = LOGAND(op_RSHIFT_59, SN(32, 0xffff)); + RzILOpPure *op_MUL_64 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_53), DUP(op_AND_53))), CAST(16, MSB(DUP(op_AND_53)), DUP(op_AND_53))), CAST(32, MSB(CAST(16, MSB(op_AND_61), DUP(op_AND_61))), CAST(16, MSB(DUP(op_AND_61)), DUP(op_AND_61)))); + RzILOpPure *cond_109 = ITE(DUP(op_EQ_47), CAST(64, MSB(op_MUL_64), DUP(op_MUL_64)), VARL("h_tmp352")); + RzILOpEffect *op_ASSIGN_111 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(cond_109), DUP(cond_109))); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((s ...; + RzILOpEffect *seq_112 = SEQN(2, seq_108, op_ASSIGN_111); + + RzILOpEffect *instruction_sequence = seq_112; + return instruction_sequence; +} + +// Rd = mpy(Rs.l,Rt.h):<<1:sat +RzILOpEffect *hex_il_op_m2_mpy_sat_lh_s1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_74 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) ((st32) ((st16) ((Rs >> 
0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_8 = SHIFTRA(Rs, SN(32, 0)); + RzILOpPure *op_AND_10 = LOGAND(op_RSHIFT_8, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_17 = SHIFTRA(Rt, SN(32, 16)); + RzILOpPure *op_AND_19 = LOGAND(op_RSHIFT_17, SN(32, 0xffff)); + RzILOpPure *op_MUL_22 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_10), DUP(op_AND_10))), CAST(16, MSB(DUP(op_AND_10)), DUP(op_AND_10))), CAST(32, MSB(CAST(16, MSB(op_AND_19), DUP(op_AND_19))), CAST(16, MSB(DUP(op_AND_19)), DUP(op_AND_19)))); + RzILOpPure *op_LSHIFT_25 = SHIFTL0(CAST(64, MSB(op_MUL_22), DUP(op_MUL_22)), SN(32, 1)); + RzILOpPure *op_RSHIFT_34 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_36 = LOGAND(op_RSHIFT_34, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_42 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_44 = LOGAND(op_RSHIFT_42, SN(32, 0xffff)); + RzILOpPure *op_MUL_47 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_36), DUP(op_AND_36))), CAST(16, MSB(DUP(op_AND_36)), DUP(op_AND_36))), CAST(32, MSB(CAST(16, MSB(op_AND_44), DUP(op_AND_44))), CAST(16, MSB(DUP(op_AND_44)), DUP(op_AND_44)))); + RzILOpPure *op_LSHIFT_50 = SHIFTL0(CAST(64, MSB(op_MUL_47), DUP(op_MUL_47)), SN(32, 1)); + RzILOpPure *op_EQ_51 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_LSHIFT_25), SN(32, 0), SN(32, 0x20)), op_LSHIFT_50); + RzILOpPure *op_RSHIFT_78 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_80 = LOGAND(op_RSHIFT_78, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_86 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_88 = LOGAND(op_RSHIFT_86, SN(32, 0xffff)); + RzILOpPure *op_MUL_91 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_80), DUP(op_AND_80))), CAST(16, MSB(DUP(op_AND_80)), DUP(op_AND_80))), CAST(32, MSB(CAST(16, MSB(op_AND_88), DUP(op_AND_88))), CAST(16, MSB(DUP(op_AND_88)), DUP(op_AND_88)))); + RzILOpPure *op_LSHIFT_94 = SHIFTL0(CAST(64, MSB(op_MUL_91), DUP(op_MUL_91)), SN(32, 1)); + RzILOpPure *op_LT_97 = 
SLT(op_LSHIFT_94, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_102 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_103 = NEG(op_LSHIFT_102); + RzILOpPure *op_LSHIFT_108 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_111 = SUB(op_LSHIFT_108, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_112 = ITE(op_LT_97, op_NEG_103, op_SUB_111); + RzILOpEffect *gcc_expr_113 = BRANCH(op_EQ_51, EMPTY(), set_usr_field_call_74); + + // h_tmp353 = HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_115 = SETL("h_tmp353", cond_112); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16 ...; + RzILOpEffect *seq_116 = SEQN(2, gcc_expr_113, op_ASSIGN_hybrid_tmp_115); + + // Rd = ((st32) ((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1)) ? 
(((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1) : h_tmp353)); + RzILOpPure *op_RSHIFT_55 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_57 = LOGAND(op_RSHIFT_55, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_63 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_65 = LOGAND(op_RSHIFT_63, SN(32, 0xffff)); + RzILOpPure *op_MUL_68 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_57), DUP(op_AND_57))), CAST(16, MSB(DUP(op_AND_57)), DUP(op_AND_57))), CAST(32, MSB(CAST(16, MSB(op_AND_65), DUP(op_AND_65))), CAST(16, MSB(DUP(op_AND_65)), DUP(op_AND_65)))); + RzILOpPure *op_LSHIFT_71 = SHIFTL0(CAST(64, MSB(op_MUL_68), DUP(op_MUL_68)), SN(32, 1)); + RzILOpPure *cond_117 = ITE(DUP(op_EQ_51), op_LSHIFT_71, VARL("h_tmp353")); + RzILOpEffect *op_ASSIGN_119 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(cond_117), DUP(cond_117))); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) (( ...; + RzILOpEffect *seq_120 = SEQN(2, seq_116, op_ASSIGN_119); + + RzILOpEffect *instruction_sequence = seq_120; + return instruction_sequence; +} + +// Rd = mpy(Rs.l,Rt.l):sat +RzILOpEffect *hex_il_op_m2_mpy_sat_ll_s0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_68 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff))))), 0x0, 0x20) == ((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, 
((ut32) 0x1))}, ((((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_8 = SHIFTRA(Rs, SN(32, 0)); + RzILOpPure *op_AND_10 = LOGAND(op_RSHIFT_8, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_17 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_19 = LOGAND(op_RSHIFT_17, SN(32, 0xffff)); + RzILOpPure *op_MUL_22 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_10), DUP(op_AND_10))), CAST(16, MSB(DUP(op_AND_10)), DUP(op_AND_10))), CAST(32, MSB(CAST(16, MSB(op_AND_19), DUP(op_AND_19))), CAST(16, MSB(DUP(op_AND_19)), DUP(op_AND_19)))); + RzILOpPure *op_RSHIFT_32 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_34 = LOGAND(op_RSHIFT_32, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_40 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_42 = LOGAND(op_RSHIFT_40, SN(32, 0xffff)); + RzILOpPure *op_MUL_45 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_34), DUP(op_AND_34))), CAST(16, MSB(DUP(op_AND_34)), DUP(op_AND_34))), CAST(32, MSB(CAST(16, MSB(op_AND_42), DUP(op_AND_42))), CAST(16, MSB(DUP(op_AND_42)), DUP(op_AND_42)))); + RzILOpPure *op_EQ_47 = EQ(SEXTRACT64(CAST(64, IL_FALSE, CAST(64, MSB(op_MUL_22), DUP(op_MUL_22))), SN(32, 0), SN(32, 0x20)), CAST(64, MSB(op_MUL_45), DUP(op_MUL_45))); + RzILOpPure *op_RSHIFT_72 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_74 = LOGAND(op_RSHIFT_72, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_80 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_82 = LOGAND(op_RSHIFT_80, SN(32, 0xffff)); + RzILOpPure *op_MUL_85 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_74), DUP(op_AND_74))), CAST(16, MSB(DUP(op_AND_74)), DUP(op_AND_74))), CAST(32, MSB(CAST(16, MSB(op_AND_82), DUP(op_AND_82))), CAST(16, MSB(DUP(op_AND_82)), DUP(op_AND_82)))); + RzILOpPure *op_LT_89 = SLT(CAST(64, MSB(op_MUL_85), DUP(op_MUL_85)), CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_94 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_95 = 
NEG(op_LSHIFT_94); + RzILOpPure *op_LSHIFT_100 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_103 = SUB(op_LSHIFT_100, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_104 = ITE(op_LT_89, op_NEG_95, op_SUB_103); + RzILOpEffect *gcc_expr_105 = BRANCH(op_EQ_47, EMPTY(), set_usr_field_call_68); + + // h_tmp354 = HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff))))), 0x0, 0x20) == ((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_107 = SETL("h_tmp354", cond_104); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((st16) ...; + RzILOpEffect *seq_108 = SEQN(2, gcc_expr_105, op_ASSIGN_hybrid_tmp_107); + + // Rd = ((st32) ((sextract64(((ut64) ((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff))))), 0x0, 0x20) == ((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff))))) ? 
((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) : h_tmp354)); + RzILOpPure *op_RSHIFT_51 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_53 = LOGAND(op_RSHIFT_51, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_59 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_61 = LOGAND(op_RSHIFT_59, SN(32, 0xffff)); + RzILOpPure *op_MUL_64 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_53), DUP(op_AND_53))), CAST(16, MSB(DUP(op_AND_53)), DUP(op_AND_53))), CAST(32, MSB(CAST(16, MSB(op_AND_61), DUP(op_AND_61))), CAST(16, MSB(DUP(op_AND_61)), DUP(op_AND_61)))); + RzILOpPure *cond_109 = ITE(DUP(op_EQ_47), CAST(64, MSB(op_MUL_64), DUP(op_MUL_64)), VARL("h_tmp354")); + RzILOpEffect *op_ASSIGN_111 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(cond_109), DUP(cond_109))); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((s ...; + RzILOpEffect *seq_112 = SEQN(2, seq_108, op_ASSIGN_111); + + RzILOpEffect *instruction_sequence = seq_112; + return instruction_sequence; +} + +// Rd = mpy(Rs.l,Rt.l):<<1:sat +RzILOpEffect *hex_il_op_m2_mpy_sat_ll_s1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_74 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) ((st32) ((st16) ((Rs >> 0x0) & 
0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_8 = SHIFTRA(Rs, SN(32, 0)); + RzILOpPure *op_AND_10 = LOGAND(op_RSHIFT_8, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_17 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_19 = LOGAND(op_RSHIFT_17, SN(32, 0xffff)); + RzILOpPure *op_MUL_22 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_10), DUP(op_AND_10))), CAST(16, MSB(DUP(op_AND_10)), DUP(op_AND_10))), CAST(32, MSB(CAST(16, MSB(op_AND_19), DUP(op_AND_19))), CAST(16, MSB(DUP(op_AND_19)), DUP(op_AND_19)))); + RzILOpPure *op_LSHIFT_25 = SHIFTL0(CAST(64, MSB(op_MUL_22), DUP(op_MUL_22)), SN(32, 1)); + RzILOpPure *op_RSHIFT_34 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_36 = LOGAND(op_RSHIFT_34, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_42 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_44 = LOGAND(op_RSHIFT_42, SN(32, 0xffff)); + RzILOpPure *op_MUL_47 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_36), DUP(op_AND_36))), CAST(16, MSB(DUP(op_AND_36)), DUP(op_AND_36))), CAST(32, MSB(CAST(16, MSB(op_AND_44), DUP(op_AND_44))), CAST(16, MSB(DUP(op_AND_44)), DUP(op_AND_44)))); + RzILOpPure *op_LSHIFT_50 = SHIFTL0(CAST(64, MSB(op_MUL_47), DUP(op_MUL_47)), SN(32, 1)); + RzILOpPure *op_EQ_51 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_LSHIFT_25), SN(32, 0), SN(32, 0x20)), op_LSHIFT_50); + RzILOpPure *op_RSHIFT_78 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_80 = LOGAND(op_RSHIFT_78, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_86 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_88 = LOGAND(op_RSHIFT_86, SN(32, 0xffff)); + RzILOpPure *op_MUL_91 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_80), DUP(op_AND_80))), CAST(16, MSB(DUP(op_AND_80)), DUP(op_AND_80))), CAST(32, MSB(CAST(16, MSB(op_AND_88), DUP(op_AND_88))), CAST(16, MSB(DUP(op_AND_88)), DUP(op_AND_88)))); + RzILOpPure *op_LSHIFT_94 = SHIFTL0(CAST(64, MSB(op_MUL_91), DUP(op_MUL_91)), SN(32, 1)); + RzILOpPure *op_LT_97 = SLT(op_LSHIFT_94, 
CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_102 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_103 = NEG(op_LSHIFT_102); + RzILOpPure *op_LSHIFT_108 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_111 = SUB(op_LSHIFT_108, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_112 = ITE(op_LT_97, op_NEG_103, op_SUB_111); + RzILOpEffect *gcc_expr_113 = BRANCH(op_EQ_51, EMPTY(), set_usr_field_call_74); + + // h_tmp355 = HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_115 = SETL("h_tmp355", cond_112); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16 ...; + RzILOpEffect *seq_116 = SEQN(2, gcc_expr_113, op_ASSIGN_hybrid_tmp_115); + + // Rd = ((st32) ((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1)) ? 
(((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) : h_tmp355)); + RzILOpPure *op_RSHIFT_55 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_57 = LOGAND(op_RSHIFT_55, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_63 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_65 = LOGAND(op_RSHIFT_63, SN(32, 0xffff)); + RzILOpPure *op_MUL_68 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_57), DUP(op_AND_57))), CAST(16, MSB(DUP(op_AND_57)), DUP(op_AND_57))), CAST(32, MSB(CAST(16, MSB(op_AND_65), DUP(op_AND_65))), CAST(16, MSB(DUP(op_AND_65)), DUP(op_AND_65)))); + RzILOpPure *op_LSHIFT_71 = SHIFTL0(CAST(64, MSB(op_MUL_68), DUP(op_MUL_68)), SN(32, 1)); + RzILOpPure *cond_117 = ITE(DUP(op_EQ_51), op_LSHIFT_71, VARL("h_tmp355")); + RzILOpEffect *op_ASSIGN_119 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(cond_117), DUP(cond_117))); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) (( ...; + RzILOpEffect *seq_120 = SEQN(2, seq_116, op_ASSIGN_119); + + RzILOpEffect *instruction_sequence = seq_120; + return instruction_sequence; +} + +// Rd = mpy(Rs.h,Rt.h):rnd:sat +RzILOpEffect *hex_il_op_m2_mpy_sat_rnd_hh_s0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_77 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) + ((st64) 0x8000)), 0x0, 0x20) == ((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) + ((st64) 0x8000))) {{}} else 
{set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) + ((st64) 0x8000) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_8 = SHIFTRA(Rs, SN(32, 16)); + RzILOpPure *op_AND_10 = LOGAND(op_RSHIFT_8, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_17 = SHIFTRA(Rt, SN(32, 16)); + RzILOpPure *op_AND_19 = LOGAND(op_RSHIFT_17, SN(32, 0xffff)); + RzILOpPure *op_MUL_22 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_10), DUP(op_AND_10))), CAST(16, MSB(DUP(op_AND_10)), DUP(op_AND_10))), CAST(32, MSB(CAST(16, MSB(op_AND_19), DUP(op_AND_19))), CAST(16, MSB(DUP(op_AND_19)), DUP(op_AND_19)))); + RzILOpPure *op_ADD_26 = ADD(CAST(64, MSB(op_MUL_22), DUP(op_MUL_22)), CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_35 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_37 = LOGAND(op_RSHIFT_35, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_43 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_45 = LOGAND(op_RSHIFT_43, SN(32, 0xffff)); + RzILOpPure *op_MUL_48 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_37), DUP(op_AND_37))), CAST(16, MSB(DUP(op_AND_37)), DUP(op_AND_37))), CAST(32, MSB(CAST(16, MSB(op_AND_45), DUP(op_AND_45))), CAST(16, MSB(DUP(op_AND_45)), DUP(op_AND_45)))); + RzILOpPure *op_ADD_52 = ADD(CAST(64, MSB(op_MUL_48), DUP(op_MUL_48)), CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_EQ_53 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_ADD_26), SN(32, 0), SN(32, 0x20)), op_ADD_52); + RzILOpPure *op_RSHIFT_81 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_83 = LOGAND(op_RSHIFT_81, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_89 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_91 = LOGAND(op_RSHIFT_89, SN(32, 0xffff)); + RzILOpPure *op_MUL_94 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_83), DUP(op_AND_83))), CAST(16, MSB(DUP(op_AND_83)), DUP(op_AND_83))), CAST(32, MSB(CAST(16, MSB(op_AND_91), DUP(op_AND_91))), CAST(16, 
MSB(DUP(op_AND_91)), DUP(op_AND_91)))); + RzILOpPure *op_ADD_98 = ADD(CAST(64, MSB(op_MUL_94), DUP(op_MUL_94)), CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_LT_101 = SLT(op_ADD_98, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_106 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_107 = NEG(op_LSHIFT_106); + RzILOpPure *op_LSHIFT_112 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_115 = SUB(op_LSHIFT_112, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_116 = ITE(op_LT_101, op_NEG_107, op_SUB_115); + RzILOpEffect *gcc_expr_117 = BRANCH(op_EQ_53, EMPTY(), set_usr_field_call_77); + + // h_tmp356 = HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) + ((st64) 0x8000)), 0x0, 0x20) == ((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) + ((st64) 0x8000))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) + ((st64) 0x8000) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_119 = SETL("h_tmp356", cond_116); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((st16) ...; + RzILOpEffect *seq_120 = SEQN(2, gcc_expr_117, op_ASSIGN_hybrid_tmp_119); + + // Rd = ((st32) ((sextract64(((ut64) ((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) + ((st64) 0x8000)), 0x0, 0x20) == ((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) + ((st64) 0x8000)) ? 
((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) + ((st64) 0x8000) : h_tmp356)); + RzILOpPure *op_RSHIFT_57 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_59 = LOGAND(op_RSHIFT_57, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_65 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_67 = LOGAND(op_RSHIFT_65, SN(32, 0xffff)); + RzILOpPure *op_MUL_70 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_59), DUP(op_AND_59))), CAST(16, MSB(DUP(op_AND_59)), DUP(op_AND_59))), CAST(32, MSB(CAST(16, MSB(op_AND_67), DUP(op_AND_67))), CAST(16, MSB(DUP(op_AND_67)), DUP(op_AND_67)))); + RzILOpPure *op_ADD_74 = ADD(CAST(64, MSB(op_MUL_70), DUP(op_MUL_70)), CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *cond_121 = ITE(DUP(op_EQ_53), op_ADD_74, VARL("h_tmp356")); + RzILOpEffect *op_ASSIGN_123 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(cond_121), DUP(cond_121))); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((s ...; + RzILOpEffect *seq_124 = SEQN(2, seq_120, op_ASSIGN_123); + + RzILOpEffect *instruction_sequence = seq_124; + return instruction_sequence; +} + +// Rd = mpy(Rs.h,Rt.h):<<1:rnd:sat +RzILOpEffect *hex_il_op_m2_mpy_sat_rnd_hh_s1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_83 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1) + ((st64) 0x8000)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 
0x10) & 0xffff)))) << 0x1) + ((st64) 0x8000))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1) + ((st64) 0x8000) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_8 = SHIFTRA(Rs, SN(32, 16)); + RzILOpPure *op_AND_10 = LOGAND(op_RSHIFT_8, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_17 = SHIFTRA(Rt, SN(32, 16)); + RzILOpPure *op_AND_19 = LOGAND(op_RSHIFT_17, SN(32, 0xffff)); + RzILOpPure *op_MUL_22 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_10), DUP(op_AND_10))), CAST(16, MSB(DUP(op_AND_10)), DUP(op_AND_10))), CAST(32, MSB(CAST(16, MSB(op_AND_19), DUP(op_AND_19))), CAST(16, MSB(DUP(op_AND_19)), DUP(op_AND_19)))); + RzILOpPure *op_LSHIFT_25 = SHIFTL0(CAST(64, MSB(op_MUL_22), DUP(op_MUL_22)), SN(32, 1)); + RzILOpPure *op_ADD_28 = ADD(op_LSHIFT_25, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_37 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_39 = LOGAND(op_RSHIFT_37, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_45 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_47 = LOGAND(op_RSHIFT_45, SN(32, 0xffff)); + RzILOpPure *op_MUL_50 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_39), DUP(op_AND_39))), CAST(16, MSB(DUP(op_AND_39)), DUP(op_AND_39))), CAST(32, MSB(CAST(16, MSB(op_AND_47), DUP(op_AND_47))), CAST(16, MSB(DUP(op_AND_47)), DUP(op_AND_47)))); + RzILOpPure *op_LSHIFT_53 = SHIFTL0(CAST(64, MSB(op_MUL_50), DUP(op_MUL_50)), SN(32, 1)); + RzILOpPure *op_ADD_56 = ADD(op_LSHIFT_53, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_EQ_57 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_ADD_28), SN(32, 0), SN(32, 0x20)), op_ADD_56); + RzILOpPure *op_RSHIFT_87 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_89 = LOGAND(op_RSHIFT_87, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_95 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_97 = LOGAND(op_RSHIFT_95, SN(32, 0xffff)); + 
RzILOpPure *op_MUL_100 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_89), DUP(op_AND_89))), CAST(16, MSB(DUP(op_AND_89)), DUP(op_AND_89))), CAST(32, MSB(CAST(16, MSB(op_AND_97), DUP(op_AND_97))), CAST(16, MSB(DUP(op_AND_97)), DUP(op_AND_97)))); + RzILOpPure *op_LSHIFT_103 = SHIFTL0(CAST(64, MSB(op_MUL_100), DUP(op_MUL_100)), SN(32, 1)); + RzILOpPure *op_ADD_106 = ADD(op_LSHIFT_103, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_LT_109 = SLT(op_ADD_106, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_114 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_115 = NEG(op_LSHIFT_114); + RzILOpPure *op_LSHIFT_120 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_123 = SUB(op_LSHIFT_120, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_124 = ITE(op_LT_109, op_NEG_115, op_SUB_123); + RzILOpEffect *gcc_expr_125 = BRANCH(op_EQ_57, EMPTY(), set_usr_field_call_83); + + // h_tmp357 = HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1) + ((st64) 0x8000)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1) + ((st64) 0x8000))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1) + ((st64) 0x8000) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_127 = SETL("h_tmp357", cond_124); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16 ...; + RzILOpEffect *seq_128 = SEQN(2, gcc_expr_125, op_ASSIGN_hybrid_tmp_127); + + // Rd = ((st32) ((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1) + ((st64) 0x8000)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1) + ((st64) 0x8000)) ? (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1) + ((st64) 0x8000) : h_tmp357)); + RzILOpPure *op_RSHIFT_61 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_63 = LOGAND(op_RSHIFT_61, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_69 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_71 = LOGAND(op_RSHIFT_69, SN(32, 0xffff)); + RzILOpPure *op_MUL_74 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_63), DUP(op_AND_63))), CAST(16, MSB(DUP(op_AND_63)), DUP(op_AND_63))), CAST(32, MSB(CAST(16, MSB(op_AND_71), DUP(op_AND_71))), CAST(16, MSB(DUP(op_AND_71)), DUP(op_AND_71)))); + RzILOpPure *op_LSHIFT_77 = SHIFTL0(CAST(64, MSB(op_MUL_74), DUP(op_MUL_74)), SN(32, 1)); + RzILOpPure *op_ADD_80 = ADD(op_LSHIFT_77, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *cond_129 = ITE(DUP(op_EQ_57), op_ADD_80, VARL("h_tmp357")); + RzILOpEffect *op_ASSIGN_131 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(cond_129), DUP(cond_129))); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) (( ...; + RzILOpEffect *seq_132 = SEQN(2, seq_128, op_ASSIGN_131); + + RzILOpEffect *instruction_sequence = seq_132; + return instruction_sequence; +} + +// Rd = mpy(Rs.h,Rt.l):rnd:sat +RzILOpEffect *hex_il_op_m2_mpy_sat_rnd_hl_s0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', 
false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_77 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) + ((st64) 0x8000)), 0x0, 0x20) == ((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) + ((st64) 0x8000))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) + ((st64) 0x8000) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_8 = SHIFTRA(Rs, SN(32, 16)); + RzILOpPure *op_AND_10 = LOGAND(op_RSHIFT_8, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_17 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_19 = LOGAND(op_RSHIFT_17, SN(32, 0xffff)); + RzILOpPure *op_MUL_22 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_10), DUP(op_AND_10))), CAST(16, MSB(DUP(op_AND_10)), DUP(op_AND_10))), CAST(32, MSB(CAST(16, MSB(op_AND_19), DUP(op_AND_19))), CAST(16, MSB(DUP(op_AND_19)), DUP(op_AND_19)))); + RzILOpPure *op_ADD_26 = ADD(CAST(64, MSB(op_MUL_22), DUP(op_MUL_22)), CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_35 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_37 = LOGAND(op_RSHIFT_35, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_43 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_45 = LOGAND(op_RSHIFT_43, SN(32, 0xffff)); + RzILOpPure *op_MUL_48 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_37), DUP(op_AND_37))), CAST(16, MSB(DUP(op_AND_37)), DUP(op_AND_37))), CAST(32, MSB(CAST(16, MSB(op_AND_45), DUP(op_AND_45))), CAST(16, MSB(DUP(op_AND_45)), 
DUP(op_AND_45)))); + RzILOpPure *op_ADD_52 = ADD(CAST(64, MSB(op_MUL_48), DUP(op_MUL_48)), CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_EQ_53 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_ADD_26), SN(32, 0), SN(32, 0x20)), op_ADD_52); + RzILOpPure *op_RSHIFT_81 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_83 = LOGAND(op_RSHIFT_81, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_89 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_91 = LOGAND(op_RSHIFT_89, SN(32, 0xffff)); + RzILOpPure *op_MUL_94 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_83), DUP(op_AND_83))), CAST(16, MSB(DUP(op_AND_83)), DUP(op_AND_83))), CAST(32, MSB(CAST(16, MSB(op_AND_91), DUP(op_AND_91))), CAST(16, MSB(DUP(op_AND_91)), DUP(op_AND_91)))); + RzILOpPure *op_ADD_98 = ADD(CAST(64, MSB(op_MUL_94), DUP(op_MUL_94)), CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_LT_101 = SLT(op_ADD_98, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_106 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_107 = NEG(op_LSHIFT_106); + RzILOpPure *op_LSHIFT_112 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_115 = SUB(op_LSHIFT_112, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_116 = ITE(op_LT_101, op_NEG_107, op_SUB_115); + RzILOpEffect *gcc_expr_117 = BRANCH(op_EQ_53, EMPTY(), set_usr_field_call_77); + + // h_tmp358 = HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) + ((st64) 0x8000)), 0x0, 0x20) == ((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) + ((st64) 0x8000))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) + ((st64) 0x8000) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_119 = SETL("h_tmp358", cond_116); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((st16) ...; + RzILOpEffect *seq_120 = SEQN(2, gcc_expr_117, op_ASSIGN_hybrid_tmp_119); + + // Rd = ((st32) ((sextract64(((ut64) ((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) + ((st64) 0x8000)), 0x0, 0x20) == ((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) + ((st64) 0x8000)) ? ((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) + ((st64) 0x8000) : h_tmp358)); + RzILOpPure *op_RSHIFT_57 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_59 = LOGAND(op_RSHIFT_57, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_65 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_67 = LOGAND(op_RSHIFT_65, SN(32, 0xffff)); + RzILOpPure *op_MUL_70 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_59), DUP(op_AND_59))), CAST(16, MSB(DUP(op_AND_59)), DUP(op_AND_59))), CAST(32, MSB(CAST(16, MSB(op_AND_67), DUP(op_AND_67))), CAST(16, MSB(DUP(op_AND_67)), DUP(op_AND_67)))); + RzILOpPure *op_ADD_74 = ADD(CAST(64, MSB(op_MUL_70), DUP(op_MUL_70)), CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *cond_121 = ITE(DUP(op_EQ_53), op_ADD_74, VARL("h_tmp358")); + RzILOpEffect *op_ASSIGN_123 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(cond_121), DUP(cond_121))); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((s ...; + RzILOpEffect *seq_124 = SEQN(2, seq_120, op_ASSIGN_123); + + RzILOpEffect *instruction_sequence = seq_124; + return instruction_sequence; +} + +// Rd = mpy(Rs.h,Rt.l):<<1:rnd:sat +RzILOpEffect *hex_il_op_m2_mpy_sat_rnd_hl_s1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, 
Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_83 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) + ((st64) 0x8000)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) + ((st64) 0x8000))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) + ((st64) 0x8000) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_8 = SHIFTRA(Rs, SN(32, 16)); + RzILOpPure *op_AND_10 = LOGAND(op_RSHIFT_8, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_17 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_19 = LOGAND(op_RSHIFT_17, SN(32, 0xffff)); + RzILOpPure *op_MUL_22 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_10), DUP(op_AND_10))), CAST(16, MSB(DUP(op_AND_10)), DUP(op_AND_10))), CAST(32, MSB(CAST(16, MSB(op_AND_19), DUP(op_AND_19))), CAST(16, MSB(DUP(op_AND_19)), DUP(op_AND_19)))); + RzILOpPure *op_LSHIFT_25 = SHIFTL0(CAST(64, MSB(op_MUL_22), DUP(op_MUL_22)), SN(32, 1)); + RzILOpPure *op_ADD_28 = ADD(op_LSHIFT_25, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_37 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_39 = LOGAND(op_RSHIFT_37, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_45 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_47 = LOGAND(op_RSHIFT_45, SN(32, 0xffff)); + RzILOpPure *op_MUL_50 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_39), DUP(op_AND_39))), CAST(16, MSB(DUP(op_AND_39)), DUP(op_AND_39))), CAST(32, MSB(CAST(16, MSB(op_AND_47), DUP(op_AND_47))), CAST(16, MSB(DUP(op_AND_47)), 
DUP(op_AND_47)))); + RzILOpPure *op_LSHIFT_53 = SHIFTL0(CAST(64, MSB(op_MUL_50), DUP(op_MUL_50)), SN(32, 1)); + RzILOpPure *op_ADD_56 = ADD(op_LSHIFT_53, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_EQ_57 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_ADD_28), SN(32, 0), SN(32, 0x20)), op_ADD_56); + RzILOpPure *op_RSHIFT_87 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_89 = LOGAND(op_RSHIFT_87, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_95 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_97 = LOGAND(op_RSHIFT_95, SN(32, 0xffff)); + RzILOpPure *op_MUL_100 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_89), DUP(op_AND_89))), CAST(16, MSB(DUP(op_AND_89)), DUP(op_AND_89))), CAST(32, MSB(CAST(16, MSB(op_AND_97), DUP(op_AND_97))), CAST(16, MSB(DUP(op_AND_97)), DUP(op_AND_97)))); + RzILOpPure *op_LSHIFT_103 = SHIFTL0(CAST(64, MSB(op_MUL_100), DUP(op_MUL_100)), SN(32, 1)); + RzILOpPure *op_ADD_106 = ADD(op_LSHIFT_103, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_LT_109 = SLT(op_ADD_106, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_114 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_115 = NEG(op_LSHIFT_114); + RzILOpPure *op_LSHIFT_120 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_123 = SUB(op_LSHIFT_120, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_124 = ITE(op_LT_109, op_NEG_115, op_SUB_123); + RzILOpEffect *gcc_expr_125 = BRANCH(op_EQ_57, EMPTY(), set_usr_field_call_83); + + // h_tmp359 = HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) + ((st64) 0x8000)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) + ((st64) 0x8000))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) + ((st64) 0x8000) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_127 = SETL("h_tmp359", cond_124); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16 ...; + RzILOpEffect *seq_128 = SEQN(2, gcc_expr_125, op_ASSIGN_hybrid_tmp_127); + + // Rd = ((st32) ((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) + ((st64) 0x8000)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) + ((st64) 0x8000)) ? (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) + ((st64) 0x8000) : h_tmp359)); + RzILOpPure *op_RSHIFT_61 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_63 = LOGAND(op_RSHIFT_61, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_69 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_71 = LOGAND(op_RSHIFT_69, SN(32, 0xffff)); + RzILOpPure *op_MUL_74 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_63), DUP(op_AND_63))), CAST(16, MSB(DUP(op_AND_63)), DUP(op_AND_63))), CAST(32, MSB(CAST(16, MSB(op_AND_71), DUP(op_AND_71))), CAST(16, MSB(DUP(op_AND_71)), DUP(op_AND_71)))); + RzILOpPure *op_LSHIFT_77 = SHIFTL0(CAST(64, MSB(op_MUL_74), DUP(op_MUL_74)), SN(32, 1)); + RzILOpPure *op_ADD_80 = ADD(op_LSHIFT_77, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *cond_129 = ITE(DUP(op_EQ_57), op_ADD_80, VARL("h_tmp359")); + RzILOpEffect *op_ASSIGN_131 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(cond_129), DUP(cond_129))); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) (( ...; + RzILOpEffect *seq_132 = SEQN(2, seq_128, op_ASSIGN_131); + + RzILOpEffect *instruction_sequence = seq_132; + return instruction_sequence; +} + +// Rd = mpy(Rs.l,Rt.h):rnd:sat +RzILOpEffect *hex_il_op_m2_mpy_sat_rnd_lh_s0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', 
false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_77 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) + ((st64) 0x8000)), 0x0, 0x20) == ((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) + ((st64) 0x8000))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) + ((st64) 0x8000) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_8 = SHIFTRA(Rs, SN(32, 0)); + RzILOpPure *op_AND_10 = LOGAND(op_RSHIFT_8, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_17 = SHIFTRA(Rt, SN(32, 16)); + RzILOpPure *op_AND_19 = LOGAND(op_RSHIFT_17, SN(32, 0xffff)); + RzILOpPure *op_MUL_22 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_10), DUP(op_AND_10))), CAST(16, MSB(DUP(op_AND_10)), DUP(op_AND_10))), CAST(32, MSB(CAST(16, MSB(op_AND_19), DUP(op_AND_19))), CAST(16, MSB(DUP(op_AND_19)), DUP(op_AND_19)))); + RzILOpPure *op_ADD_26 = ADD(CAST(64, MSB(op_MUL_22), DUP(op_MUL_22)), CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_35 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_37 = LOGAND(op_RSHIFT_35, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_43 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_45 = LOGAND(op_RSHIFT_43, SN(32, 0xffff)); + RzILOpPure *op_MUL_48 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_37), DUP(op_AND_37))), CAST(16, MSB(DUP(op_AND_37)), DUP(op_AND_37))), CAST(32, MSB(CAST(16, MSB(op_AND_45), DUP(op_AND_45))), CAST(16, MSB(DUP(op_AND_45)), 
DUP(op_AND_45)))); + RzILOpPure *op_ADD_52 = ADD(CAST(64, MSB(op_MUL_48), DUP(op_MUL_48)), CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_EQ_53 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_ADD_26), SN(32, 0), SN(32, 0x20)), op_ADD_52); + RzILOpPure *op_RSHIFT_81 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_83 = LOGAND(op_RSHIFT_81, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_89 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_91 = LOGAND(op_RSHIFT_89, SN(32, 0xffff)); + RzILOpPure *op_MUL_94 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_83), DUP(op_AND_83))), CAST(16, MSB(DUP(op_AND_83)), DUP(op_AND_83))), CAST(32, MSB(CAST(16, MSB(op_AND_91), DUP(op_AND_91))), CAST(16, MSB(DUP(op_AND_91)), DUP(op_AND_91)))); + RzILOpPure *op_ADD_98 = ADD(CAST(64, MSB(op_MUL_94), DUP(op_MUL_94)), CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_LT_101 = SLT(op_ADD_98, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_106 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_107 = NEG(op_LSHIFT_106); + RzILOpPure *op_LSHIFT_112 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_115 = SUB(op_LSHIFT_112, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_116 = ITE(op_LT_101, op_NEG_107, op_SUB_115); + RzILOpEffect *gcc_expr_117 = BRANCH(op_EQ_53, EMPTY(), set_usr_field_call_77); + + // h_tmp360 = HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) + ((st64) 0x8000)), 0x0, 0x20) == ((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) + ((st64) 0x8000))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) + ((st64) 0x8000) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_119 = SETL("h_tmp360", cond_116); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((st16) ...; + RzILOpEffect *seq_120 = SEQN(2, gcc_expr_117, op_ASSIGN_hybrid_tmp_119); + + // Rd = ((st32) ((sextract64(((ut64) ((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) + ((st64) 0x8000)), 0x0, 0x20) == ((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) + ((st64) 0x8000)) ? ((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) + ((st64) 0x8000) : h_tmp360)); + RzILOpPure *op_RSHIFT_57 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_59 = LOGAND(op_RSHIFT_57, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_65 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_67 = LOGAND(op_RSHIFT_65, SN(32, 0xffff)); + RzILOpPure *op_MUL_70 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_59), DUP(op_AND_59))), CAST(16, MSB(DUP(op_AND_59)), DUP(op_AND_59))), CAST(32, MSB(CAST(16, MSB(op_AND_67), DUP(op_AND_67))), CAST(16, MSB(DUP(op_AND_67)), DUP(op_AND_67)))); + RzILOpPure *op_ADD_74 = ADD(CAST(64, MSB(op_MUL_70), DUP(op_MUL_70)), CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *cond_121 = ITE(DUP(op_EQ_53), op_ADD_74, VARL("h_tmp360")); + RzILOpEffect *op_ASSIGN_123 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(cond_121), DUP(cond_121))); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((s ...; + RzILOpEffect *seq_124 = SEQN(2, seq_120, op_ASSIGN_123); + + RzILOpEffect *instruction_sequence = seq_124; + return instruction_sequence; +} + +// Rd = mpy(Rs.l,Rt.h):<<1:rnd:sat +RzILOpEffect *hex_il_op_m2_mpy_sat_rnd_lh_s1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, 
Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_83 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1) + ((st64) 0x8000)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1) + ((st64) 0x8000))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1) + ((st64) 0x8000) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_8 = SHIFTRA(Rs, SN(32, 0)); + RzILOpPure *op_AND_10 = LOGAND(op_RSHIFT_8, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_17 = SHIFTRA(Rt, SN(32, 16)); + RzILOpPure *op_AND_19 = LOGAND(op_RSHIFT_17, SN(32, 0xffff)); + RzILOpPure *op_MUL_22 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_10), DUP(op_AND_10))), CAST(16, MSB(DUP(op_AND_10)), DUP(op_AND_10))), CAST(32, MSB(CAST(16, MSB(op_AND_19), DUP(op_AND_19))), CAST(16, MSB(DUP(op_AND_19)), DUP(op_AND_19)))); + RzILOpPure *op_LSHIFT_25 = SHIFTL0(CAST(64, MSB(op_MUL_22), DUP(op_MUL_22)), SN(32, 1)); + RzILOpPure *op_ADD_28 = ADD(op_LSHIFT_25, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_37 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_39 = LOGAND(op_RSHIFT_37, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_45 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_47 = LOGAND(op_RSHIFT_45, SN(32, 0xffff)); + RzILOpPure *op_MUL_50 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_39), DUP(op_AND_39))), CAST(16, MSB(DUP(op_AND_39)), DUP(op_AND_39))), CAST(32, MSB(CAST(16, MSB(op_AND_47), DUP(op_AND_47))), CAST(16, MSB(DUP(op_AND_47)), 
DUP(op_AND_47)))); + RzILOpPure *op_LSHIFT_53 = SHIFTL0(CAST(64, MSB(op_MUL_50), DUP(op_MUL_50)), SN(32, 1)); + RzILOpPure *op_ADD_56 = ADD(op_LSHIFT_53, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_EQ_57 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_ADD_28), SN(32, 0), SN(32, 0x20)), op_ADD_56); + RzILOpPure *op_RSHIFT_87 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_89 = LOGAND(op_RSHIFT_87, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_95 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_97 = LOGAND(op_RSHIFT_95, SN(32, 0xffff)); + RzILOpPure *op_MUL_100 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_89), DUP(op_AND_89))), CAST(16, MSB(DUP(op_AND_89)), DUP(op_AND_89))), CAST(32, MSB(CAST(16, MSB(op_AND_97), DUP(op_AND_97))), CAST(16, MSB(DUP(op_AND_97)), DUP(op_AND_97)))); + RzILOpPure *op_LSHIFT_103 = SHIFTL0(CAST(64, MSB(op_MUL_100), DUP(op_MUL_100)), SN(32, 1)); + RzILOpPure *op_ADD_106 = ADD(op_LSHIFT_103, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_LT_109 = SLT(op_ADD_106, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_114 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_115 = NEG(op_LSHIFT_114); + RzILOpPure *op_LSHIFT_120 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_123 = SUB(op_LSHIFT_120, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_124 = ITE(op_LT_109, op_NEG_115, op_SUB_123); + RzILOpEffect *gcc_expr_125 = BRANCH(op_EQ_57, EMPTY(), set_usr_field_call_83); + + // h_tmp361 = HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1) + ((st64) 0x8000)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1) + ((st64) 0x8000))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1) + ((st64) 0x8000) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_127 = SETL("h_tmp361", cond_124); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16 ...; + RzILOpEffect *seq_128 = SEQN(2, gcc_expr_125, op_ASSIGN_hybrid_tmp_127); + + // Rd = ((st32) ((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1) + ((st64) 0x8000)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1) + ((st64) 0x8000)) ? (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1) + ((st64) 0x8000) : h_tmp361)); + RzILOpPure *op_RSHIFT_61 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_63 = LOGAND(op_RSHIFT_61, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_69 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_71 = LOGAND(op_RSHIFT_69, SN(32, 0xffff)); + RzILOpPure *op_MUL_74 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_63), DUP(op_AND_63))), CAST(16, MSB(DUP(op_AND_63)), DUP(op_AND_63))), CAST(32, MSB(CAST(16, MSB(op_AND_71), DUP(op_AND_71))), CAST(16, MSB(DUP(op_AND_71)), DUP(op_AND_71)))); + RzILOpPure *op_LSHIFT_77 = SHIFTL0(CAST(64, MSB(op_MUL_74), DUP(op_MUL_74)), SN(32, 1)); + RzILOpPure *op_ADD_80 = ADD(op_LSHIFT_77, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *cond_129 = ITE(DUP(op_EQ_57), op_ADD_80, VARL("h_tmp361")); + RzILOpEffect *op_ASSIGN_131 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(cond_129), DUP(cond_129))); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) (( ...; + RzILOpEffect *seq_132 = SEQN(2, seq_128, op_ASSIGN_131); + + RzILOpEffect *instruction_sequence = seq_132; + return instruction_sequence; +} + +// Rd = mpy(Rs.l,Rt.l):rnd:sat +RzILOpEffect *hex_il_op_m2_mpy_sat_rnd_ll_s0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', 
false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_77 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) + ((st64) 0x8000)), 0x0, 0x20) == ((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) + ((st64) 0x8000))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) + ((st64) 0x8000) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_8 = SHIFTRA(Rs, SN(32, 0)); + RzILOpPure *op_AND_10 = LOGAND(op_RSHIFT_8, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_17 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_19 = LOGAND(op_RSHIFT_17, SN(32, 0xffff)); + RzILOpPure *op_MUL_22 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_10), DUP(op_AND_10))), CAST(16, MSB(DUP(op_AND_10)), DUP(op_AND_10))), CAST(32, MSB(CAST(16, MSB(op_AND_19), DUP(op_AND_19))), CAST(16, MSB(DUP(op_AND_19)), DUP(op_AND_19)))); + RzILOpPure *op_ADD_26 = ADD(CAST(64, MSB(op_MUL_22), DUP(op_MUL_22)), CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_35 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_37 = LOGAND(op_RSHIFT_35, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_43 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_45 = LOGAND(op_RSHIFT_43, SN(32, 0xffff)); + RzILOpPure *op_MUL_48 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_37), DUP(op_AND_37))), CAST(16, MSB(DUP(op_AND_37)), DUP(op_AND_37))), CAST(32, MSB(CAST(16, MSB(op_AND_45), DUP(op_AND_45))), CAST(16, MSB(DUP(op_AND_45)), DUP(op_AND_45)))); 
+ RzILOpPure *op_ADD_52 = ADD(CAST(64, MSB(op_MUL_48), DUP(op_MUL_48)), CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_EQ_53 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_ADD_26), SN(32, 0), SN(32, 0x20)), op_ADD_52); + RzILOpPure *op_RSHIFT_81 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_83 = LOGAND(op_RSHIFT_81, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_89 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_91 = LOGAND(op_RSHIFT_89, SN(32, 0xffff)); + RzILOpPure *op_MUL_94 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_83), DUP(op_AND_83))), CAST(16, MSB(DUP(op_AND_83)), DUP(op_AND_83))), CAST(32, MSB(CAST(16, MSB(op_AND_91), DUP(op_AND_91))), CAST(16, MSB(DUP(op_AND_91)), DUP(op_AND_91)))); + RzILOpPure *op_ADD_98 = ADD(CAST(64, MSB(op_MUL_94), DUP(op_MUL_94)), CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_LT_101 = SLT(op_ADD_98, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_106 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_107 = NEG(op_LSHIFT_106); + RzILOpPure *op_LSHIFT_112 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_115 = SUB(op_LSHIFT_112, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_116 = ITE(op_LT_101, op_NEG_107, op_SUB_115); + RzILOpEffect *gcc_expr_117 = BRANCH(op_EQ_53, EMPTY(), set_usr_field_call_77); + + // h_tmp362 = HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) + ((st64) 0x8000)), 0x0, 0x20) == ((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) + ((st64) 0x8000))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) + ((st64) 0x8000) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_119 = SETL("h_tmp362", cond_116); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((st16) ...; + RzILOpEffect *seq_120 = SEQN(2, gcc_expr_117, op_ASSIGN_hybrid_tmp_119); + + // Rd = ((st32) ((sextract64(((ut64) ((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) + ((st64) 0x8000)), 0x0, 0x20) == ((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) + ((st64) 0x8000)) ? ((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) + ((st64) 0x8000) : h_tmp362)); + RzILOpPure *op_RSHIFT_57 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_59 = LOGAND(op_RSHIFT_57, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_65 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_67 = LOGAND(op_RSHIFT_65, SN(32, 0xffff)); + RzILOpPure *op_MUL_70 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_59), DUP(op_AND_59))), CAST(16, MSB(DUP(op_AND_59)), DUP(op_AND_59))), CAST(32, MSB(CAST(16, MSB(op_AND_67), DUP(op_AND_67))), CAST(16, MSB(DUP(op_AND_67)), DUP(op_AND_67)))); + RzILOpPure *op_ADD_74 = ADD(CAST(64, MSB(op_MUL_70), DUP(op_MUL_70)), CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *cond_121 = ITE(DUP(op_EQ_53), op_ADD_74, VARL("h_tmp362")); + RzILOpEffect *op_ASSIGN_123 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(cond_121), DUP(cond_121))); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((s ...; + RzILOpEffect *seq_124 = SEQN(2, seq_120, op_ASSIGN_123); + + RzILOpEffect *instruction_sequence = seq_124; + return instruction_sequence; +} + +// Rd = mpy(Rs.l,Rt.l):<<1:rnd:sat +RzILOpEffect *hex_il_op_m2_mpy_sat_rnd_ll_s1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, 
false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_83 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) + ((st64) 0x8000)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) + ((st64) 0x8000))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) + ((st64) 0x8000) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_8 = SHIFTRA(Rs, SN(32, 0)); + RzILOpPure *op_AND_10 = LOGAND(op_RSHIFT_8, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_17 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_19 = LOGAND(op_RSHIFT_17, SN(32, 0xffff)); + RzILOpPure *op_MUL_22 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_10), DUP(op_AND_10))), CAST(16, MSB(DUP(op_AND_10)), DUP(op_AND_10))), CAST(32, MSB(CAST(16, MSB(op_AND_19), DUP(op_AND_19))), CAST(16, MSB(DUP(op_AND_19)), DUP(op_AND_19)))); + RzILOpPure *op_LSHIFT_25 = SHIFTL0(CAST(64, MSB(op_MUL_22), DUP(op_MUL_22)), SN(32, 1)); + RzILOpPure *op_ADD_28 = ADD(op_LSHIFT_25, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_37 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_39 = LOGAND(op_RSHIFT_37, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_45 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_47 = LOGAND(op_RSHIFT_45, SN(32, 0xffff)); + RzILOpPure *op_MUL_50 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_39), DUP(op_AND_39))), CAST(16, MSB(DUP(op_AND_39)), DUP(op_AND_39))), CAST(32, MSB(CAST(16, MSB(op_AND_47), DUP(op_AND_47))), CAST(16, MSB(DUP(op_AND_47)), DUP(op_AND_47)))); + 
RzILOpPure *op_LSHIFT_53 = SHIFTL0(CAST(64, MSB(op_MUL_50), DUP(op_MUL_50)), SN(32, 1)); + RzILOpPure *op_ADD_56 = ADD(op_LSHIFT_53, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_EQ_57 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_ADD_28), SN(32, 0), SN(32, 0x20)), op_ADD_56); + RzILOpPure *op_RSHIFT_87 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_89 = LOGAND(op_RSHIFT_87, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_95 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_97 = LOGAND(op_RSHIFT_95, SN(32, 0xffff)); + RzILOpPure *op_MUL_100 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_89), DUP(op_AND_89))), CAST(16, MSB(DUP(op_AND_89)), DUP(op_AND_89))), CAST(32, MSB(CAST(16, MSB(op_AND_97), DUP(op_AND_97))), CAST(16, MSB(DUP(op_AND_97)), DUP(op_AND_97)))); + RzILOpPure *op_LSHIFT_103 = SHIFTL0(CAST(64, MSB(op_MUL_100), DUP(op_MUL_100)), SN(32, 1)); + RzILOpPure *op_ADD_106 = ADD(op_LSHIFT_103, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_LT_109 = SLT(op_ADD_106, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_114 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_115 = NEG(op_LSHIFT_114); + RzILOpPure *op_LSHIFT_120 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_123 = SUB(op_LSHIFT_120, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_124 = ITE(op_LT_109, op_NEG_115, op_SUB_123); + RzILOpEffect *gcc_expr_125 = BRANCH(op_EQ_57, EMPTY(), set_usr_field_call_83); + + // h_tmp363 = HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) + ((st64) 0x8000)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) + ((st64) 0x8000))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) + ((st64) 0x8000) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_127 = SETL("h_tmp363", cond_124); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16 ...; + RzILOpEffect *seq_128 = SEQN(2, gcc_expr_125, op_ASSIGN_hybrid_tmp_127); + + // Rd = ((st32) ((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) + ((st64) 0x8000)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) + ((st64) 0x8000)) ? (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) + ((st64) 0x8000) : h_tmp363)); + RzILOpPure *op_RSHIFT_61 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_63 = LOGAND(op_RSHIFT_61, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_69 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_71 = LOGAND(op_RSHIFT_69, SN(32, 0xffff)); + RzILOpPure *op_MUL_74 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_63), DUP(op_AND_63))), CAST(16, MSB(DUP(op_AND_63)), DUP(op_AND_63))), CAST(32, MSB(CAST(16, MSB(op_AND_71), DUP(op_AND_71))), CAST(16, MSB(DUP(op_AND_71)), DUP(op_AND_71)))); + RzILOpPure *op_LSHIFT_77 = SHIFTL0(CAST(64, MSB(op_MUL_74), DUP(op_MUL_74)), SN(32, 1)); + RzILOpPure *op_ADD_80 = ADD(op_LSHIFT_77, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *cond_129 = ITE(DUP(op_EQ_57), op_ADD_80, VARL("h_tmp363")); + RzILOpEffect *op_ASSIGN_131 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(cond_129), DUP(cond_129))); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) (( ...; + RzILOpEffect *seq_132 = SEQN(2, seq_128, op_ASSIGN_131); + + RzILOpEffect *instruction_sequence = seq_132; + return instruction_sequence; +} + +// Rd = mpy(Rs,Rt) +RzILOpEffect *hex_il_op_m2_mpy_up(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = 
ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rd = ((st32) (((st64) Rs) * ((st64) Rt) >> 0x20)); + RzILOpPure *op_MUL_5 = MUL(CAST(64, MSB(Rs), DUP(Rs)), CAST(64, MSB(Rt), DUP(Rt))); + RzILOpPure *op_RSHIFT_7 = SHIFTRA(op_MUL_5, SN(32, 0x20)); + RzILOpEffect *op_ASSIGN_9 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(op_RSHIFT_7), DUP(op_RSHIFT_7))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_9; + return instruction_sequence; +} + +// Rd = mpy(Rs,Rt):<<1 +RzILOpEffect *hex_il_op_m2_mpy_up_s1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rd = ((st32) (((st64) Rs) * ((st64) Rt) >> 0x1f)); + RzILOpPure *op_MUL_5 = MUL(CAST(64, MSB(Rs), DUP(Rs)), CAST(64, MSB(Rt), DUP(Rt))); + RzILOpPure *op_RSHIFT_7 = SHIFTRA(op_MUL_5, SN(32, 31)); + RzILOpEffect *op_ASSIGN_9 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(op_RSHIFT_7), DUP(op_RSHIFT_7))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_9; + return instruction_sequence; +} + +// Rd = mpy(Rs,Rt):<<1:sat +RzILOpEffect *hex_il_op_m2_mpy_up_s1_sat(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_29 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if 
((sextract64(((ut64) (((st64) Rs) * ((st64) Rt) >> 0x1f)), 0x0, 0x20) == (((st64) Rs) * ((st64) Rt) >> 0x1f))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) Rs) * ((st64) Rt) >> 0x1f) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_MUL_8 = MUL(CAST(64, MSB(Rs), DUP(Rs)), CAST(64, MSB(Rt), DUP(Rt))); + RzILOpPure *op_RSHIFT_10 = SHIFTRA(op_MUL_8, SN(32, 31)); + RzILOpPure *op_MUL_18 = MUL(CAST(64, MSB(DUP(Rs)), DUP(Rs)), CAST(64, MSB(DUP(Rt)), DUP(Rt))); + RzILOpPure *op_RSHIFT_20 = SHIFTRA(op_MUL_18, SN(32, 31)); + RzILOpPure *op_EQ_21 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_RSHIFT_10), SN(32, 0), SN(32, 0x20)), op_RSHIFT_20); + RzILOpPure *op_MUL_32 = MUL(CAST(64, MSB(DUP(Rs)), DUP(Rs)), CAST(64, MSB(DUP(Rt)), DUP(Rt))); + RzILOpPure *op_RSHIFT_34 = SHIFTRA(op_MUL_32, SN(32, 31)); + RzILOpPure *op_LT_37 = SLT(op_RSHIFT_34, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_42 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_43 = NEG(op_LSHIFT_42); + RzILOpPure *op_LSHIFT_48 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_51 = SUB(op_LSHIFT_48, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_52 = ITE(op_LT_37, op_NEG_43, op_SUB_51); + RzILOpEffect *gcc_expr_53 = BRANCH(op_EQ_21, EMPTY(), set_usr_field_call_29); + + // h_tmp364 = HYB(gcc_expr_if ((sextract64(((ut64) (((st64) Rs) * ((st64) Rt) >> 0x1f)), 0x0, 0x20) == (((st64) Rs) * ((st64) Rt) >> 0x1f))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) Rs) * ((st64) Rt) >> 0x1f) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_55 = SETL("h_tmp364", cond_52); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) Rs) * ((st64) ...; + RzILOpEffect *seq_56 = SEQN(2, gcc_expr_53, op_ASSIGN_hybrid_tmp_55); + + // Rd = ((st32) ((sextract64(((ut64) (((st64) Rs) * ((st64) Rt) >> 0x1f)), 0x0, 0x20) == (((st64) Rs) * ((st64) Rt) >> 0x1f)) ? (((st64) Rs) * ((st64) Rt) >> 0x1f) : h_tmp364)); + RzILOpPure *op_MUL_24 = MUL(CAST(64, MSB(DUP(Rs)), DUP(Rs)), CAST(64, MSB(DUP(Rt)), DUP(Rt))); + RzILOpPure *op_RSHIFT_26 = SHIFTRA(op_MUL_24, SN(32, 31)); + RzILOpPure *cond_57 = ITE(DUP(op_EQ_21), op_RSHIFT_26, VARL("h_tmp364")); + RzILOpEffect *op_ASSIGN_59 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(cond_57), DUP(cond_57))); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) Rs) * ((st ...; + RzILOpEffect *seq_60 = SEQN(2, seq_56, op_ASSIGN_59); + + RzILOpEffect *instruction_sequence = seq_60; + return instruction_sequence; +} + +// Rxx += mpy(Rs.h,Rt.h) +RzILOpEffect *hex_il_op_m2_mpyd_acc_hh_s0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rxx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rxx = Rxx + ((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rs, SN(32, 16)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(Rt, SN(32, 16)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0xffff)); + RzILOpPure *op_MUL_19 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_7), DUP(op_AND_7))), CAST(16, MSB(DUP(op_AND_7)), DUP(op_AND_7))), CAST(32, MSB(CAST(16, MSB(op_AND_16), DUP(op_AND_16))), CAST(16, MSB(DUP(op_AND_16)), DUP(op_AND_16)))); + 
RzILOpPure *op_ADD_21 = ADD(READ_REG(pkt, Rxx_op, false), CAST(64, MSB(op_MUL_19), DUP(op_MUL_19))); + RzILOpEffect *op_ASSIGN_22 = WRITE_REG(bundle, Rxx_op, op_ADD_21); + + RzILOpEffect *instruction_sequence = op_ASSIGN_22; + return instruction_sequence; +} + +// Rxx += mpy(Rs.h,Rt.h):<<1 +RzILOpEffect *hex_il_op_m2_mpyd_acc_hh_s1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rxx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rxx = Rxx + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rs, SN(32, 16)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(Rt, SN(32, 16)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0xffff)); + RzILOpPure *op_MUL_19 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_7), DUP(op_AND_7))), CAST(16, MSB(DUP(op_AND_7)), DUP(op_AND_7))), CAST(32, MSB(CAST(16, MSB(op_AND_16), DUP(op_AND_16))), CAST(16, MSB(DUP(op_AND_16)), DUP(op_AND_16)))); + RzILOpPure *op_LSHIFT_22 = SHIFTL0(CAST(64, MSB(op_MUL_19), DUP(op_MUL_19)), SN(32, 1)); + RzILOpPure *op_ADD_23 = ADD(READ_REG(pkt, Rxx_op, false), op_LSHIFT_22); + RzILOpEffect *op_ASSIGN_24 = WRITE_REG(bundle, Rxx_op, op_ADD_23); + + RzILOpEffect *instruction_sequence = op_ASSIGN_24; + return instruction_sequence; +} + +// Rxx += mpy(Rs.h,Rt.l) +RzILOpEffect *hex_il_op_m2_mpyd_acc_hl_s0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rxx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, 
Rt_op, false); + + // Rxx = Rxx + ((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rs, SN(32, 16)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0xffff)); + RzILOpPure *op_MUL_19 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_7), DUP(op_AND_7))), CAST(16, MSB(DUP(op_AND_7)), DUP(op_AND_7))), CAST(32, MSB(CAST(16, MSB(op_AND_16), DUP(op_AND_16))), CAST(16, MSB(DUP(op_AND_16)), DUP(op_AND_16)))); + RzILOpPure *op_ADD_21 = ADD(READ_REG(pkt, Rxx_op, false), CAST(64, MSB(op_MUL_19), DUP(op_MUL_19))); + RzILOpEffect *op_ASSIGN_22 = WRITE_REG(bundle, Rxx_op, op_ADD_21); + + RzILOpEffect *instruction_sequence = op_ASSIGN_22; + return instruction_sequence; +} + +// Rxx += mpy(Rs.h,Rt.l):<<1 +RzILOpEffect *hex_il_op_m2_mpyd_acc_hl_s1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rxx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rxx = Rxx + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rs, SN(32, 16)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0xffff)); + RzILOpPure *op_MUL_19 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_7), DUP(op_AND_7))), CAST(16, MSB(DUP(op_AND_7)), DUP(op_AND_7))), CAST(32, MSB(CAST(16, MSB(op_AND_16), DUP(op_AND_16))), CAST(16, MSB(DUP(op_AND_16)), DUP(op_AND_16)))); + RzILOpPure *op_LSHIFT_22 = SHIFTL0(CAST(64, MSB(op_MUL_19), DUP(op_MUL_19)), SN(32, 1)); + RzILOpPure *op_ADD_23 = 
ADD(READ_REG(pkt, Rxx_op, false), op_LSHIFT_22); + RzILOpEffect *op_ASSIGN_24 = WRITE_REG(bundle, Rxx_op, op_ADD_23); + + RzILOpEffect *instruction_sequence = op_ASSIGN_24; + return instruction_sequence; +} + +// Rxx += mpy(Rs.l,Rt.h) +RzILOpEffect *hex_il_op_m2_mpyd_acc_lh_s0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rxx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rxx = Rxx + ((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rs, SN(32, 0)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(Rt, SN(32, 16)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0xffff)); + RzILOpPure *op_MUL_19 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_7), DUP(op_AND_7))), CAST(16, MSB(DUP(op_AND_7)), DUP(op_AND_7))), CAST(32, MSB(CAST(16, MSB(op_AND_16), DUP(op_AND_16))), CAST(16, MSB(DUP(op_AND_16)), DUP(op_AND_16)))); + RzILOpPure *op_ADD_21 = ADD(READ_REG(pkt, Rxx_op, false), CAST(64, MSB(op_MUL_19), DUP(op_MUL_19))); + RzILOpEffect *op_ASSIGN_22 = WRITE_REG(bundle, Rxx_op, op_ADD_21); + + RzILOpEffect *instruction_sequence = op_ASSIGN_22; + return instruction_sequence; +} + +// Rxx += mpy(Rs.l,Rt.h):<<1 +RzILOpEffect *hex_il_op_m2_mpyd_acc_lh_s1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rxx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rxx = Rxx + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 
0xffff)))) << 0x1); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rs, SN(32, 0)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(Rt, SN(32, 16)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0xffff)); + RzILOpPure *op_MUL_19 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_7), DUP(op_AND_7))), CAST(16, MSB(DUP(op_AND_7)), DUP(op_AND_7))), CAST(32, MSB(CAST(16, MSB(op_AND_16), DUP(op_AND_16))), CAST(16, MSB(DUP(op_AND_16)), DUP(op_AND_16)))); + RzILOpPure *op_LSHIFT_22 = SHIFTL0(CAST(64, MSB(op_MUL_19), DUP(op_MUL_19)), SN(32, 1)); + RzILOpPure *op_ADD_23 = ADD(READ_REG(pkt, Rxx_op, false), op_LSHIFT_22); + RzILOpEffect *op_ASSIGN_24 = WRITE_REG(bundle, Rxx_op, op_ADD_23); + + RzILOpEffect *instruction_sequence = op_ASSIGN_24; + return instruction_sequence; +} + +// Rxx += mpy(Rs.l,Rt.l) +RzILOpEffect *hex_il_op_m2_mpyd_acc_ll_s0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rxx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rxx = Rxx + ((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rs, SN(32, 0)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0xffff)); + RzILOpPure *op_MUL_19 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_7), DUP(op_AND_7))), CAST(16, MSB(DUP(op_AND_7)), DUP(op_AND_7))), CAST(32, MSB(CAST(16, MSB(op_AND_16), DUP(op_AND_16))), CAST(16, MSB(DUP(op_AND_16)), DUP(op_AND_16)))); + RzILOpPure *op_ADD_21 = ADD(READ_REG(pkt, Rxx_op, false), CAST(64, MSB(op_MUL_19), DUP(op_MUL_19))); + RzILOpEffect *op_ASSIGN_22 = WRITE_REG(bundle, Rxx_op, op_ADD_21); + + RzILOpEffect 
*instruction_sequence = op_ASSIGN_22; + return instruction_sequence; +} + +// Rxx += mpy(Rs.l,Rt.l):<<1 +RzILOpEffect *hex_il_op_m2_mpyd_acc_ll_s1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rxx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rxx = Rxx + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rs, SN(32, 0)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0xffff)); + RzILOpPure *op_MUL_19 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_7), DUP(op_AND_7))), CAST(16, MSB(DUP(op_AND_7)), DUP(op_AND_7))), CAST(32, MSB(CAST(16, MSB(op_AND_16), DUP(op_AND_16))), CAST(16, MSB(DUP(op_AND_16)), DUP(op_AND_16)))); + RzILOpPure *op_LSHIFT_22 = SHIFTL0(CAST(64, MSB(op_MUL_19), DUP(op_MUL_19)), SN(32, 1)); + RzILOpPure *op_ADD_23 = ADD(READ_REG(pkt, Rxx_op, false), op_LSHIFT_22); + RzILOpEffect *op_ASSIGN_24 = WRITE_REG(bundle, Rxx_op, op_ADD_23); + + RzILOpEffect *instruction_sequence = op_ASSIGN_24; + return instruction_sequence; +} + +// Rdd = mpy(Rs.h,Rt.h) +RzILOpEffect *hex_il_op_m2_mpyd_hh_s0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rdd = ((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rs, SN(32, 16)); + RzILOpPure *op_AND_7 
= LOGAND(op_RSHIFT_5, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(Rt, SN(32, 16)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0xffff)); + RzILOpPure *op_MUL_19 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_7), DUP(op_AND_7))), CAST(16, MSB(DUP(op_AND_7)), DUP(op_AND_7))), CAST(32, MSB(CAST(16, MSB(op_AND_16), DUP(op_AND_16))), CAST(16, MSB(DUP(op_AND_16)), DUP(op_AND_16)))); + RzILOpEffect *op_ASSIGN_21 = WRITE_REG(bundle, Rdd_op, CAST(64, MSB(op_MUL_19), DUP(op_MUL_19))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_21; + return instruction_sequence; +} + +// Rdd = mpy(Rs.h,Rt.h):<<1 +RzILOpEffect *hex_il_op_m2_mpyd_hh_s1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rdd = (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rs, SN(32, 16)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(Rt, SN(32, 16)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0xffff)); + RzILOpPure *op_MUL_19 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_7), DUP(op_AND_7))), CAST(16, MSB(DUP(op_AND_7)), DUP(op_AND_7))), CAST(32, MSB(CAST(16, MSB(op_AND_16), DUP(op_AND_16))), CAST(16, MSB(DUP(op_AND_16)), DUP(op_AND_16)))); + RzILOpPure *op_LSHIFT_22 = SHIFTL0(CAST(64, MSB(op_MUL_19), DUP(op_MUL_19)), SN(32, 1)); + RzILOpEffect *op_ASSIGN_23 = WRITE_REG(bundle, Rdd_op, op_LSHIFT_22); + + RzILOpEffect *instruction_sequence = op_ASSIGN_23; + return instruction_sequence; +} + +// Rdd = mpy(Rs.h,Rt.l) +RzILOpEffect *hex_il_op_m2_mpyd_hl_s0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // 
READ + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rdd = ((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rs, SN(32, 16)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0xffff)); + RzILOpPure *op_MUL_19 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_7), DUP(op_AND_7))), CAST(16, MSB(DUP(op_AND_7)), DUP(op_AND_7))), CAST(32, MSB(CAST(16, MSB(op_AND_16), DUP(op_AND_16))), CAST(16, MSB(DUP(op_AND_16)), DUP(op_AND_16)))); + RzILOpEffect *op_ASSIGN_21 = WRITE_REG(bundle, Rdd_op, CAST(64, MSB(op_MUL_19), DUP(op_MUL_19))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_21; + return instruction_sequence; +} + +// Rdd = mpy(Rs.h,Rt.l):<<1 +RzILOpEffect *hex_il_op_m2_mpyd_hl_s1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rdd = (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rs, SN(32, 16)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0xffff)); + RzILOpPure *op_MUL_19 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_7), DUP(op_AND_7))), CAST(16, MSB(DUP(op_AND_7)), DUP(op_AND_7))), CAST(32, MSB(CAST(16, MSB(op_AND_16), DUP(op_AND_16))), CAST(16, MSB(DUP(op_AND_16)), 
DUP(op_AND_16)))); + RzILOpPure *op_LSHIFT_22 = SHIFTL0(CAST(64, MSB(op_MUL_19), DUP(op_MUL_19)), SN(32, 1)); + RzILOpEffect *op_ASSIGN_23 = WRITE_REG(bundle, Rdd_op, op_LSHIFT_22); + + RzILOpEffect *instruction_sequence = op_ASSIGN_23; + return instruction_sequence; +} + +// Rdd = mpy(Rs.l,Rt.h) +RzILOpEffect *hex_il_op_m2_mpyd_lh_s0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rdd = ((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rs, SN(32, 0)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(Rt, SN(32, 16)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0xffff)); + RzILOpPure *op_MUL_19 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_7), DUP(op_AND_7))), CAST(16, MSB(DUP(op_AND_7)), DUP(op_AND_7))), CAST(32, MSB(CAST(16, MSB(op_AND_16), DUP(op_AND_16))), CAST(16, MSB(DUP(op_AND_16)), DUP(op_AND_16)))); + RzILOpEffect *op_ASSIGN_21 = WRITE_REG(bundle, Rdd_op, CAST(64, MSB(op_MUL_19), DUP(op_MUL_19))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_21; + return instruction_sequence; +} + +// Rdd = mpy(Rs.l,Rt.h):<<1 +RzILOpEffect *hex_il_op_m2_mpyd_lh_s1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rdd = (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1); + RzILOpPure 
*op_RSHIFT_5 = SHIFTRA(Rs, SN(32, 0)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(Rt, SN(32, 16)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0xffff)); + RzILOpPure *op_MUL_19 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_7), DUP(op_AND_7))), CAST(16, MSB(DUP(op_AND_7)), DUP(op_AND_7))), CAST(32, MSB(CAST(16, MSB(op_AND_16), DUP(op_AND_16))), CAST(16, MSB(DUP(op_AND_16)), DUP(op_AND_16)))); + RzILOpPure *op_LSHIFT_22 = SHIFTL0(CAST(64, MSB(op_MUL_19), DUP(op_MUL_19)), SN(32, 1)); + RzILOpEffect *op_ASSIGN_23 = WRITE_REG(bundle, Rdd_op, op_LSHIFT_22); + + RzILOpEffect *instruction_sequence = op_ASSIGN_23; + return instruction_sequence; +} + +// Rdd = mpy(Rs.l,Rt.l) +RzILOpEffect *hex_il_op_m2_mpyd_ll_s0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rdd = ((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rs, SN(32, 0)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0xffff)); + RzILOpPure *op_MUL_19 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_7), DUP(op_AND_7))), CAST(16, MSB(DUP(op_AND_7)), DUP(op_AND_7))), CAST(32, MSB(CAST(16, MSB(op_AND_16), DUP(op_AND_16))), CAST(16, MSB(DUP(op_AND_16)), DUP(op_AND_16)))); + RzILOpEffect *op_ASSIGN_21 = WRITE_REG(bundle, Rdd_op, CAST(64, MSB(op_MUL_19), DUP(op_MUL_19))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_21; + return instruction_sequence; +} + +// Rdd = mpy(Rs.l,Rt.l):<<1 +RzILOpEffect *hex_il_op_m2_mpyd_ll_s1(HexInsnPktBundle *bundle) { + const HexInsn *hi = 
bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rdd = (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rs, SN(32, 0)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0xffff)); + RzILOpPure *op_MUL_19 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_7), DUP(op_AND_7))), CAST(16, MSB(DUP(op_AND_7)), DUP(op_AND_7))), CAST(32, MSB(CAST(16, MSB(op_AND_16), DUP(op_AND_16))), CAST(16, MSB(DUP(op_AND_16)), DUP(op_AND_16)))); + RzILOpPure *op_LSHIFT_22 = SHIFTL0(CAST(64, MSB(op_MUL_19), DUP(op_MUL_19)), SN(32, 1)); + RzILOpEffect *op_ASSIGN_23 = WRITE_REG(bundle, Rdd_op, op_LSHIFT_22); + + RzILOpEffect *instruction_sequence = op_ASSIGN_23; + return instruction_sequence; +} + +// Rxx -= mpy(Rs.h,Rt.h) +RzILOpEffect *hex_il_op_m2_mpyd_nac_hh_s0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rxx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rxx = Rxx - ((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rs, SN(32, 16)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(Rt, SN(32, 16)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0xffff)); + RzILOpPure *op_MUL_19 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_7), DUP(op_AND_7))), CAST(16, 
MSB(DUP(op_AND_7)), DUP(op_AND_7))), CAST(32, MSB(CAST(16, MSB(op_AND_16), DUP(op_AND_16))), CAST(16, MSB(DUP(op_AND_16)), DUP(op_AND_16)))); + RzILOpPure *op_SUB_21 = SUB(READ_REG(pkt, Rxx_op, false), CAST(64, MSB(op_MUL_19), DUP(op_MUL_19))); + RzILOpEffect *op_ASSIGN_22 = WRITE_REG(bundle, Rxx_op, op_SUB_21); + + RzILOpEffect *instruction_sequence = op_ASSIGN_22; + return instruction_sequence; +} + +// Rxx -= mpy(Rs.h,Rt.h):<<1 +RzILOpEffect *hex_il_op_m2_mpyd_nac_hh_s1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rxx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rxx = Rxx - (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rs, SN(32, 16)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(Rt, SN(32, 16)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0xffff)); + RzILOpPure *op_MUL_19 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_7), DUP(op_AND_7))), CAST(16, MSB(DUP(op_AND_7)), DUP(op_AND_7))), CAST(32, MSB(CAST(16, MSB(op_AND_16), DUP(op_AND_16))), CAST(16, MSB(DUP(op_AND_16)), DUP(op_AND_16)))); + RzILOpPure *op_LSHIFT_22 = SHIFTL0(CAST(64, MSB(op_MUL_19), DUP(op_MUL_19)), SN(32, 1)); + RzILOpPure *op_SUB_23 = SUB(READ_REG(pkt, Rxx_op, false), op_LSHIFT_22); + RzILOpEffect *op_ASSIGN_24 = WRITE_REG(bundle, Rxx_op, op_SUB_23); + + RzILOpEffect *instruction_sequence = op_ASSIGN_24; + return instruction_sequence; +} + +// Rxx -= mpy(Rs.h,Rt.l) +RzILOpEffect *hex_il_op_m2_mpyd_nac_hl_s0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rxx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 
's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rxx = Rxx - ((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rs, SN(32, 16)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0xffff)); + RzILOpPure *op_MUL_19 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_7), DUP(op_AND_7))), CAST(16, MSB(DUP(op_AND_7)), DUP(op_AND_7))), CAST(32, MSB(CAST(16, MSB(op_AND_16), DUP(op_AND_16))), CAST(16, MSB(DUP(op_AND_16)), DUP(op_AND_16)))); + RzILOpPure *op_SUB_21 = SUB(READ_REG(pkt, Rxx_op, false), CAST(64, MSB(op_MUL_19), DUP(op_MUL_19))); + RzILOpEffect *op_ASSIGN_22 = WRITE_REG(bundle, Rxx_op, op_SUB_21); + + RzILOpEffect *instruction_sequence = op_ASSIGN_22; + return instruction_sequence; +} + +// Rxx -= mpy(Rs.h,Rt.l):<<1 +RzILOpEffect *hex_il_op_m2_mpyd_nac_hl_s1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rxx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rxx = Rxx - (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rs, SN(32, 16)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0xffff)); + RzILOpPure *op_MUL_19 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_7), DUP(op_AND_7))), CAST(16, MSB(DUP(op_AND_7)), DUP(op_AND_7))), CAST(32, MSB(CAST(16, MSB(op_AND_16), DUP(op_AND_16))), CAST(16, MSB(DUP(op_AND_16)), 
DUP(op_AND_16)))); + RzILOpPure *op_LSHIFT_22 = SHIFTL0(CAST(64, MSB(op_MUL_19), DUP(op_MUL_19)), SN(32, 1)); + RzILOpPure *op_SUB_23 = SUB(READ_REG(pkt, Rxx_op, false), op_LSHIFT_22); + RzILOpEffect *op_ASSIGN_24 = WRITE_REG(bundle, Rxx_op, op_SUB_23); + + RzILOpEffect *instruction_sequence = op_ASSIGN_24; + return instruction_sequence; +} + +// Rxx -= mpy(Rs.l,Rt.h) +RzILOpEffect *hex_il_op_m2_mpyd_nac_lh_s0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rxx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rxx = Rxx - ((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rs, SN(32, 0)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(Rt, SN(32, 16)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0xffff)); + RzILOpPure *op_MUL_19 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_7), DUP(op_AND_7))), CAST(16, MSB(DUP(op_AND_7)), DUP(op_AND_7))), CAST(32, MSB(CAST(16, MSB(op_AND_16), DUP(op_AND_16))), CAST(16, MSB(DUP(op_AND_16)), DUP(op_AND_16)))); + RzILOpPure *op_SUB_21 = SUB(READ_REG(pkt, Rxx_op, false), CAST(64, MSB(op_MUL_19), DUP(op_MUL_19))); + RzILOpEffect *op_ASSIGN_22 = WRITE_REG(bundle, Rxx_op, op_SUB_21); + + RzILOpEffect *instruction_sequence = op_ASSIGN_22; + return instruction_sequence; +} + +// Rxx -= mpy(Rs.l,Rt.h):<<1 +RzILOpEffect *hex_il_op_m2_mpyd_nac_lh_s1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rxx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = 
READ_REG(pkt, Rt_op, false); + + // Rxx = Rxx - (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rs, SN(32, 0)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(Rt, SN(32, 16)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0xffff)); + RzILOpPure *op_MUL_19 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_7), DUP(op_AND_7))), CAST(16, MSB(DUP(op_AND_7)), DUP(op_AND_7))), CAST(32, MSB(CAST(16, MSB(op_AND_16), DUP(op_AND_16))), CAST(16, MSB(DUP(op_AND_16)), DUP(op_AND_16)))); + RzILOpPure *op_LSHIFT_22 = SHIFTL0(CAST(64, MSB(op_MUL_19), DUP(op_MUL_19)), SN(32, 1)); + RzILOpPure *op_SUB_23 = SUB(READ_REG(pkt, Rxx_op, false), op_LSHIFT_22); + RzILOpEffect *op_ASSIGN_24 = WRITE_REG(bundle, Rxx_op, op_SUB_23); + + RzILOpEffect *instruction_sequence = op_ASSIGN_24; + return instruction_sequence; +} + +// Rxx -= mpy(Rs.l,Rt.l) +RzILOpEffect *hex_il_op_m2_mpyd_nac_ll_s0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rxx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rxx = Rxx - ((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rs, SN(32, 0)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0xffff)); + RzILOpPure *op_MUL_19 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_7), DUP(op_AND_7))), CAST(16, MSB(DUP(op_AND_7)), DUP(op_AND_7))), CAST(32, MSB(CAST(16, MSB(op_AND_16), DUP(op_AND_16))), CAST(16, MSB(DUP(op_AND_16)), DUP(op_AND_16)))); + RzILOpPure *op_SUB_21 = SUB(READ_REG(pkt, Rxx_op, false), 
CAST(64, MSB(op_MUL_19), DUP(op_MUL_19))); + RzILOpEffect *op_ASSIGN_22 = WRITE_REG(bundle, Rxx_op, op_SUB_21); + + RzILOpEffect *instruction_sequence = op_ASSIGN_22; + return instruction_sequence; +} + +// Rxx -= mpy(Rs.l,Rt.l):<<1 +RzILOpEffect *hex_il_op_m2_mpyd_nac_ll_s1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rxx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rxx = Rxx - (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rs, SN(32, 0)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0xffff)); + RzILOpPure *op_MUL_19 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_7), DUP(op_AND_7))), CAST(16, MSB(DUP(op_AND_7)), DUP(op_AND_7))), CAST(32, MSB(CAST(16, MSB(op_AND_16), DUP(op_AND_16))), CAST(16, MSB(DUP(op_AND_16)), DUP(op_AND_16)))); + RzILOpPure *op_LSHIFT_22 = SHIFTL0(CAST(64, MSB(op_MUL_19), DUP(op_MUL_19)), SN(32, 1)); + RzILOpPure *op_SUB_23 = SUB(READ_REG(pkt, Rxx_op, false), op_LSHIFT_22); + RzILOpEffect *op_ASSIGN_24 = WRITE_REG(bundle, Rxx_op, op_SUB_23); + + RzILOpEffect *instruction_sequence = op_ASSIGN_24; + return instruction_sequence; +} + +// Rdd = mpy(Rs.h,Rt.h):rnd +RzILOpEffect *hex_il_op_m2_mpyd_rnd_hh_s0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rdd = ((st64) ((st32) ((st16) ((Rs >> 
0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) + ((st64) 0x8000); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rs, SN(32, 16)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(Rt, SN(32, 16)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0xffff)); + RzILOpPure *op_MUL_19 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_7), DUP(op_AND_7))), CAST(16, MSB(DUP(op_AND_7)), DUP(op_AND_7))), CAST(32, MSB(CAST(16, MSB(op_AND_16), DUP(op_AND_16))), CAST(16, MSB(DUP(op_AND_16)), DUP(op_AND_16)))); + RzILOpPure *op_ADD_23 = ADD(CAST(64, MSB(op_MUL_19), DUP(op_MUL_19)), CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpEffect *op_ASSIGN_24 = WRITE_REG(bundle, Rdd_op, op_ADD_23); + + RzILOpEffect *instruction_sequence = op_ASSIGN_24; + return instruction_sequence; +} + +// Rdd = mpy(Rs.h,Rt.h):<<1:rnd +RzILOpEffect *hex_il_op_m2_mpyd_rnd_hh_s1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rdd = (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1) + ((st64) 0x8000); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rs, SN(32, 16)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(Rt, SN(32, 16)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0xffff)); + RzILOpPure *op_MUL_19 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_7), DUP(op_AND_7))), CAST(16, MSB(DUP(op_AND_7)), DUP(op_AND_7))), CAST(32, MSB(CAST(16, MSB(op_AND_16), DUP(op_AND_16))), CAST(16, MSB(DUP(op_AND_16)), DUP(op_AND_16)))); + RzILOpPure *op_LSHIFT_22 = SHIFTL0(CAST(64, MSB(op_MUL_19), DUP(op_MUL_19)), SN(32, 1)); + RzILOpPure *op_ADD_25 = ADD(op_LSHIFT_22, CAST(64, 
MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpEffect *op_ASSIGN_26 = WRITE_REG(bundle, Rdd_op, op_ADD_25); + + RzILOpEffect *instruction_sequence = op_ASSIGN_26; + return instruction_sequence; +} + +// Rdd = mpy(Rs.h,Rt.l):rnd +RzILOpEffect *hex_il_op_m2_mpyd_rnd_hl_s0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rdd = ((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) + ((st64) 0x8000); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rs, SN(32, 16)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0xffff)); + RzILOpPure *op_MUL_19 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_7), DUP(op_AND_7))), CAST(16, MSB(DUP(op_AND_7)), DUP(op_AND_7))), CAST(32, MSB(CAST(16, MSB(op_AND_16), DUP(op_AND_16))), CAST(16, MSB(DUP(op_AND_16)), DUP(op_AND_16)))); + RzILOpPure *op_ADD_23 = ADD(CAST(64, MSB(op_MUL_19), DUP(op_MUL_19)), CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpEffect *op_ASSIGN_24 = WRITE_REG(bundle, Rdd_op, op_ADD_23); + + RzILOpEffect *instruction_sequence = op_ASSIGN_24; + return instruction_sequence; +} + +// Rdd = mpy(Rs.h,Rt.l):<<1:rnd +RzILOpEffect *hex_il_op_m2_mpyd_rnd_hl_s1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rdd = (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 
0x0) & 0xffff)))) << 0x1) + ((st64) 0x8000); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rs, SN(32, 16)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0xffff)); + RzILOpPure *op_MUL_19 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_7), DUP(op_AND_7))), CAST(16, MSB(DUP(op_AND_7)), DUP(op_AND_7))), CAST(32, MSB(CAST(16, MSB(op_AND_16), DUP(op_AND_16))), CAST(16, MSB(DUP(op_AND_16)), DUP(op_AND_16)))); + RzILOpPure *op_LSHIFT_22 = SHIFTL0(CAST(64, MSB(op_MUL_19), DUP(op_MUL_19)), SN(32, 1)); + RzILOpPure *op_ADD_25 = ADD(op_LSHIFT_22, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpEffect *op_ASSIGN_26 = WRITE_REG(bundle, Rdd_op, op_ADD_25); + + RzILOpEffect *instruction_sequence = op_ASSIGN_26; + return instruction_sequence; +} + +// Rdd = mpy(Rs.l,Rt.h):rnd +RzILOpEffect *hex_il_op_m2_mpyd_rnd_lh_s0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rdd = ((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) + ((st64) 0x8000); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rs, SN(32, 0)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(Rt, SN(32, 16)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0xffff)); + RzILOpPure *op_MUL_19 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_7), DUP(op_AND_7))), CAST(16, MSB(DUP(op_AND_7)), DUP(op_AND_7))), CAST(32, MSB(CAST(16, MSB(op_AND_16), DUP(op_AND_16))), CAST(16, MSB(DUP(op_AND_16)), DUP(op_AND_16)))); + RzILOpPure *op_ADD_23 = ADD(CAST(64, MSB(op_MUL_19), DUP(op_MUL_19)), CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpEffect 
*op_ASSIGN_24 = WRITE_REG(bundle, Rdd_op, op_ADD_23); + + RzILOpEffect *instruction_sequence = op_ASSIGN_24; + return instruction_sequence; +} + +// Rdd = mpy(Rs.l,Rt.h):<<1:rnd +RzILOpEffect *hex_il_op_m2_mpyd_rnd_lh_s1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rdd = (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1) + ((st64) 0x8000); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rs, SN(32, 0)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(Rt, SN(32, 16)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0xffff)); + RzILOpPure *op_MUL_19 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_7), DUP(op_AND_7))), CAST(16, MSB(DUP(op_AND_7)), DUP(op_AND_7))), CAST(32, MSB(CAST(16, MSB(op_AND_16), DUP(op_AND_16))), CAST(16, MSB(DUP(op_AND_16)), DUP(op_AND_16)))); + RzILOpPure *op_LSHIFT_22 = SHIFTL0(CAST(64, MSB(op_MUL_19), DUP(op_MUL_19)), SN(32, 1)); + RzILOpPure *op_ADD_25 = ADD(op_LSHIFT_22, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpEffect *op_ASSIGN_26 = WRITE_REG(bundle, Rdd_op, op_ADD_25); + + RzILOpEffect *instruction_sequence = op_ASSIGN_26; + return instruction_sequence; +} + +// Rdd = mpy(Rs.l,Rt.l):rnd +RzILOpEffect *hex_il_op_m2_mpyd_rnd_ll_s0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rdd = ((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) 
((st16) ((Rt >> 0x0) & 0xffff)))) + ((st64) 0x8000); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rs, SN(32, 0)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0xffff)); + RzILOpPure *op_MUL_19 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_7), DUP(op_AND_7))), CAST(16, MSB(DUP(op_AND_7)), DUP(op_AND_7))), CAST(32, MSB(CAST(16, MSB(op_AND_16), DUP(op_AND_16))), CAST(16, MSB(DUP(op_AND_16)), DUP(op_AND_16)))); + RzILOpPure *op_ADD_23 = ADD(CAST(64, MSB(op_MUL_19), DUP(op_MUL_19)), CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpEffect *op_ASSIGN_24 = WRITE_REG(bundle, Rdd_op, op_ADD_23); + + RzILOpEffect *instruction_sequence = op_ASSIGN_24; + return instruction_sequence; +} + +// Rdd = mpy(Rs.l,Rt.l):<<1:rnd +RzILOpEffect *hex_il_op_m2_mpyd_rnd_ll_s1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rdd = (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) + ((st64) 0x8000); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rs, SN(32, 0)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0xffff)); + RzILOpPure *op_MUL_19 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_7), DUP(op_AND_7))), CAST(16, MSB(DUP(op_AND_7)), DUP(op_AND_7))), CAST(32, MSB(CAST(16, MSB(op_AND_16), DUP(op_AND_16))), CAST(16, MSB(DUP(op_AND_16)), DUP(op_AND_16)))); + RzILOpPure *op_LSHIFT_22 = SHIFTL0(CAST(64, MSB(op_MUL_19), DUP(op_MUL_19)), SN(32, 1)); + RzILOpPure *op_ADD_25 = ADD(op_LSHIFT_22, CAST(64, MSB(SN(32, 0x8000)), SN(32, 
0x8000))); + RzILOpEffect *op_ASSIGN_26 = WRITE_REG(bundle, Rdd_op, op_ADD_25); + + RzILOpEffect *instruction_sequence = op_ASSIGN_26; + return instruction_sequence; +} + +// Rd = mpyi(Rs,Rt) +RzILOpEffect *hex_il_op_m2_mpyi(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rd = Rs * Rt; + RzILOpPure *op_MUL_3 = MUL(Rs, Rt); + RzILOpEffect *op_ASSIGN_4 = WRITE_REG(bundle, Rd_op, op_MUL_3); + + RzILOpEffect *instruction_sequence = op_ASSIGN_4; + return instruction_sequence; +} + +// Rd = -mpyi(Rs,Ii) +RzILOpEffect *hex_il_op_m2_mpysin(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + + // u = u; + RzILOpEffect *imm_assign_2 = SETL("u", u); + + // Rd = ((st32) ((ut32) Rs) * (-u)); + RzILOpPure *op_NEG_4 = NEG(VARL("u")); + RzILOpPure *op_MUL_6 = MUL(CAST(32, IL_FALSE, Rs), op_NEG_4); + RzILOpEffect *op_ASSIGN_8 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, op_MUL_6)); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_2, op_ASSIGN_8); + return instruction_sequence; +} + +// Rd = +mpyi(Rs,Ii) +RzILOpEffect *hex_il_op_m2_mpysip(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // Rd = ((st32) ((ut32) Rs) * u); + 
RzILOpPure *op_MUL_5 = MUL(CAST(32, IL_FALSE, Rs), VARL("u")); + RzILOpEffect *op_ASSIGN_7 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, op_MUL_5)); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_0, op_ASSIGN_7); + return instruction_sequence; +} + +// Rd = mpysu(Rs,Rt) +RzILOpEffect *hex_il_op_m2_mpysu_up(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rd = ((st32) (((ut64) ((st64) Rs)) * ((ut64) ((ut32) Rt)) >> 0x20)); + RzILOpPure *op_MUL_7 = MUL(CAST(64, IL_FALSE, CAST(64, MSB(Rs), DUP(Rs))), CAST(64, IL_FALSE, CAST(32, IL_FALSE, Rt))); + RzILOpPure *op_RSHIFT_9 = SHIFTR0(op_MUL_7, SN(32, 0x20)); + RzILOpEffect *op_ASSIGN_11 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, op_RSHIFT_9)); + + RzILOpEffect *instruction_sequence = op_ASSIGN_11; + return instruction_sequence; +} + +// Rx += mpyu(Rs.h,Rt.h) +RzILOpEffect *hex_il_op_m2_mpyu_acc_hh_s0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rx = ((st32) ((ut64) Rx) + ((ut64) ((ut32) ((ut16) ((Rs >> 0x10) & 0xffff))) * ((ut32) ((ut16) ((Rt >> 0x10) & 0xffff))))); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rs, SN(32, 16)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(Rt, SN(32, 16)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0xffff)); + RzILOpPure *op_MUL_19 = MUL(CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_7)), CAST(32, IL_FALSE, CAST(16, IL_FALSE, 
op_AND_16))); + RzILOpPure *op_ADD_22 = ADD(CAST(64, IL_FALSE, READ_REG(pkt, Rx_op, false)), CAST(64, IL_FALSE, op_MUL_19)); + RzILOpEffect *op_ASSIGN_24 = WRITE_REG(bundle, Rx_op, CAST(32, IL_FALSE, op_ADD_22)); + + RzILOpEffect *instruction_sequence = op_ASSIGN_24; + return instruction_sequence; +} + +// Rx += mpyu(Rs.h,Rt.h):<<1 +RzILOpEffect *hex_il_op_m2_mpyu_acc_hh_s1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rx = ((st32) ((st64) Rx) + (((st64) ((ut64) ((ut32) ((ut16) ((Rs >> 0x10) & 0xffff))) * ((ut32) ((ut16) ((Rt >> 0x10) & 0xffff))))) << 0x1)); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rs, SN(32, 16)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(Rt, SN(32, 16)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0xffff)); + RzILOpPure *op_MUL_19 = MUL(CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_7)), CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_16))); + RzILOpPure *op_LSHIFT_23 = SHIFTL0(CAST(64, IL_FALSE, CAST(64, IL_FALSE, op_MUL_19)), SN(32, 1)); + RzILOpPure *op_ADD_25 = ADD(CAST(64, MSB(READ_REG(pkt, Rx_op, false)), READ_REG(pkt, Rx_op, false)), op_LSHIFT_23); + RzILOpEffect *op_ASSIGN_27 = WRITE_REG(bundle, Rx_op, CAST(32, MSB(op_ADD_25), DUP(op_ADD_25))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_27; + return instruction_sequence; +} + +// Rx += mpyu(Rs.h,Rt.l) +RzILOpEffect *hex_il_op_m2_mpyu_acc_hl_s0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 
't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rx = ((st32) ((ut64) Rx) + ((ut64) ((ut32) ((ut16) ((Rs >> 0x10) & 0xffff))) * ((ut32) ((ut16) ((Rt >> 0x0) & 0xffff))))); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rs, SN(32, 16)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0xffff)); + RzILOpPure *op_MUL_19 = MUL(CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_7)), CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_16))); + RzILOpPure *op_ADD_22 = ADD(CAST(64, IL_FALSE, READ_REG(pkt, Rx_op, false)), CAST(64, IL_FALSE, op_MUL_19)); + RzILOpEffect *op_ASSIGN_24 = WRITE_REG(bundle, Rx_op, CAST(32, IL_FALSE, op_ADD_22)); + + RzILOpEffect *instruction_sequence = op_ASSIGN_24; + return instruction_sequence; +} + +// Rx += mpyu(Rs.h,Rt.l):<<1 +RzILOpEffect *hex_il_op_m2_mpyu_acc_hl_s1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rx = ((st32) ((st64) Rx) + (((st64) ((ut64) ((ut32) ((ut16) ((Rs >> 0x10) & 0xffff))) * ((ut32) ((ut16) ((Rt >> 0x0) & 0xffff))))) << 0x1)); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rs, SN(32, 16)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0xffff)); + RzILOpPure *op_MUL_19 = MUL(CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_7)), CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_16))); + RzILOpPure *op_LSHIFT_23 = SHIFTL0(CAST(64, IL_FALSE, CAST(64, IL_FALSE, op_MUL_19)), SN(32, 1)); + RzILOpPure *op_ADD_25 = ADD(CAST(64, MSB(READ_REG(pkt, Rx_op, false)), READ_REG(pkt, Rx_op, false)), op_LSHIFT_23); + 
RzILOpEffect *op_ASSIGN_27 = WRITE_REG(bundle, Rx_op, CAST(32, MSB(op_ADD_25), DUP(op_ADD_25))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_27; + return instruction_sequence; +} + +// Rx += mpyu(Rs.l,Rt.h) +RzILOpEffect *hex_il_op_m2_mpyu_acc_lh_s0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rx = ((st32) ((ut64) Rx) + ((ut64) ((ut32) ((ut16) ((Rs >> 0x0) & 0xffff))) * ((ut32) ((ut16) ((Rt >> 0x10) & 0xffff))))); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rs, SN(32, 0)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(Rt, SN(32, 16)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0xffff)); + RzILOpPure *op_MUL_19 = MUL(CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_7)), CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_16))); + RzILOpPure *op_ADD_22 = ADD(CAST(64, IL_FALSE, READ_REG(pkt, Rx_op, false)), CAST(64, IL_FALSE, op_MUL_19)); + RzILOpEffect *op_ASSIGN_24 = WRITE_REG(bundle, Rx_op, CAST(32, IL_FALSE, op_ADD_22)); + + RzILOpEffect *instruction_sequence = op_ASSIGN_24; + return instruction_sequence; +} + +// Rx += mpyu(Rs.l,Rt.h):<<1 +RzILOpEffect *hex_il_op_m2_mpyu_acc_lh_s1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rx = ((st32) ((st64) Rx) + (((st64) ((ut64) ((ut32) ((ut16) ((Rs >> 0x0) & 0xffff))) * ((ut32) ((ut16) ((Rt >> 0x10) & 0xffff))))) << 0x1)); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rs, SN(32, 
0)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(Rt, SN(32, 16)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0xffff)); + RzILOpPure *op_MUL_19 = MUL(CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_7)), CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_16))); + RzILOpPure *op_LSHIFT_23 = SHIFTL0(CAST(64, IL_FALSE, CAST(64, IL_FALSE, op_MUL_19)), SN(32, 1)); + RzILOpPure *op_ADD_25 = ADD(CAST(64, MSB(READ_REG(pkt, Rx_op, false)), READ_REG(pkt, Rx_op, false)), op_LSHIFT_23); + RzILOpEffect *op_ASSIGN_27 = WRITE_REG(bundle, Rx_op, CAST(32, MSB(op_ADD_25), DUP(op_ADD_25))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_27; + return instruction_sequence; +} + +// Rx += mpyu(Rs.l,Rt.l) +RzILOpEffect *hex_il_op_m2_mpyu_acc_ll_s0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rx = ((st32) ((ut64) Rx) + ((ut64) ((ut32) ((ut16) ((Rs >> 0x0) & 0xffff))) * ((ut32) ((ut16) ((Rt >> 0x0) & 0xffff))))); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rs, SN(32, 0)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0xffff)); + RzILOpPure *op_MUL_19 = MUL(CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_7)), CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_16))); + RzILOpPure *op_ADD_22 = ADD(CAST(64, IL_FALSE, READ_REG(pkt, Rx_op, false)), CAST(64, IL_FALSE, op_MUL_19)); + RzILOpEffect *op_ASSIGN_24 = WRITE_REG(bundle, Rx_op, CAST(32, IL_FALSE, op_ADD_22)); + + RzILOpEffect *instruction_sequence = op_ASSIGN_24; + return instruction_sequence; +} + +// Rx += mpyu(Rs.l,Rt.l):<<1 +RzILOpEffect 
*hex_il_op_m2_mpyu_acc_ll_s1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rx = ((st32) ((st64) Rx) + (((st64) ((ut64) ((ut32) ((ut16) ((Rs >> 0x0) & 0xffff))) * ((ut32) ((ut16) ((Rt >> 0x0) & 0xffff))))) << 0x1)); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rs, SN(32, 0)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0xffff)); + RzILOpPure *op_MUL_19 = MUL(CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_7)), CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_16))); + RzILOpPure *op_LSHIFT_23 = SHIFTL0(CAST(64, IL_FALSE, CAST(64, IL_FALSE, op_MUL_19)), SN(32, 1)); + RzILOpPure *op_ADD_25 = ADD(CAST(64, MSB(READ_REG(pkt, Rx_op, false)), READ_REG(pkt, Rx_op, false)), op_LSHIFT_23); + RzILOpEffect *op_ASSIGN_27 = WRITE_REG(bundle, Rx_op, CAST(32, MSB(op_ADD_25), DUP(op_ADD_25))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_27; + return instruction_sequence; +} + +// Rd = mpyu(Rs.h,Rt.h) +RzILOpEffect *hex_il_op_m2_mpyu_hh_s0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rd = ((st32) ((ut64) ((ut32) ((ut16) ((Rs >> 0x10) & 0xffff))) * ((ut32) ((ut16) ((Rt >> 0x10) & 0xffff))))); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rs, SN(32, 16)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(Rt, SN(32, 16)); + RzILOpPure 
*op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0xffff)); + RzILOpPure *op_MUL_19 = MUL(CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_7)), CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_16))); + RzILOpEffect *op_ASSIGN_22 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, CAST(64, IL_FALSE, op_MUL_19))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_22; + return instruction_sequence; +} + +// Rd = mpyu(Rs.h,Rt.h):<<1 +RzILOpEffect *hex_il_op_m2_mpyu_hh_s1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rd = ((st32) (((st64) ((ut64) ((ut32) ((ut16) ((Rs >> 0x10) & 0xffff))) * ((ut32) ((ut16) ((Rt >> 0x10) & 0xffff))))) << 0x1)); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rs, SN(32, 16)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(Rt, SN(32, 16)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0xffff)); + RzILOpPure *op_MUL_19 = MUL(CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_7)), CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_16))); + RzILOpPure *op_LSHIFT_23 = SHIFTL0(CAST(64, IL_FALSE, CAST(64, IL_FALSE, op_MUL_19)), SN(32, 1)); + RzILOpEffect *op_ASSIGN_25 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(op_LSHIFT_23), DUP(op_LSHIFT_23))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_25; + return instruction_sequence; +} + +// Rd = mpyu(Rs.h,Rt.l) +RzILOpEffect *hex_il_op_m2_mpyu_hl_s0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + 
// Rd = ((st32) ((ut64) ((ut32) ((ut16) ((Rs >> 0x10) & 0xffff))) * ((ut32) ((ut16) ((Rt >> 0x0) & 0xffff))))); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rs, SN(32, 16)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0xffff)); + RzILOpPure *op_MUL_19 = MUL(CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_7)), CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_16))); + RzILOpEffect *op_ASSIGN_22 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, CAST(64, IL_FALSE, op_MUL_19))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_22; + return instruction_sequence; +} + +// Rd = mpyu(Rs.h,Rt.l):<<1 +RzILOpEffect *hex_il_op_m2_mpyu_hl_s1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rd = ((st32) (((st64) ((ut64) ((ut32) ((ut16) ((Rs >> 0x10) & 0xffff))) * ((ut32) ((ut16) ((Rt >> 0x0) & 0xffff))))) << 0x1)); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rs, SN(32, 16)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0xffff)); + RzILOpPure *op_MUL_19 = MUL(CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_7)), CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_16))); + RzILOpPure *op_LSHIFT_23 = SHIFTL0(CAST(64, IL_FALSE, CAST(64, IL_FALSE, op_MUL_19)), SN(32, 1)); + RzILOpEffect *op_ASSIGN_25 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(op_LSHIFT_23), DUP(op_LSHIFT_23))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_25; + return instruction_sequence; +} + +// Rd = mpyu(Rs.l,Rt.h) +RzILOpEffect *hex_il_op_m2_mpyu_lh_s0(HexInsnPktBundle *bundle) { + const HexInsn *hi = 
bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rd = ((st32) ((ut64) ((ut32) ((ut16) ((Rs >> 0x0) & 0xffff))) * ((ut32) ((ut16) ((Rt >> 0x10) & 0xffff))))); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rs, SN(32, 0)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(Rt, SN(32, 16)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0xffff)); + RzILOpPure *op_MUL_19 = MUL(CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_7)), CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_16))); + RzILOpEffect *op_ASSIGN_22 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, CAST(64, IL_FALSE, op_MUL_19))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_22; + return instruction_sequence; +} + +// Rd = mpyu(Rs.l,Rt.h):<<1 +RzILOpEffect *hex_il_op_m2_mpyu_lh_s1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rd = ((st32) (((st64) ((ut64) ((ut32) ((ut16) ((Rs >> 0x0) & 0xffff))) * ((ut32) ((ut16) ((Rt >> 0x10) & 0xffff))))) << 0x1)); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rs, SN(32, 0)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(Rt, SN(32, 16)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0xffff)); + RzILOpPure *op_MUL_19 = MUL(CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_7)), CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_16))); + RzILOpPure *op_LSHIFT_23 = SHIFTL0(CAST(64, IL_FALSE, CAST(64, IL_FALSE, op_MUL_19)), SN(32, 1)); + RzILOpEffect 
*op_ASSIGN_25 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(op_LSHIFT_23), DUP(op_LSHIFT_23))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_25; + return instruction_sequence; +} + +// Rd = mpyu(Rs.l,Rt.l) +RzILOpEffect *hex_il_op_m2_mpyu_ll_s0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rd = ((st32) ((ut64) ((ut32) ((ut16) ((Rs >> 0x0) & 0xffff))) * ((ut32) ((ut16) ((Rt >> 0x0) & 0xffff))))); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rs, SN(32, 0)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0xffff)); + RzILOpPure *op_MUL_19 = MUL(CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_7)), CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_16))); + RzILOpEffect *op_ASSIGN_22 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, CAST(64, IL_FALSE, op_MUL_19))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_22; + return instruction_sequence; +} + +// Rd = mpyu(Rs.l,Rt.l):<<1 +RzILOpEffect *hex_il_op_m2_mpyu_ll_s1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rd = ((st32) (((st64) ((ut64) ((ut32) ((ut16) ((Rs >> 0x0) & 0xffff))) * ((ut32) ((ut16) ((Rt >> 0x0) & 0xffff))))) << 0x1)); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rs, SN(32, 0)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_16 
= LOGAND(op_RSHIFT_14, SN(32, 0xffff)); + RzILOpPure *op_MUL_19 = MUL(CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_7)), CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_16))); + RzILOpPure *op_LSHIFT_23 = SHIFTL0(CAST(64, IL_FALSE, CAST(64, IL_FALSE, op_MUL_19)), SN(32, 1)); + RzILOpEffect *op_ASSIGN_25 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(op_LSHIFT_23), DUP(op_LSHIFT_23))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_25; + return instruction_sequence; +} + +// Rx -= mpyu(Rs.h,Rt.h) +RzILOpEffect *hex_il_op_m2_mpyu_nac_hh_s0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rx = ((st32) ((ut64) Rx) - ((ut64) ((ut32) ((ut16) ((Rs >> 0x10) & 0xffff))) * ((ut32) ((ut16) ((Rt >> 0x10) & 0xffff))))); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rs, SN(32, 16)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(Rt, SN(32, 16)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0xffff)); + RzILOpPure *op_MUL_19 = MUL(CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_7)), CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_16))); + RzILOpPure *op_SUB_22 = SUB(CAST(64, IL_FALSE, READ_REG(pkt, Rx_op, false)), CAST(64, IL_FALSE, op_MUL_19)); + RzILOpEffect *op_ASSIGN_24 = WRITE_REG(bundle, Rx_op, CAST(32, IL_FALSE, op_SUB_22)); + + RzILOpEffect *instruction_sequence = op_ASSIGN_24; + return instruction_sequence; +} + +// Rx -= mpyu(Rs.h,Rt.h):<<1 +RzILOpEffect *hex_il_op_m2_mpyu_nac_hh_s1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const 
HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rx = ((st32) ((st64) Rx) - (((st64) ((ut64) ((ut32) ((ut16) ((Rs >> 0x10) & 0xffff))) * ((ut32) ((ut16) ((Rt >> 0x10) & 0xffff))))) << 0x1)); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rs, SN(32, 16)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(Rt, SN(32, 16)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0xffff)); + RzILOpPure *op_MUL_19 = MUL(CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_7)), CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_16))); + RzILOpPure *op_LSHIFT_23 = SHIFTL0(CAST(64, IL_FALSE, CAST(64, IL_FALSE, op_MUL_19)), SN(32, 1)); + RzILOpPure *op_SUB_25 = SUB(CAST(64, MSB(READ_REG(pkt, Rx_op, false)), READ_REG(pkt, Rx_op, false)), op_LSHIFT_23); + RzILOpEffect *op_ASSIGN_27 = WRITE_REG(bundle, Rx_op, CAST(32, MSB(op_SUB_25), DUP(op_SUB_25))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_27; + return instruction_sequence; +} + +// Rx -= mpyu(Rs.h,Rt.l) +RzILOpEffect *hex_il_op_m2_mpyu_nac_hl_s0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rx = ((st32) ((ut64) Rx) - ((ut64) ((ut32) ((ut16) ((Rs >> 0x10) & 0xffff))) * ((ut32) ((ut16) ((Rt >> 0x0) & 0xffff))))); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rs, SN(32, 16)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0xffff)); + RzILOpPure *op_MUL_19 = MUL(CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_7)), CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_16))); + RzILOpPure *op_SUB_22 = SUB(CAST(64, IL_FALSE, READ_REG(pkt, Rx_op, false)), 
CAST(64, IL_FALSE, op_MUL_19)); + RzILOpEffect *op_ASSIGN_24 = WRITE_REG(bundle, Rx_op, CAST(32, IL_FALSE, op_SUB_22)); + + RzILOpEffect *instruction_sequence = op_ASSIGN_24; + return instruction_sequence; +} + +// Rx -= mpyu(Rs.h,Rt.l):<<1 +RzILOpEffect *hex_il_op_m2_mpyu_nac_hl_s1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rx = ((st32) ((st64) Rx) - (((st64) ((ut64) ((ut32) ((ut16) ((Rs >> 0x10) & 0xffff))) * ((ut32) ((ut16) ((Rt >> 0x0) & 0xffff))))) << 0x1)); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rs, SN(32, 16)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0xffff)); + RzILOpPure *op_MUL_19 = MUL(CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_7)), CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_16))); + RzILOpPure *op_LSHIFT_23 = SHIFTL0(CAST(64, IL_FALSE, CAST(64, IL_FALSE, op_MUL_19)), SN(32, 1)); + RzILOpPure *op_SUB_25 = SUB(CAST(64, MSB(READ_REG(pkt, Rx_op, false)), READ_REG(pkt, Rx_op, false)), op_LSHIFT_23); + RzILOpEffect *op_ASSIGN_27 = WRITE_REG(bundle, Rx_op, CAST(32, MSB(op_SUB_25), DUP(op_SUB_25))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_27; + return instruction_sequence; +} + +// Rx -= mpyu(Rs.l,Rt.h) +RzILOpEffect *hex_il_op_m2_mpyu_nac_lh_s0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rx = ((st32) ((ut64) Rx) - 
((ut64) ((ut32) ((ut16) ((Rs >> 0x0) & 0xffff))) * ((ut32) ((ut16) ((Rt >> 0x10) & 0xffff))))); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rs, SN(32, 0)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(Rt, SN(32, 16)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0xffff)); + RzILOpPure *op_MUL_19 = MUL(CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_7)), CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_16))); + RzILOpPure *op_SUB_22 = SUB(CAST(64, IL_FALSE, READ_REG(pkt, Rx_op, false)), CAST(64, IL_FALSE, op_MUL_19)); + RzILOpEffect *op_ASSIGN_24 = WRITE_REG(bundle, Rx_op, CAST(32, IL_FALSE, op_SUB_22)); + + RzILOpEffect *instruction_sequence = op_ASSIGN_24; + return instruction_sequence; +} + +// Rx -= mpyu(Rs.l,Rt.h):<<1 +RzILOpEffect *hex_il_op_m2_mpyu_nac_lh_s1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rx = ((st32) ((st64) Rx) - (((st64) ((ut64) ((ut32) ((ut16) ((Rs >> 0x0) & 0xffff))) * ((ut32) ((ut16) ((Rt >> 0x10) & 0xffff))))) << 0x1)); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rs, SN(32, 0)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(Rt, SN(32, 16)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0xffff)); + RzILOpPure *op_MUL_19 = MUL(CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_7)), CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_16))); + RzILOpPure *op_LSHIFT_23 = SHIFTL0(CAST(64, IL_FALSE, CAST(64, IL_FALSE, op_MUL_19)), SN(32, 1)); + RzILOpPure *op_SUB_25 = SUB(CAST(64, MSB(READ_REG(pkt, Rx_op, false)), READ_REG(pkt, Rx_op, false)), op_LSHIFT_23); + RzILOpEffect *op_ASSIGN_27 = WRITE_REG(bundle, Rx_op, CAST(32, MSB(op_SUB_25), DUP(op_SUB_25))); + 
+ RzILOpEffect *instruction_sequence = op_ASSIGN_27; + return instruction_sequence; +} + +// Rx -= mpyu(Rs.l,Rt.l) +RzILOpEffect *hex_il_op_m2_mpyu_nac_ll_s0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rx = ((st32) ((ut64) Rx) - ((ut64) ((ut32) ((ut16) ((Rs >> 0x0) & 0xffff))) * ((ut32) ((ut16) ((Rt >> 0x0) & 0xffff))))); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rs, SN(32, 0)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0xffff)); + RzILOpPure *op_MUL_19 = MUL(CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_7)), CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_16))); + RzILOpPure *op_SUB_22 = SUB(CAST(64, IL_FALSE, READ_REG(pkt, Rx_op, false)), CAST(64, IL_FALSE, op_MUL_19)); + RzILOpEffect *op_ASSIGN_24 = WRITE_REG(bundle, Rx_op, CAST(32, IL_FALSE, op_SUB_22)); + + RzILOpEffect *instruction_sequence = op_ASSIGN_24; + return instruction_sequence; +} + +// Rx -= mpyu(Rs.l,Rt.l):<<1 +RzILOpEffect *hex_il_op_m2_mpyu_nac_ll_s1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rx = ((st32) ((st64) Rx) - (((st64) ((ut64) ((ut32) ((ut16) ((Rs >> 0x0) & 0xffff))) * ((ut32) ((ut16) ((Rt >> 0x0) & 0xffff))))) << 0x1)); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rs, SN(32, 0)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_14 = 
SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0xffff)); + RzILOpPure *op_MUL_19 = MUL(CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_7)), CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_16))); + RzILOpPure *op_LSHIFT_23 = SHIFTL0(CAST(64, IL_FALSE, CAST(64, IL_FALSE, op_MUL_19)), SN(32, 1)); + RzILOpPure *op_SUB_25 = SUB(CAST(64, MSB(READ_REG(pkt, Rx_op, false)), READ_REG(pkt, Rx_op, false)), op_LSHIFT_23); + RzILOpEffect *op_ASSIGN_27 = WRITE_REG(bundle, Rx_op, CAST(32, MSB(op_SUB_25), DUP(op_SUB_25))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_27; + return instruction_sequence; +} + +// Rd = mpyu(Rs,Rt) +RzILOpEffect *hex_il_op_m2_mpyu_up(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rd = ((st32) (((ut64) ((ut32) Rs)) * ((ut64) ((ut32) Rt)) >> 0x20)); + RzILOpPure *op_MUL_7 = MUL(CAST(64, IL_FALSE, CAST(32, IL_FALSE, Rs)), CAST(64, IL_FALSE, CAST(32, IL_FALSE, Rt))); + RzILOpPure *op_RSHIFT_9 = SHIFTR0(op_MUL_7, SN(32, 0x20)); + RzILOpEffect *op_ASSIGN_11 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, op_RSHIFT_9)); + + RzILOpEffect *instruction_sequence = op_ASSIGN_11; + return instruction_sequence; +} + +// Rxx += mpyu(Rs.h,Rt.h) +RzILOpEffect *hex_il_op_m2_mpyud_acc_hh_s0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rxx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rxx = ((st64) ((ut64) Rxx) + ((ut64) ((ut32) ((ut16) ((Rs >> 0x10) & 0xffff))) * ((ut32) ((ut16) ((Rt >> 0x10) 
& 0xffff))))); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rs, SN(32, 16)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(Rt, SN(32, 16)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0xffff)); + RzILOpPure *op_MUL_19 = MUL(CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_7)), CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_16))); + RzILOpPure *op_ADD_22 = ADD(CAST(64, IL_FALSE, READ_REG(pkt, Rxx_op, false)), CAST(64, IL_FALSE, op_MUL_19)); + RzILOpEffect *op_ASSIGN_24 = WRITE_REG(bundle, Rxx_op, CAST(64, IL_FALSE, op_ADD_22)); + + RzILOpEffect *instruction_sequence = op_ASSIGN_24; + return instruction_sequence; +} + +// Rxx += mpyu(Rs.h,Rt.h):<<1 +RzILOpEffect *hex_il_op_m2_mpyud_acc_hh_s1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rxx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rxx = Rxx + (((st64) ((ut64) ((ut32) ((ut16) ((Rs >> 0x10) & 0xffff))) * ((ut32) ((ut16) ((Rt >> 0x10) & 0xffff))))) << 0x1); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rs, SN(32, 16)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(Rt, SN(32, 16)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0xffff)); + RzILOpPure *op_MUL_19 = MUL(CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_7)), CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_16))); + RzILOpPure *op_LSHIFT_23 = SHIFTL0(CAST(64, IL_FALSE, CAST(64, IL_FALSE, op_MUL_19)), SN(32, 1)); + RzILOpPure *op_ADD_24 = ADD(READ_REG(pkt, Rxx_op, false), op_LSHIFT_23); + RzILOpEffect *op_ASSIGN_25 = WRITE_REG(bundle, Rxx_op, op_ADD_24); + + RzILOpEffect *instruction_sequence = op_ASSIGN_25; + return instruction_sequence; +} + +// Rxx += mpyu(Rs.h,Rt.l) +RzILOpEffect 
*hex_il_op_m2_mpyud_acc_hl_s0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rxx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rxx = ((st64) ((ut64) Rxx) + ((ut64) ((ut32) ((ut16) ((Rs >> 0x10) & 0xffff))) * ((ut32) ((ut16) ((Rt >> 0x0) & 0xffff))))); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rs, SN(32, 16)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0xffff)); + RzILOpPure *op_MUL_19 = MUL(CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_7)), CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_16))); + RzILOpPure *op_ADD_22 = ADD(CAST(64, IL_FALSE, READ_REG(pkt, Rxx_op, false)), CAST(64, IL_FALSE, op_MUL_19)); + RzILOpEffect *op_ASSIGN_24 = WRITE_REG(bundle, Rxx_op, CAST(64, IL_FALSE, op_ADD_22)); + + RzILOpEffect *instruction_sequence = op_ASSIGN_24; + return instruction_sequence; +} + +// Rxx += mpyu(Rs.h,Rt.l):<<1 +RzILOpEffect *hex_il_op_m2_mpyud_acc_hl_s1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rxx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rxx = Rxx + (((st64) ((ut64) ((ut32) ((ut16) ((Rs >> 0x10) & 0xffff))) * ((ut32) ((ut16) ((Rt >> 0x0) & 0xffff))))) << 0x1); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rs, SN(32, 16)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0xffff)); + RzILOpPure *op_MUL_19 = MUL(CAST(32, IL_FALSE, 
CAST(16, IL_FALSE, op_AND_7)), CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_16))); + RzILOpPure *op_LSHIFT_23 = SHIFTL0(CAST(64, IL_FALSE, CAST(64, IL_FALSE, op_MUL_19)), SN(32, 1)); + RzILOpPure *op_ADD_24 = ADD(READ_REG(pkt, Rxx_op, false), op_LSHIFT_23); + RzILOpEffect *op_ASSIGN_25 = WRITE_REG(bundle, Rxx_op, op_ADD_24); + + RzILOpEffect *instruction_sequence = op_ASSIGN_25; + return instruction_sequence; +} + +// Rxx += mpyu(Rs.l,Rt.h) +RzILOpEffect *hex_il_op_m2_mpyud_acc_lh_s0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rxx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rxx = ((st64) ((ut64) Rxx) + ((ut64) ((ut32) ((ut16) ((Rs >> 0x0) & 0xffff))) * ((ut32) ((ut16) ((Rt >> 0x10) & 0xffff))))); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rs, SN(32, 0)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(Rt, SN(32, 16)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0xffff)); + RzILOpPure *op_MUL_19 = MUL(CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_7)), CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_16))); + RzILOpPure *op_ADD_22 = ADD(CAST(64, IL_FALSE, READ_REG(pkt, Rxx_op, false)), CAST(64, IL_FALSE, op_MUL_19)); + RzILOpEffect *op_ASSIGN_24 = WRITE_REG(bundle, Rxx_op, CAST(64, IL_FALSE, op_ADD_22)); + + RzILOpEffect *instruction_sequence = op_ASSIGN_24; + return instruction_sequence; +} + +// Rxx += mpyu(Rs.l,Rt.h):<<1 +RzILOpEffect *hex_il_op_m2_mpyud_acc_lh_s1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rxx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + 
RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rxx = Rxx + (((st64) ((ut64) ((ut32) ((ut16) ((Rs >> 0x0) & 0xffff))) * ((ut32) ((ut16) ((Rt >> 0x10) & 0xffff))))) << 0x1); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rs, SN(32, 0)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(Rt, SN(32, 16)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0xffff)); + RzILOpPure *op_MUL_19 = MUL(CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_7)), CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_16))); + RzILOpPure *op_LSHIFT_23 = SHIFTL0(CAST(64, IL_FALSE, CAST(64, IL_FALSE, op_MUL_19)), SN(32, 1)); + RzILOpPure *op_ADD_24 = ADD(READ_REG(pkt, Rxx_op, false), op_LSHIFT_23); + RzILOpEffect *op_ASSIGN_25 = WRITE_REG(bundle, Rxx_op, op_ADD_24); + + RzILOpEffect *instruction_sequence = op_ASSIGN_25; + return instruction_sequence; +} + +// Rxx += mpyu(Rs.l,Rt.l) +RzILOpEffect *hex_il_op_m2_mpyud_acc_ll_s0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rxx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rxx = ((st64) ((ut64) Rxx) + ((ut64) ((ut32) ((ut16) ((Rs >> 0x0) & 0xffff))) * ((ut32) ((ut16) ((Rt >> 0x0) & 0xffff))))); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rs, SN(32, 0)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0xffff)); + RzILOpPure *op_MUL_19 = MUL(CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_7)), CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_16))); + RzILOpPure *op_ADD_22 = ADD(CAST(64, IL_FALSE, READ_REG(pkt, Rxx_op, false)), CAST(64, IL_FALSE, op_MUL_19)); + RzILOpEffect *op_ASSIGN_24 = WRITE_REG(bundle, Rxx_op, CAST(64, IL_FALSE, op_ADD_22)); + + 
RzILOpEffect *instruction_sequence = op_ASSIGN_24; + return instruction_sequence; +} + +// Rxx += mpyu(Rs.l,Rt.l):<<1 +RzILOpEffect *hex_il_op_m2_mpyud_acc_ll_s1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rxx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rxx = Rxx + (((st64) ((ut64) ((ut32) ((ut16) ((Rs >> 0x0) & 0xffff))) * ((ut32) ((ut16) ((Rt >> 0x0) & 0xffff))))) << 0x1); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rs, SN(32, 0)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0xffff)); + RzILOpPure *op_MUL_19 = MUL(CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_7)), CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_16))); + RzILOpPure *op_LSHIFT_23 = SHIFTL0(CAST(64, IL_FALSE, CAST(64, IL_FALSE, op_MUL_19)), SN(32, 1)); + RzILOpPure *op_ADD_24 = ADD(READ_REG(pkt, Rxx_op, false), op_LSHIFT_23); + RzILOpEffect *op_ASSIGN_25 = WRITE_REG(bundle, Rxx_op, op_ADD_24); + + RzILOpEffect *instruction_sequence = op_ASSIGN_25; + return instruction_sequence; +} + +// Rdd = mpyu(Rs.h,Rt.h) +RzILOpEffect *hex_il_op_m2_mpyud_hh_s0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rdd = ((st64) ((ut64) ((ut32) ((ut16) ((Rs >> 0x10) & 0xffff))) * ((ut32) ((ut16) ((Rt >> 0x10) & 0xffff))))); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rs, SN(32, 16)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(32, 0xffff)); + RzILOpPure 
*op_RSHIFT_14 = SHIFTRA(Rt, SN(32, 16)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0xffff)); + RzILOpPure *op_MUL_19 = MUL(CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_7)), CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_16))); + RzILOpEffect *op_ASSIGN_22 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, CAST(64, IL_FALSE, op_MUL_19))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_22; + return instruction_sequence; +} + +// Rdd = mpyu(Rs.h,Rt.h):<<1 +RzILOpEffect *hex_il_op_m2_mpyud_hh_s1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rdd = (((st64) ((ut64) ((ut32) ((ut16) ((Rs >> 0x10) & 0xffff))) * ((ut32) ((ut16) ((Rt >> 0x10) & 0xffff))))) << 0x1); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rs, SN(32, 16)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(Rt, SN(32, 16)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0xffff)); + RzILOpPure *op_MUL_19 = MUL(CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_7)), CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_16))); + RzILOpPure *op_LSHIFT_23 = SHIFTL0(CAST(64, IL_FALSE, CAST(64, IL_FALSE, op_MUL_19)), SN(32, 1)); + RzILOpEffect *op_ASSIGN_24 = WRITE_REG(bundle, Rdd_op, op_LSHIFT_23); + + RzILOpEffect *instruction_sequence = op_ASSIGN_24; + return instruction_sequence; +} + +// Rdd = mpyu(Rs.h,Rt.l) +RzILOpEffect *hex_il_op_m2_mpyud_hl_s0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = 
READ_REG(pkt, Rt_op, false); + + // Rdd = ((st64) ((ut64) ((ut32) ((ut16) ((Rs >> 0x10) & 0xffff))) * ((ut32) ((ut16) ((Rt >> 0x0) & 0xffff))))); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rs, SN(32, 16)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0xffff)); + RzILOpPure *op_MUL_19 = MUL(CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_7)), CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_16))); + RzILOpEffect *op_ASSIGN_22 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, CAST(64, IL_FALSE, op_MUL_19))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_22; + return instruction_sequence; +} + +// Rdd = mpyu(Rs.h,Rt.l):<<1 +RzILOpEffect *hex_il_op_m2_mpyud_hl_s1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rdd = (((st64) ((ut64) ((ut32) ((ut16) ((Rs >> 0x10) & 0xffff))) * ((ut32) ((ut16) ((Rt >> 0x0) & 0xffff))))) << 0x1); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rs, SN(32, 16)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0xffff)); + RzILOpPure *op_MUL_19 = MUL(CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_7)), CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_16))); + RzILOpPure *op_LSHIFT_23 = SHIFTL0(CAST(64, IL_FALSE, CAST(64, IL_FALSE, op_MUL_19)), SN(32, 1)); + RzILOpEffect *op_ASSIGN_24 = WRITE_REG(bundle, Rdd_op, op_LSHIFT_23); + + RzILOpEffect *instruction_sequence = op_ASSIGN_24; + return instruction_sequence; +} + +// Rdd = mpyu(Rs.l,Rt.h) +RzILOpEffect *hex_il_op_m2_mpyud_lh_s0(HexInsnPktBundle *bundle) { + const HexInsn *hi = 
bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rdd = ((st64) ((ut64) ((ut32) ((ut16) ((Rs >> 0x0) & 0xffff))) * ((ut32) ((ut16) ((Rt >> 0x10) & 0xffff))))); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rs, SN(32, 0)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(Rt, SN(32, 16)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0xffff)); + RzILOpPure *op_MUL_19 = MUL(CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_7)), CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_16))); + RzILOpEffect *op_ASSIGN_22 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, CAST(64, IL_FALSE, op_MUL_19))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_22; + return instruction_sequence; +} + +// Rdd = mpyu(Rs.l,Rt.h):<<1 +RzILOpEffect *hex_il_op_m2_mpyud_lh_s1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rdd = (((st64) ((ut64) ((ut32) ((ut16) ((Rs >> 0x0) & 0xffff))) * ((ut32) ((ut16) ((Rt >> 0x10) & 0xffff))))) << 0x1); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rs, SN(32, 0)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(Rt, SN(32, 16)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0xffff)); + RzILOpPure *op_MUL_19 = MUL(CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_7)), CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_16))); + RzILOpPure *op_LSHIFT_23 = SHIFTL0(CAST(64, IL_FALSE, CAST(64, IL_FALSE, op_MUL_19)), SN(32, 1)); + RzILOpEffect 
*op_ASSIGN_24 = WRITE_REG(bundle, Rdd_op, op_LSHIFT_23); + + RzILOpEffect *instruction_sequence = op_ASSIGN_24; + return instruction_sequence; +} + +// Rdd = mpyu(Rs.l,Rt.l) +RzILOpEffect *hex_il_op_m2_mpyud_ll_s0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rdd = ((st64) ((ut64) ((ut32) ((ut16) ((Rs >> 0x0) & 0xffff))) * ((ut32) ((ut16) ((Rt >> 0x0) & 0xffff))))); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rs, SN(32, 0)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0xffff)); + RzILOpPure *op_MUL_19 = MUL(CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_7)), CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_16))); + RzILOpEffect *op_ASSIGN_22 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, CAST(64, IL_FALSE, op_MUL_19))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_22; + return instruction_sequence; +} + +// Rdd = mpyu(Rs.l,Rt.l):<<1 +RzILOpEffect *hex_il_op_m2_mpyud_ll_s1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rdd = (((st64) ((ut64) ((ut32) ((ut16) ((Rs >> 0x0) & 0xffff))) * ((ut32) ((ut16) ((Rt >> 0x0) & 0xffff))))) << 0x1); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rs, SN(32, 0)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 
0xffff)); + RzILOpPure *op_MUL_19 = MUL(CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_7)), CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_16))); + RzILOpPure *op_LSHIFT_23 = SHIFTL0(CAST(64, IL_FALSE, CAST(64, IL_FALSE, op_MUL_19)), SN(32, 1)); + RzILOpEffect *op_ASSIGN_24 = WRITE_REG(bundle, Rdd_op, op_LSHIFT_23); + + RzILOpEffect *instruction_sequence = op_ASSIGN_24; + return instruction_sequence; +} + +// Rxx -= mpyu(Rs.h,Rt.h) +RzILOpEffect *hex_il_op_m2_mpyud_nac_hh_s0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rxx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rxx = ((st64) ((ut64) Rxx) - ((ut64) ((ut32) ((ut16) ((Rs >> 0x10) & 0xffff))) * ((ut32) ((ut16) ((Rt >> 0x10) & 0xffff))))); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rs, SN(32, 16)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(Rt, SN(32, 16)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0xffff)); + RzILOpPure *op_MUL_19 = MUL(CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_7)), CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_16))); + RzILOpPure *op_SUB_22 = SUB(CAST(64, IL_FALSE, READ_REG(pkt, Rxx_op, false)), CAST(64, IL_FALSE, op_MUL_19)); + RzILOpEffect *op_ASSIGN_24 = WRITE_REG(bundle, Rxx_op, CAST(64, IL_FALSE, op_SUB_22)); + + RzILOpEffect *instruction_sequence = op_ASSIGN_24; + return instruction_sequence; +} + +// Rxx -= mpyu(Rs.h,Rt.h):<<1 +RzILOpEffect *hex_il_op_m2_mpyud_nac_hh_s1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rxx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure 
*Rt = READ_REG(pkt, Rt_op, false); + + // Rxx = Rxx - (((st64) ((ut64) ((ut32) ((ut16) ((Rs >> 0x10) & 0xffff))) * ((ut32) ((ut16) ((Rt >> 0x10) & 0xffff))))) << 0x1); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rs, SN(32, 16)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(Rt, SN(32, 16)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0xffff)); + RzILOpPure *op_MUL_19 = MUL(CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_7)), CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_16))); + RzILOpPure *op_LSHIFT_23 = SHIFTL0(CAST(64, IL_FALSE, CAST(64, IL_FALSE, op_MUL_19)), SN(32, 1)); + RzILOpPure *op_SUB_24 = SUB(READ_REG(pkt, Rxx_op, false), op_LSHIFT_23); + RzILOpEffect *op_ASSIGN_25 = WRITE_REG(bundle, Rxx_op, op_SUB_24); + + RzILOpEffect *instruction_sequence = op_ASSIGN_25; + return instruction_sequence; +} + +// Rxx -= mpyu(Rs.h,Rt.l) +RzILOpEffect *hex_il_op_m2_mpyud_nac_hl_s0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rxx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rxx = ((st64) ((ut64) Rxx) - ((ut64) ((ut32) ((ut16) ((Rs >> 0x10) & 0xffff))) * ((ut32) ((ut16) ((Rt >> 0x0) & 0xffff))))); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rs, SN(32, 16)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0xffff)); + RzILOpPure *op_MUL_19 = MUL(CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_7)), CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_16))); + RzILOpPure *op_SUB_22 = SUB(CAST(64, IL_FALSE, READ_REG(pkt, Rxx_op, false)), CAST(64, IL_FALSE, op_MUL_19)); + RzILOpEffect *op_ASSIGN_24 = WRITE_REG(bundle, Rxx_op, CAST(64, IL_FALSE, op_SUB_22)); + + RzILOpEffect 
*instruction_sequence = op_ASSIGN_24; + return instruction_sequence; +} + +// Rxx -= mpyu(Rs.h,Rt.l):<<1 +RzILOpEffect *hex_il_op_m2_mpyud_nac_hl_s1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rxx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rxx = Rxx - (((st64) ((ut64) ((ut32) ((ut16) ((Rs >> 0x10) & 0xffff))) * ((ut32) ((ut16) ((Rt >> 0x0) & 0xffff))))) << 0x1); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rs, SN(32, 16)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0xffff)); + RzILOpPure *op_MUL_19 = MUL(CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_7)), CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_16))); + RzILOpPure *op_LSHIFT_23 = SHIFTL0(CAST(64, IL_FALSE, CAST(64, IL_FALSE, op_MUL_19)), SN(32, 1)); + RzILOpPure *op_SUB_24 = SUB(READ_REG(pkt, Rxx_op, false), op_LSHIFT_23); + RzILOpEffect *op_ASSIGN_25 = WRITE_REG(bundle, Rxx_op, op_SUB_24); + + RzILOpEffect *instruction_sequence = op_ASSIGN_25; + return instruction_sequence; +} + +// Rxx -= mpyu(Rs.l,Rt.h) +RzILOpEffect *hex_il_op_m2_mpyud_nac_lh_s0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rxx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rxx = ((st64) ((ut64) Rxx) - ((ut64) ((ut32) ((ut16) ((Rs >> 0x0) & 0xffff))) * ((ut32) ((ut16) ((Rt >> 0x10) & 0xffff))))); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rs, SN(32, 0)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(32, 0xffff)); + 
RzILOpPure *op_RSHIFT_14 = SHIFTRA(Rt, SN(32, 16)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0xffff)); + RzILOpPure *op_MUL_19 = MUL(CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_7)), CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_16))); + RzILOpPure *op_SUB_22 = SUB(CAST(64, IL_FALSE, READ_REG(pkt, Rxx_op, false)), CAST(64, IL_FALSE, op_MUL_19)); + RzILOpEffect *op_ASSIGN_24 = WRITE_REG(bundle, Rxx_op, CAST(64, IL_FALSE, op_SUB_22)); + + RzILOpEffect *instruction_sequence = op_ASSIGN_24; + return instruction_sequence; +} + +// Rxx -= mpyu(Rs.l,Rt.h):<<1 +RzILOpEffect *hex_il_op_m2_mpyud_nac_lh_s1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rxx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rxx = Rxx - (((st64) ((ut64) ((ut32) ((ut16) ((Rs >> 0x0) & 0xffff))) * ((ut32) ((ut16) ((Rt >> 0x10) & 0xffff))))) << 0x1); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rs, SN(32, 0)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(Rt, SN(32, 16)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0xffff)); + RzILOpPure *op_MUL_19 = MUL(CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_7)), CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_16))); + RzILOpPure *op_LSHIFT_23 = SHIFTL0(CAST(64, IL_FALSE, CAST(64, IL_FALSE, op_MUL_19)), SN(32, 1)); + RzILOpPure *op_SUB_24 = SUB(READ_REG(pkt, Rxx_op, false), op_LSHIFT_23); + RzILOpEffect *op_ASSIGN_25 = WRITE_REG(bundle, Rxx_op, op_SUB_24); + + RzILOpEffect *instruction_sequence = op_ASSIGN_25; + return instruction_sequence; +} + +// Rxx -= mpyu(Rs.l,Rt.l) +RzILOpEffect *hex_il_op_m2_mpyud_nac_ll_s0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rxx_op = 
ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rxx = ((st64) ((ut64) Rxx) - ((ut64) ((ut32) ((ut16) ((Rs >> 0x0) & 0xffff))) * ((ut32) ((ut16) ((Rt >> 0x0) & 0xffff))))); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rs, SN(32, 0)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0xffff)); + RzILOpPure *op_MUL_19 = MUL(CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_7)), CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_16))); + RzILOpPure *op_SUB_22 = SUB(CAST(64, IL_FALSE, READ_REG(pkt, Rxx_op, false)), CAST(64, IL_FALSE, op_MUL_19)); + RzILOpEffect *op_ASSIGN_24 = WRITE_REG(bundle, Rxx_op, CAST(64, IL_FALSE, op_SUB_22)); + + RzILOpEffect *instruction_sequence = op_ASSIGN_24; + return instruction_sequence; +} + +// Rxx -= mpyu(Rs.l,Rt.l):<<1 +RzILOpEffect *hex_il_op_m2_mpyud_nac_ll_s1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rxx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rxx = Rxx - (((st64) ((ut64) ((ut32) ((ut16) ((Rs >> 0x0) & 0xffff))) * ((ut32) ((ut16) ((Rt >> 0x0) & 0xffff))))) << 0x1); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rs, SN(32, 0)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0xffff)); + RzILOpPure *op_MUL_19 = MUL(CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_7)), CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_16))); + RzILOpPure *op_LSHIFT_23 = SHIFTL0(CAST(64, IL_FALSE, CAST(64, IL_FALSE, 
op_MUL_19)), SN(32, 1)); + RzILOpPure *op_SUB_24 = SUB(READ_REG(pkt, Rxx_op, false), op_LSHIFT_23); + RzILOpEffect *op_ASSIGN_25 = WRITE_REG(bundle, Rxx_op, op_SUB_24); + + RzILOpEffect *instruction_sequence = op_ASSIGN_25; + return instruction_sequence; +} + +// Rx -= add(Rs,Rt) +RzILOpEffect *hex_il_op_m2_nacci(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rx = Rx - Rs + Rt; + RzILOpPure *op_ADD_3 = ADD(Rs, Rt); + RzILOpPure *op_SUB_4 = SUB(READ_REG(pkt, Rx_op, false), op_ADD_3); + RzILOpEffect *op_ASSIGN_5 = WRITE_REG(bundle, Rx_op, op_SUB_4); + + RzILOpEffect *instruction_sequence = op_ASSIGN_5; + return instruction_sequence; +} + +// Rx -= add(Rs,Ii) +RzILOpEffect *hex_il_op_m2_naccii(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // s = s; + RzILOpEffect *imm_assign_0 = SETL("s", s); + + // Rx = Rx - Rs + s; + RzILOpPure *op_ADD_4 = ADD(Rs, VARL("s")); + RzILOpPure *op_SUB_5 = SUB(READ_REG(pkt, Rx_op, false), op_ADD_4); + RzILOpEffect *op_ASSIGN_6 = WRITE_REG(bundle, Rx_op, op_SUB_5); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_0, op_ASSIGN_6); + return instruction_sequence; +} + +// Rx += sub(Rt,Rs) +RzILOpEffect *hex_il_op_m2_subacc(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + const HexOp 
*Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // Rx = Rx + Rt - Rs; + RzILOpPure *op_ADD_2 = ADD(READ_REG(pkt, Rx_op, false), Rt); + RzILOpPure *op_SUB_4 = SUB(op_ADD_2, Rs); + RzILOpEffect *op_ASSIGN_5 = WRITE_REG(bundle, Rx_op, op_SUB_4); + + RzILOpEffect *instruction_sequence = op_ASSIGN_5; + return instruction_sequence; +} + +// Rdd = vabsdiffh(Rtt,Rss) +RzILOpEffect *hex_il_op_m2_vabsdiffh(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: st32 i; + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_2 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_5 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp365 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_7 = SETL("h_tmp365", VARL("i")); + + // seq(h_tmp365 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_8 = SEQN(2, op_ASSIGN_hybrid_tmp_7, op_INC_5); + + // Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * 0x10)))) | (((ut64) (((((st32) ((st16) ((Rtt >> i * 0x10) & ((st64) 0xffff)))) - ((st32) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff)))) < 0x0) ? 
(-((st32) ((st16) ((Rtt >> i * 0x10) & ((st64) 0xffff)))) - ((st32) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff))))) : ((st32) ((st16) ((Rtt >> i * 0x10) & ((st64) 0xffff)))) - ((st32) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff))))) & 0xffff)) << i * 0x10))); + RzILOpPure *op_MUL_12 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_13 = SHIFTL0(SN(64, 0xffff), op_MUL_12); + RzILOpPure *op_NOT_14 = LOGNOT(op_LSHIFT_13); + RzILOpPure *op_AND_15 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_14); + RzILOpPure *op_MUL_18 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_19 = SHIFTRA(Rtt, op_MUL_18); + RzILOpPure *op_AND_22 = LOGAND(op_RSHIFT_19, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_26 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_27 = SHIFTRA(Rss, op_MUL_26); + RzILOpPure *op_AND_30 = LOGAND(op_RSHIFT_27, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_SUB_34 = SUB(CAST(32, MSB(CAST(16, MSB(op_AND_22), DUP(op_AND_22))), CAST(16, MSB(DUP(op_AND_22)), DUP(op_AND_22))), CAST(32, MSB(CAST(16, MSB(op_AND_30), DUP(op_AND_30))), CAST(16, MSB(DUP(op_AND_30)), DUP(op_AND_30)))); + RzILOpPure *op_LT_36 = SLT(op_SUB_34, SN(32, 0)); + RzILOpPure *op_MUL_38 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_39 = SHIFTRA(DUP(Rtt), op_MUL_38); + RzILOpPure *op_AND_42 = LOGAND(op_RSHIFT_39, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_45 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_46 = SHIFTRA(DUP(Rss), op_MUL_45); + RzILOpPure *op_AND_49 = LOGAND(op_RSHIFT_46, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_SUB_53 = SUB(CAST(32, MSB(CAST(16, MSB(op_AND_42), DUP(op_AND_42))), CAST(16, MSB(DUP(op_AND_42)), DUP(op_AND_42))), CAST(32, MSB(CAST(16, MSB(op_AND_49), DUP(op_AND_49))), CAST(16, MSB(DUP(op_AND_49)), DUP(op_AND_49)))); + RzILOpPure *op_NEG_54 = NEG(op_SUB_53); + RzILOpPure *op_MUL_56 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_57 = SHIFTRA(DUP(Rtt), 
op_MUL_56); + RzILOpPure *op_AND_60 = LOGAND(op_RSHIFT_57, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_63 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_64 = SHIFTRA(DUP(Rss), op_MUL_63); + RzILOpPure *op_AND_67 = LOGAND(op_RSHIFT_64, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_SUB_71 = SUB(CAST(32, MSB(CAST(16, MSB(op_AND_60), DUP(op_AND_60))), CAST(16, MSB(DUP(op_AND_60)), DUP(op_AND_60))), CAST(32, MSB(CAST(16, MSB(op_AND_67), DUP(op_AND_67))), CAST(16, MSB(DUP(op_AND_67)), DUP(op_AND_67)))); + RzILOpPure *cond_72 = ITE(op_LT_36, op_NEG_54, op_SUB_71); + RzILOpPure *op_AND_74 = LOGAND(cond_72, SN(32, 0xffff)); + RzILOpPure *op_MUL_77 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_78 = SHIFTL0(CAST(64, IL_FALSE, op_AND_74), op_MUL_77); + RzILOpPure *op_OR_80 = LOGOR(CAST(64, IL_FALSE, op_AND_15), op_LSHIFT_78); + RzILOpEffect *op_ASSIGN_82 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_80)); + + // seq(h_tmp365; Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * 0x1 ...; + RzILOpEffect *seq_84 = op_ASSIGN_82; + + // seq(seq(h_tmp365; Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * ...; + RzILOpEffect *seq_85 = SEQN(2, seq_84, seq_8); + + // while ((i < 0x4)) { seq(seq(h_tmp365; Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * ... 
}; + RzILOpPure *op_LT_4 = SLT(VARL("i"), SN(32, 4)); + RzILOpEffect *for_86 = REPEAT(op_LT_4, seq_85); + + // seq(i = 0x0; while ((i < 0x4)) { seq(seq(h_tmp365; Rdd = ((st64) ...; + RzILOpEffect *seq_87 = SEQN(2, op_ASSIGN_2, for_86); + + RzILOpEffect *instruction_sequence = seq_87; + return instruction_sequence; +} + +// Rdd = vabsdiffw(Rtt,Rss) +RzILOpEffect *hex_il_op_m2_vabsdiffw(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: st32 i; + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_2 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_5 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp366 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_7 = SETL("h_tmp366", VARL("i")); + + // seq(h_tmp366 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_8 = SEQN(2, op_ASSIGN_hybrid_tmp_7, op_INC_5); + + // Rdd = ((Rdd & (~(0xffffffff << i * 0x20))) | ((((((st64) ((st32) ((Rtt >> i * 0x20) & 0xffffffff))) - ((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff))) < ((st64) 0x0)) ? 
(-((st64) ((st32) ((Rtt >> i * 0x20) & 0xffffffff))) - ((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff)))) : ((st64) ((st32) ((Rtt >> i * 0x20) & 0xffffffff))) - ((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff)))) & 0xffffffff) << i * 0x20)); + RzILOpPure *op_MUL_12 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_LSHIFT_13 = SHIFTL0(SN(64, 0xffffffff), op_MUL_12); + RzILOpPure *op_NOT_14 = LOGNOT(op_LSHIFT_13); + RzILOpPure *op_AND_15 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_14); + RzILOpPure *op_MUL_18 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_RSHIFT_19 = SHIFTRA(Rtt, op_MUL_18); + RzILOpPure *op_AND_21 = LOGAND(op_RSHIFT_19, SN(64, 0xffffffff)); + RzILOpPure *op_MUL_26 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_RSHIFT_27 = SHIFTRA(Rss, op_MUL_26); + RzILOpPure *op_AND_29 = LOGAND(op_RSHIFT_27, SN(64, 0xffffffff)); + RzILOpPure *op_SUB_32 = SUB(CAST(64, MSB(CAST(32, MSB(op_AND_21), DUP(op_AND_21))), CAST(32, MSB(DUP(op_AND_21)), DUP(op_AND_21))), CAST(64, MSB(CAST(32, MSB(op_AND_29), DUP(op_AND_29))), CAST(32, MSB(DUP(op_AND_29)), DUP(op_AND_29)))); + RzILOpPure *op_LT_35 = SLT(op_SUB_32, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_MUL_37 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_RSHIFT_38 = SHIFTRA(DUP(Rtt), op_MUL_37); + RzILOpPure *op_AND_40 = LOGAND(op_RSHIFT_38, SN(64, 0xffffffff)); + RzILOpPure *op_MUL_44 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_RSHIFT_45 = SHIFTRA(DUP(Rss), op_MUL_44); + RzILOpPure *op_AND_47 = LOGAND(op_RSHIFT_45, SN(64, 0xffffffff)); + RzILOpPure *op_SUB_50 = SUB(CAST(64, MSB(CAST(32, MSB(op_AND_40), DUP(op_AND_40))), CAST(32, MSB(DUP(op_AND_40)), DUP(op_AND_40))), CAST(64, MSB(CAST(32, MSB(op_AND_47), DUP(op_AND_47))), CAST(32, MSB(DUP(op_AND_47)), DUP(op_AND_47)))); + RzILOpPure *op_NEG_51 = NEG(op_SUB_50); + RzILOpPure *op_MUL_53 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_RSHIFT_54 = SHIFTRA(DUP(Rtt), op_MUL_53); + RzILOpPure *op_AND_56 = LOGAND(op_RSHIFT_54, SN(64, 0xffffffff)); + 
RzILOpPure *op_MUL_60 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_RSHIFT_61 = SHIFTRA(DUP(Rss), op_MUL_60); + RzILOpPure *op_AND_63 = LOGAND(op_RSHIFT_61, SN(64, 0xffffffff)); + RzILOpPure *op_SUB_66 = SUB(CAST(64, MSB(CAST(32, MSB(op_AND_56), DUP(op_AND_56))), CAST(32, MSB(DUP(op_AND_56)), DUP(op_AND_56))), CAST(64, MSB(CAST(32, MSB(op_AND_63), DUP(op_AND_63))), CAST(32, MSB(DUP(op_AND_63)), DUP(op_AND_63)))); + RzILOpPure *cond_67 = ITE(op_LT_35, op_NEG_51, op_SUB_66); + RzILOpPure *op_AND_69 = LOGAND(cond_67, SN(64, 0xffffffff)); + RzILOpPure *op_MUL_71 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_LSHIFT_72 = SHIFTL0(op_AND_69, op_MUL_71); + RzILOpPure *op_OR_73 = LOGOR(op_AND_15, op_LSHIFT_72); + RzILOpEffect *op_ASSIGN_74 = WRITE_REG(bundle, Rdd_op, op_OR_73); + + // seq(h_tmp366; Rdd = ((Rdd & (~(0xffffffff << i * 0x20))) | ((((( ...; + RzILOpEffect *seq_76 = op_ASSIGN_74; + + // seq(seq(h_tmp366; Rdd = ((Rdd & (~(0xffffffff << i * 0x20))) | ( ...; + RzILOpEffect *seq_77 = SEQN(2, seq_76, seq_8); + + // while ((i < 0x2)) { seq(seq(h_tmp366; Rdd = ((Rdd & (~(0xffffffff << i * 0x20))) | ( ... 
}; + RzILOpPure *op_LT_4 = SLT(VARL("i"), SN(32, 2)); + RzILOpEffect *for_78 = REPEAT(op_LT_4, seq_77); + + // seq(i = 0x0; while ((i < 0x2)) { seq(seq(h_tmp366; Rdd = ((Rdd & ...; + RzILOpEffect *seq_79 = SEQN(2, op_ASSIGN_2, for_78); + + RzILOpEffect *instruction_sequence = seq_79; + return instruction_sequence; +} + +// Rxx += vcmpyi(Rss,Rtt):sat +RzILOpEffect *hex_il_op_m2_vcmac_s0_sat_i(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rxx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_177 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) + ((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff))))) << 0x0)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) + ((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff))))) << 0x0))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) + ((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff))))) << 0x0) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_23 = SHIFTRA(Rss, SN(32, 16)); + RzILOpPure *op_AND_26 = LOGAND(op_RSHIFT_23, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_33 = SHIFTRA(Rtt, SN(32, 0)); + RzILOpPure *op_AND_36 = LOGAND(op_RSHIFT_33, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_39 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_26), DUP(op_AND_26))), CAST(16, MSB(DUP(op_AND_26)), DUP(op_AND_26))), CAST(32, MSB(CAST(16, MSB(op_AND_36), DUP(op_AND_36))), CAST(16, MSB(DUP(op_AND_36)), DUP(op_AND_36)))); + RzILOpPure *op_RSHIFT_44 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_47 = LOGAND(op_RSHIFT_44, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_53 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_56 = LOGAND(op_RSHIFT_53, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_59 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_47), DUP(op_AND_47))), CAST(16, MSB(DUP(op_AND_47)), DUP(op_AND_47))), CAST(32, MSB(CAST(16, MSB(op_AND_56), DUP(op_AND_56))), CAST(16, MSB(DUP(op_AND_56)), DUP(op_AND_56)))); + RzILOpPure *op_ADD_61 = ADD(CAST(64, MSB(op_MUL_39), DUP(op_MUL_39)), CAST(64, MSB(op_MUL_59), DUP(op_MUL_59))); + RzILOpPure *op_LSHIFT_63 = SHIFTL0(op_ADD_61, SN(32, 0)); + RzILOpPure *op_ADD_64 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_16), DUP(op_AND_16))), CAST(32, MSB(DUP(op_AND_16)), DUP(op_AND_16))), op_LSHIFT_63); + RzILOpPure *op_RSHIFT_73 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_75 = LOGAND(op_RSHIFT_73, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_81 = SHIFTRA(DUP(Rss), SN(32, 16)); + RzILOpPure *op_AND_84 = LOGAND(op_RSHIFT_81, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_90 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_93 = 
LOGAND(op_RSHIFT_90, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_96 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_84), DUP(op_AND_84))), CAST(16, MSB(DUP(op_AND_84)), DUP(op_AND_84))), CAST(32, MSB(CAST(16, MSB(op_AND_93), DUP(op_AND_93))), CAST(16, MSB(DUP(op_AND_93)), DUP(op_AND_93)))); + RzILOpPure *op_RSHIFT_101 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_104 = LOGAND(op_RSHIFT_101, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_110 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_113 = LOGAND(op_RSHIFT_110, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_116 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_104), DUP(op_AND_104))), CAST(16, MSB(DUP(op_AND_104)), DUP(op_AND_104))), CAST(32, MSB(CAST(16, MSB(op_AND_113), DUP(op_AND_113))), CAST(16, MSB(DUP(op_AND_113)), DUP(op_AND_113)))); + RzILOpPure *op_ADD_118 = ADD(CAST(64, MSB(op_MUL_96), DUP(op_MUL_96)), CAST(64, MSB(op_MUL_116), DUP(op_MUL_116))); + RzILOpPure *op_LSHIFT_120 = SHIFTL0(op_ADD_118, SN(32, 0)); + RzILOpPure *op_ADD_121 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_75), DUP(op_AND_75))), CAST(32, MSB(DUP(op_AND_75)), DUP(op_AND_75))), op_LSHIFT_120); + RzILOpPure *op_EQ_122 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_ADD_64), SN(32, 0), SN(32, 0x20)), op_ADD_121); + RzILOpPure *op_RSHIFT_181 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_183 = LOGAND(op_RSHIFT_181, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_189 = SHIFTRA(DUP(Rss), SN(32, 16)); + RzILOpPure *op_AND_192 = LOGAND(op_RSHIFT_189, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_198 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_201 = LOGAND(op_RSHIFT_198, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_204 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_192), DUP(op_AND_192))), CAST(16, MSB(DUP(op_AND_192)), DUP(op_AND_192))), CAST(32, MSB(CAST(16, MSB(op_AND_201), DUP(op_AND_201))), CAST(16, 
MSB(DUP(op_AND_201)), DUP(op_AND_201)))); + RzILOpPure *op_RSHIFT_209 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_212 = LOGAND(op_RSHIFT_209, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_218 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_221 = LOGAND(op_RSHIFT_218, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_224 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_212), DUP(op_AND_212))), CAST(16, MSB(DUP(op_AND_212)), DUP(op_AND_212))), CAST(32, MSB(CAST(16, MSB(op_AND_221), DUP(op_AND_221))), CAST(16, MSB(DUP(op_AND_221)), DUP(op_AND_221)))); + RzILOpPure *op_ADD_226 = ADD(CAST(64, MSB(op_MUL_204), DUP(op_MUL_204)), CAST(64, MSB(op_MUL_224), DUP(op_MUL_224))); + RzILOpPure *op_LSHIFT_228 = SHIFTL0(op_ADD_226, SN(32, 0)); + RzILOpPure *op_ADD_229 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_183), DUP(op_AND_183))), CAST(32, MSB(DUP(op_AND_183)), DUP(op_AND_183))), op_LSHIFT_228); + RzILOpPure *op_LT_232 = SLT(op_ADD_229, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_237 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_238 = NEG(op_LSHIFT_237); + RzILOpPure *op_LSHIFT_243 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_246 = SUB(op_LSHIFT_243, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_247 = ITE(op_LT_232, op_NEG_238, op_SUB_246); + RzILOpEffect *gcc_expr_248 = BRANCH(op_EQ_122, EMPTY(), set_usr_field_call_177); + + // h_tmp367 = HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) + ((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff))))) << 0x0)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) + ((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 
0xffff)))) * ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff))))) << 0x0))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) + ((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff))))) << 0x0) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_250 = SETL("h_tmp367", cond_247); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx > ...; + RzILOpEffect *seq_251 = SEQN(2, gcc_expr_248, op_ASSIGN_hybrid_tmp_250); + + // Rxx = ((Rxx & (~(0xffffffff << 0x0))) | ((((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) + ((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff))))) << 0x0)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) + ((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff))))) << 0x0)) ? 
((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) + ((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff))))) << 0x0) : h_tmp367) & 0xffffffff) << 0x0)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0)); + RzILOpPure *op_NOT_6 = LOGNOT(op_LSHIFT_5); + RzILOpPure *op_AND_7 = LOGAND(READ_REG(pkt, Rxx_op, false), op_NOT_6); + RzILOpPure *op_RSHIFT_126 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_128 = LOGAND(op_RSHIFT_126, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_134 = SHIFTRA(DUP(Rss), SN(32, 16)); + RzILOpPure *op_AND_137 = LOGAND(op_RSHIFT_134, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_143 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_146 = LOGAND(op_RSHIFT_143, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_149 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_137), DUP(op_AND_137))), CAST(16, MSB(DUP(op_AND_137)), DUP(op_AND_137))), CAST(32, MSB(CAST(16, MSB(op_AND_146), DUP(op_AND_146))), CAST(16, MSB(DUP(op_AND_146)), DUP(op_AND_146)))); + RzILOpPure *op_RSHIFT_154 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_157 = LOGAND(op_RSHIFT_154, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_163 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_166 = LOGAND(op_RSHIFT_163, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_169 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_157), DUP(op_AND_157))), CAST(16, MSB(DUP(op_AND_157)), DUP(op_AND_157))), CAST(32, MSB(CAST(16, MSB(op_AND_166), DUP(op_AND_166))), CAST(16, MSB(DUP(op_AND_166)), DUP(op_AND_166)))); + RzILOpPure *op_ADD_171 = ADD(CAST(64, MSB(op_MUL_149), DUP(op_MUL_149)), CAST(64, MSB(op_MUL_169), DUP(op_MUL_169))); + RzILOpPure *op_LSHIFT_173 = SHIFTL0(op_ADD_171, SN(32, 0)); + RzILOpPure *op_ADD_174 = 
ADD(CAST(64, MSB(CAST(32, MSB(op_AND_128), DUP(op_AND_128))), CAST(32, MSB(DUP(op_AND_128)), DUP(op_AND_128))), op_LSHIFT_173); + RzILOpPure *cond_252 = ITE(DUP(op_EQ_122), op_ADD_174, VARL("h_tmp367")); + RzILOpPure *op_AND_254 = LOGAND(cond_252, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_258 = SHIFTL0(op_AND_254, SN(32, 0)); + RzILOpPure *op_OR_259 = LOGOR(op_AND_7, op_LSHIFT_258); + RzILOpEffect *op_ASSIGN_260 = WRITE_REG(bundle, Rxx_op, op_OR_259); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((R ...; + RzILOpEffect *seq_261 = SEQN(2, seq_251, op_ASSIGN_260); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_437 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) + ((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff))))) << 0x0)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) + ((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff))))) << 0x0))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) + ((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff))))) << 0x0) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_276 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_278 = LOGAND(op_RSHIFT_276, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_284 = SHIFTRA(DUP(Rss), SN(32, 0x30)); + RzILOpPure *op_AND_287 = LOGAND(op_RSHIFT_284, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_293 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_296 = LOGAND(op_RSHIFT_293, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_299 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_287), DUP(op_AND_287))), CAST(16, MSB(DUP(op_AND_287)), DUP(op_AND_287))), CAST(32, MSB(CAST(16, MSB(op_AND_296), DUP(op_AND_296))), CAST(16, MSB(DUP(op_AND_296)), DUP(op_AND_296)))); + RzILOpPure *op_RSHIFT_304 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_307 = LOGAND(op_RSHIFT_304, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_313 = SHIFTRA(DUP(Rtt), SN(32, 0x30)); + RzILOpPure *op_AND_316 = LOGAND(op_RSHIFT_313, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_319 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_307), DUP(op_AND_307))), CAST(16, MSB(DUP(op_AND_307)), DUP(op_AND_307))), CAST(32, MSB(CAST(16, MSB(op_AND_316), DUP(op_AND_316))), CAST(16, MSB(DUP(op_AND_316)), DUP(op_AND_316)))); + RzILOpPure *op_ADD_321 = ADD(CAST(64, MSB(op_MUL_299), DUP(op_MUL_299)), CAST(64, MSB(op_MUL_319), DUP(op_MUL_319))); + RzILOpPure *op_LSHIFT_323 = SHIFTL0(op_ADD_321, SN(32, 0)); + RzILOpPure *op_ADD_324 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_278), DUP(op_AND_278))), CAST(32, MSB(DUP(op_AND_278)), DUP(op_AND_278))), op_LSHIFT_323); + RzILOpPure *op_RSHIFT_333 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_335 = LOGAND(op_RSHIFT_333, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_341 = SHIFTRA(DUP(Rss), SN(32, 0x30)); + RzILOpPure *op_AND_344 = LOGAND(op_RSHIFT_341, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + 
RzILOpPure *op_RSHIFT_350 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_353 = LOGAND(op_RSHIFT_350, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_356 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_344), DUP(op_AND_344))), CAST(16, MSB(DUP(op_AND_344)), DUP(op_AND_344))), CAST(32, MSB(CAST(16, MSB(op_AND_353), DUP(op_AND_353))), CAST(16, MSB(DUP(op_AND_353)), DUP(op_AND_353)))); + RzILOpPure *op_RSHIFT_361 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_364 = LOGAND(op_RSHIFT_361, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_370 = SHIFTRA(DUP(Rtt), SN(32, 0x30)); + RzILOpPure *op_AND_373 = LOGAND(op_RSHIFT_370, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_376 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_364), DUP(op_AND_364))), CAST(16, MSB(DUP(op_AND_364)), DUP(op_AND_364))), CAST(32, MSB(CAST(16, MSB(op_AND_373), DUP(op_AND_373))), CAST(16, MSB(DUP(op_AND_373)), DUP(op_AND_373)))); + RzILOpPure *op_ADD_378 = ADD(CAST(64, MSB(op_MUL_356), DUP(op_MUL_356)), CAST(64, MSB(op_MUL_376), DUP(op_MUL_376))); + RzILOpPure *op_LSHIFT_380 = SHIFTL0(op_ADD_378, SN(32, 0)); + RzILOpPure *op_ADD_381 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_335), DUP(op_AND_335))), CAST(32, MSB(DUP(op_AND_335)), DUP(op_AND_335))), op_LSHIFT_380); + RzILOpPure *op_EQ_382 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_ADD_324), SN(32, 0), SN(32, 0x20)), op_ADD_381); + RzILOpPure *op_RSHIFT_441 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_443 = LOGAND(op_RSHIFT_441, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_449 = SHIFTRA(DUP(Rss), SN(32, 0x30)); + RzILOpPure *op_AND_452 = LOGAND(op_RSHIFT_449, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_458 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_461 = LOGAND(op_RSHIFT_458, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_464 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_452), DUP(op_AND_452))), CAST(16, 
MSB(DUP(op_AND_452)), DUP(op_AND_452))), CAST(32, MSB(CAST(16, MSB(op_AND_461), DUP(op_AND_461))), CAST(16, MSB(DUP(op_AND_461)), DUP(op_AND_461)))); + RzILOpPure *op_RSHIFT_469 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_472 = LOGAND(op_RSHIFT_469, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_478 = SHIFTRA(DUP(Rtt), SN(32, 0x30)); + RzILOpPure *op_AND_481 = LOGAND(op_RSHIFT_478, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_484 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_472), DUP(op_AND_472))), CAST(16, MSB(DUP(op_AND_472)), DUP(op_AND_472))), CAST(32, MSB(CAST(16, MSB(op_AND_481), DUP(op_AND_481))), CAST(16, MSB(DUP(op_AND_481)), DUP(op_AND_481)))); + RzILOpPure *op_ADD_486 = ADD(CAST(64, MSB(op_MUL_464), DUP(op_MUL_464)), CAST(64, MSB(op_MUL_484), DUP(op_MUL_484))); + RzILOpPure *op_LSHIFT_488 = SHIFTL0(op_ADD_486, SN(32, 0)); + RzILOpPure *op_ADD_489 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_443), DUP(op_AND_443))), CAST(32, MSB(DUP(op_AND_443)), DUP(op_AND_443))), op_LSHIFT_488); + RzILOpPure *op_LT_492 = SLT(op_ADD_489, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_497 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_498 = NEG(op_LSHIFT_497); + RzILOpPure *op_LSHIFT_503 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_506 = SUB(op_LSHIFT_503, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_507 = ITE(op_LT_492, op_NEG_498, op_SUB_506); + RzILOpEffect *gcc_expr_508 = BRANCH(op_EQ_382, EMPTY(), set_usr_field_call_437); + + // h_tmp368 = HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) + ((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff))))) << 0x0)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 
0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) + ((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff))))) << 0x0))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) + ((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff))))) << 0x0) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_510 = SETL("h_tmp368", cond_507); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx > ...; + RzILOpEffect *seq_511 = SEQN(2, gcc_expr_508, op_ASSIGN_hybrid_tmp_510); + + // Rxx = ((Rxx & (~(0xffffffff << 0x20))) | ((((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) + ((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff))))) << 0x0)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) + ((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff))))) << 0x0)) ? 
((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) + ((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff))))) << 0x0) : h_tmp368) & 0xffffffff) << 0x20)); + RzILOpPure *op_LSHIFT_267 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0x20)); + RzILOpPure *op_NOT_268 = LOGNOT(op_LSHIFT_267); + RzILOpPure *op_AND_269 = LOGAND(READ_REG(pkt, Rxx_op, false), op_NOT_268); + RzILOpPure *op_RSHIFT_386 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_388 = LOGAND(op_RSHIFT_386, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_394 = SHIFTRA(DUP(Rss), SN(32, 0x30)); + RzILOpPure *op_AND_397 = LOGAND(op_RSHIFT_394, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_403 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_406 = LOGAND(op_RSHIFT_403, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_409 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_397), DUP(op_AND_397))), CAST(16, MSB(DUP(op_AND_397)), DUP(op_AND_397))), CAST(32, MSB(CAST(16, MSB(op_AND_406), DUP(op_AND_406))), CAST(16, MSB(DUP(op_AND_406)), DUP(op_AND_406)))); + RzILOpPure *op_RSHIFT_414 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_417 = LOGAND(op_RSHIFT_414, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_423 = SHIFTRA(DUP(Rtt), SN(32, 0x30)); + RzILOpPure *op_AND_426 = LOGAND(op_RSHIFT_423, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_429 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_417), DUP(op_AND_417))), CAST(16, MSB(DUP(op_AND_417)), DUP(op_AND_417))), CAST(32, MSB(CAST(16, MSB(op_AND_426), DUP(op_AND_426))), CAST(16, MSB(DUP(op_AND_426)), DUP(op_AND_426)))); + RzILOpPure *op_ADD_431 = ADD(CAST(64, MSB(op_MUL_409), DUP(op_MUL_409)), CAST(64, MSB(op_MUL_429), DUP(op_MUL_429))); + RzILOpPure *op_LSHIFT_433 = SHIFTL0(op_ADD_431, SN(32, 0)); + 
RzILOpPure *op_ADD_434 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_388), DUP(op_AND_388))), CAST(32, MSB(DUP(op_AND_388)), DUP(op_AND_388))), op_LSHIFT_433); + RzILOpPure *cond_512 = ITE(DUP(op_EQ_382), op_ADD_434, VARL("h_tmp368")); + RzILOpPure *op_AND_514 = LOGAND(cond_512, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_518 = SHIFTL0(op_AND_514, SN(32, 0x20)); + RzILOpPure *op_OR_519 = LOGOR(op_AND_269, op_LSHIFT_518); + RzILOpEffect *op_ASSIGN_520 = WRITE_REG(bundle, Rxx_op, op_OR_519); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((R ...; + RzILOpEffect *seq_521 = SEQN(2, seq_511, op_ASSIGN_520); + + RzILOpEffect *instruction_sequence = SEQN(2, seq_261, seq_521); + return instruction_sequence; +} + +// Rxx += vcmpyr(Rss,Rtt):sat +RzILOpEffect *hex_il_op_m2_vcmac_s0_sat_r(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rxx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_177 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) - ((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff))))) << 0x0)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) - ((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff))))) << 0x0))) {{}} else 
{set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) - ((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff))))) << 0x0) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_23 = SHIFTRA(Rss, SN(32, 0)); + RzILOpPure *op_AND_26 = LOGAND(op_RSHIFT_23, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_33 = SHIFTRA(Rtt, SN(32, 0)); + RzILOpPure *op_AND_36 = LOGAND(op_RSHIFT_33, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_39 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_26), DUP(op_AND_26))), CAST(16, MSB(DUP(op_AND_26)), DUP(op_AND_26))), CAST(32, MSB(CAST(16, MSB(op_AND_36), DUP(op_AND_36))), CAST(16, MSB(DUP(op_AND_36)), DUP(op_AND_36)))); + RzILOpPure *op_RSHIFT_44 = SHIFTRA(DUP(Rss), SN(32, 16)); + RzILOpPure *op_AND_47 = LOGAND(op_RSHIFT_44, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_53 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_56 = LOGAND(op_RSHIFT_53, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_59 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_47), DUP(op_AND_47))), CAST(16, MSB(DUP(op_AND_47)), DUP(op_AND_47))), CAST(32, MSB(CAST(16, MSB(op_AND_56), DUP(op_AND_56))), CAST(16, MSB(DUP(op_AND_56)), DUP(op_AND_56)))); + RzILOpPure *op_SUB_61 = SUB(CAST(64, MSB(op_MUL_39), DUP(op_MUL_39)), CAST(64, MSB(op_MUL_59), DUP(op_MUL_59))); + RzILOpPure *op_LSHIFT_63 = SHIFTL0(op_SUB_61, SN(32, 0)); + RzILOpPure *op_ADD_64 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_16), DUP(op_AND_16))), CAST(32, MSB(DUP(op_AND_16)), DUP(op_AND_16))), op_LSHIFT_63); + RzILOpPure 
*op_RSHIFT_73 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_75 = LOGAND(op_RSHIFT_73, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_81 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_84 = LOGAND(op_RSHIFT_81, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_90 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_93 = LOGAND(op_RSHIFT_90, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_96 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_84), DUP(op_AND_84))), CAST(16, MSB(DUP(op_AND_84)), DUP(op_AND_84))), CAST(32, MSB(CAST(16, MSB(op_AND_93), DUP(op_AND_93))), CAST(16, MSB(DUP(op_AND_93)), DUP(op_AND_93)))); + RzILOpPure *op_RSHIFT_101 = SHIFTRA(DUP(Rss), SN(32, 16)); + RzILOpPure *op_AND_104 = LOGAND(op_RSHIFT_101, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_110 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_113 = LOGAND(op_RSHIFT_110, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_116 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_104), DUP(op_AND_104))), CAST(16, MSB(DUP(op_AND_104)), DUP(op_AND_104))), CAST(32, MSB(CAST(16, MSB(op_AND_113), DUP(op_AND_113))), CAST(16, MSB(DUP(op_AND_113)), DUP(op_AND_113)))); + RzILOpPure *op_SUB_118 = SUB(CAST(64, MSB(op_MUL_96), DUP(op_MUL_96)), CAST(64, MSB(op_MUL_116), DUP(op_MUL_116))); + RzILOpPure *op_LSHIFT_120 = SHIFTL0(op_SUB_118, SN(32, 0)); + RzILOpPure *op_ADD_121 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_75), DUP(op_AND_75))), CAST(32, MSB(DUP(op_AND_75)), DUP(op_AND_75))), op_LSHIFT_120); + RzILOpPure *op_EQ_122 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_ADD_64), SN(32, 0), SN(32, 0x20)), op_ADD_121); + RzILOpPure *op_RSHIFT_181 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_183 = LOGAND(op_RSHIFT_181, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_189 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_192 = LOGAND(op_RSHIFT_189, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + 
RzILOpPure *op_RSHIFT_198 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_201 = LOGAND(op_RSHIFT_198, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_204 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_192), DUP(op_AND_192))), CAST(16, MSB(DUP(op_AND_192)), DUP(op_AND_192))), CAST(32, MSB(CAST(16, MSB(op_AND_201), DUP(op_AND_201))), CAST(16, MSB(DUP(op_AND_201)), DUP(op_AND_201)))); + RzILOpPure *op_RSHIFT_209 = SHIFTRA(DUP(Rss), SN(32, 16)); + RzILOpPure *op_AND_212 = LOGAND(op_RSHIFT_209, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_218 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_221 = LOGAND(op_RSHIFT_218, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_224 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_212), DUP(op_AND_212))), CAST(16, MSB(DUP(op_AND_212)), DUP(op_AND_212))), CAST(32, MSB(CAST(16, MSB(op_AND_221), DUP(op_AND_221))), CAST(16, MSB(DUP(op_AND_221)), DUP(op_AND_221)))); + RzILOpPure *op_SUB_226 = SUB(CAST(64, MSB(op_MUL_204), DUP(op_MUL_204)), CAST(64, MSB(op_MUL_224), DUP(op_MUL_224))); + RzILOpPure *op_LSHIFT_228 = SHIFTL0(op_SUB_226, SN(32, 0)); + RzILOpPure *op_ADD_229 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_183), DUP(op_AND_183))), CAST(32, MSB(DUP(op_AND_183)), DUP(op_AND_183))), op_LSHIFT_228); + RzILOpPure *op_LT_232 = SLT(op_ADD_229, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_237 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_238 = NEG(op_LSHIFT_237); + RzILOpPure *op_LSHIFT_243 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_246 = SUB(op_LSHIFT_243, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_247 = ITE(op_LT_232, op_NEG_238, op_SUB_246); + RzILOpEffect *gcc_expr_248 = BRANCH(op_EQ_122, EMPTY(), set_usr_field_call_177); + + // h_tmp369 = HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 
0xffff))))) - ((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff))))) << 0x0)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) - ((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff))))) << 0x0))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) - ((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff))))) << 0x0) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_250 = SETL("h_tmp369", cond_247); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx > ...; + RzILOpEffect *seq_251 = SEQN(2, gcc_expr_248, op_ASSIGN_hybrid_tmp_250); + + // Rxx = ((Rxx & (~(0xffffffff << 0x0))) | ((((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) - ((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff))))) << 0x0)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) - ((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff))))) << 0x0)) ? 
((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) - ((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff))))) << 0x0) : h_tmp369) & 0xffffffff) << 0x0)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0)); + RzILOpPure *op_NOT_6 = LOGNOT(op_LSHIFT_5); + RzILOpPure *op_AND_7 = LOGAND(READ_REG(pkt, Rxx_op, false), op_NOT_6); + RzILOpPure *op_RSHIFT_126 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_128 = LOGAND(op_RSHIFT_126, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_134 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_137 = LOGAND(op_RSHIFT_134, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_143 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_146 = LOGAND(op_RSHIFT_143, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_149 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_137), DUP(op_AND_137))), CAST(16, MSB(DUP(op_AND_137)), DUP(op_AND_137))), CAST(32, MSB(CAST(16, MSB(op_AND_146), DUP(op_AND_146))), CAST(16, MSB(DUP(op_AND_146)), DUP(op_AND_146)))); + RzILOpPure *op_RSHIFT_154 = SHIFTRA(DUP(Rss), SN(32, 16)); + RzILOpPure *op_AND_157 = LOGAND(op_RSHIFT_154, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_163 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_166 = LOGAND(op_RSHIFT_163, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_169 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_157), DUP(op_AND_157))), CAST(16, MSB(DUP(op_AND_157)), DUP(op_AND_157))), CAST(32, MSB(CAST(16, MSB(op_AND_166), DUP(op_AND_166))), CAST(16, MSB(DUP(op_AND_166)), DUP(op_AND_166)))); + RzILOpPure *op_SUB_171 = SUB(CAST(64, MSB(op_MUL_149), DUP(op_MUL_149)), CAST(64, MSB(op_MUL_169), DUP(op_MUL_169))); + RzILOpPure *op_LSHIFT_173 = SHIFTL0(op_SUB_171, SN(32, 0)); + RzILOpPure *op_ADD_174 = 
ADD(CAST(64, MSB(CAST(32, MSB(op_AND_128), DUP(op_AND_128))), CAST(32, MSB(DUP(op_AND_128)), DUP(op_AND_128))), op_LSHIFT_173); + RzILOpPure *cond_252 = ITE(DUP(op_EQ_122), op_ADD_174, VARL("h_tmp369")); + RzILOpPure *op_AND_254 = LOGAND(cond_252, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_258 = SHIFTL0(op_AND_254, SN(32, 0)); + RzILOpPure *op_OR_259 = LOGOR(op_AND_7, op_LSHIFT_258); + RzILOpEffect *op_ASSIGN_260 = WRITE_REG(bundle, Rxx_op, op_OR_259); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((R ...; + RzILOpEffect *seq_261 = SEQN(2, seq_251, op_ASSIGN_260); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_437 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) - ((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff))))) << 0x0)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) - ((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff))))) << 0x0))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) - ((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff))))) << 0x0) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_276 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_278 = LOGAND(op_RSHIFT_276, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_284 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_287 = LOGAND(op_RSHIFT_284, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_293 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_296 = LOGAND(op_RSHIFT_293, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_299 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_287), DUP(op_AND_287))), CAST(16, MSB(DUP(op_AND_287)), DUP(op_AND_287))), CAST(32, MSB(CAST(16, MSB(op_AND_296), DUP(op_AND_296))), CAST(16, MSB(DUP(op_AND_296)), DUP(op_AND_296)))); + RzILOpPure *op_RSHIFT_304 = SHIFTRA(DUP(Rss), SN(32, 0x30)); + RzILOpPure *op_AND_307 = LOGAND(op_RSHIFT_304, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_313 = SHIFTRA(DUP(Rtt), SN(32, 0x30)); + RzILOpPure *op_AND_316 = LOGAND(op_RSHIFT_313, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_319 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_307), DUP(op_AND_307))), CAST(16, MSB(DUP(op_AND_307)), DUP(op_AND_307))), CAST(32, MSB(CAST(16, MSB(op_AND_316), DUP(op_AND_316))), CAST(16, MSB(DUP(op_AND_316)), DUP(op_AND_316)))); + RzILOpPure *op_SUB_321 = SUB(CAST(64, MSB(op_MUL_299), DUP(op_MUL_299)), CAST(64, MSB(op_MUL_319), DUP(op_MUL_319))); + RzILOpPure *op_LSHIFT_323 = SHIFTL0(op_SUB_321, SN(32, 0)); + RzILOpPure *op_ADD_324 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_278), DUP(op_AND_278))), CAST(32, MSB(DUP(op_AND_278)), DUP(op_AND_278))), op_LSHIFT_323); + RzILOpPure *op_RSHIFT_333 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_335 = LOGAND(op_RSHIFT_333, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_341 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_344 = LOGAND(op_RSHIFT_341, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + 
RzILOpPure *op_RSHIFT_350 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_353 = LOGAND(op_RSHIFT_350, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_356 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_344), DUP(op_AND_344))), CAST(16, MSB(DUP(op_AND_344)), DUP(op_AND_344))), CAST(32, MSB(CAST(16, MSB(op_AND_353), DUP(op_AND_353))), CAST(16, MSB(DUP(op_AND_353)), DUP(op_AND_353)))); + RzILOpPure *op_RSHIFT_361 = SHIFTRA(DUP(Rss), SN(32, 0x30)); + RzILOpPure *op_AND_364 = LOGAND(op_RSHIFT_361, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_370 = SHIFTRA(DUP(Rtt), SN(32, 0x30)); + RzILOpPure *op_AND_373 = LOGAND(op_RSHIFT_370, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_376 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_364), DUP(op_AND_364))), CAST(16, MSB(DUP(op_AND_364)), DUP(op_AND_364))), CAST(32, MSB(CAST(16, MSB(op_AND_373), DUP(op_AND_373))), CAST(16, MSB(DUP(op_AND_373)), DUP(op_AND_373)))); + RzILOpPure *op_SUB_378 = SUB(CAST(64, MSB(op_MUL_356), DUP(op_MUL_356)), CAST(64, MSB(op_MUL_376), DUP(op_MUL_376))); + RzILOpPure *op_LSHIFT_380 = SHIFTL0(op_SUB_378, SN(32, 0)); + RzILOpPure *op_ADD_381 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_335), DUP(op_AND_335))), CAST(32, MSB(DUP(op_AND_335)), DUP(op_AND_335))), op_LSHIFT_380); + RzILOpPure *op_EQ_382 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_ADD_324), SN(32, 0), SN(32, 0x20)), op_ADD_381); + RzILOpPure *op_RSHIFT_441 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_443 = LOGAND(op_RSHIFT_441, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_449 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_452 = LOGAND(op_RSHIFT_449, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_458 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_461 = LOGAND(op_RSHIFT_458, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_464 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_452), DUP(op_AND_452))), CAST(16, 
MSB(DUP(op_AND_452)), DUP(op_AND_452))), CAST(32, MSB(CAST(16, MSB(op_AND_461), DUP(op_AND_461))), CAST(16, MSB(DUP(op_AND_461)), DUP(op_AND_461)))); + RzILOpPure *op_RSHIFT_469 = SHIFTRA(DUP(Rss), SN(32, 0x30)); + RzILOpPure *op_AND_472 = LOGAND(op_RSHIFT_469, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_478 = SHIFTRA(DUP(Rtt), SN(32, 0x30)); + RzILOpPure *op_AND_481 = LOGAND(op_RSHIFT_478, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_484 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_472), DUP(op_AND_472))), CAST(16, MSB(DUP(op_AND_472)), DUP(op_AND_472))), CAST(32, MSB(CAST(16, MSB(op_AND_481), DUP(op_AND_481))), CAST(16, MSB(DUP(op_AND_481)), DUP(op_AND_481)))); + RzILOpPure *op_SUB_486 = SUB(CAST(64, MSB(op_MUL_464), DUP(op_MUL_464)), CAST(64, MSB(op_MUL_484), DUP(op_MUL_484))); + RzILOpPure *op_LSHIFT_488 = SHIFTL0(op_SUB_486, SN(32, 0)); + RzILOpPure *op_ADD_489 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_443), DUP(op_AND_443))), CAST(32, MSB(DUP(op_AND_443)), DUP(op_AND_443))), op_LSHIFT_488); + RzILOpPure *op_LT_492 = SLT(op_ADD_489, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_497 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_498 = NEG(op_LSHIFT_497); + RzILOpPure *op_LSHIFT_503 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_506 = SUB(op_LSHIFT_503, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_507 = ITE(op_LT_492, op_NEG_498, op_SUB_506); + RzILOpEffect *gcc_expr_508 = BRANCH(op_EQ_382, EMPTY(), set_usr_field_call_437); + + // h_tmp370 = HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) - ((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff))))) << 0x0)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 
0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) - ((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff))))) << 0x0))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) - ((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff))))) << 0x0) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_510 = SETL("h_tmp370", cond_507); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx > ...; + RzILOpEffect *seq_511 = SEQN(2, gcc_expr_508, op_ASSIGN_hybrid_tmp_510); + + // Rxx = ((Rxx & (~(0xffffffff << 0x20))) | ((((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) - ((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff))))) << 0x0)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) - ((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff))))) << 0x0)) ? 
((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) - ((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff))))) << 0x0) : h_tmp370) & 0xffffffff) << 0x20)); + RzILOpPure *op_LSHIFT_267 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0x20)); + RzILOpPure *op_NOT_268 = LOGNOT(op_LSHIFT_267); + RzILOpPure *op_AND_269 = LOGAND(READ_REG(pkt, Rxx_op, false), op_NOT_268); + RzILOpPure *op_RSHIFT_386 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_388 = LOGAND(op_RSHIFT_386, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_394 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_397 = LOGAND(op_RSHIFT_394, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_403 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_406 = LOGAND(op_RSHIFT_403, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_409 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_397), DUP(op_AND_397))), CAST(16, MSB(DUP(op_AND_397)), DUP(op_AND_397))), CAST(32, MSB(CAST(16, MSB(op_AND_406), DUP(op_AND_406))), CAST(16, MSB(DUP(op_AND_406)), DUP(op_AND_406)))); + RzILOpPure *op_RSHIFT_414 = SHIFTRA(DUP(Rss), SN(32, 0x30)); + RzILOpPure *op_AND_417 = LOGAND(op_RSHIFT_414, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_423 = SHIFTRA(DUP(Rtt), SN(32, 0x30)); + RzILOpPure *op_AND_426 = LOGAND(op_RSHIFT_423, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_429 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_417), DUP(op_AND_417))), CAST(16, MSB(DUP(op_AND_417)), DUP(op_AND_417))), CAST(32, MSB(CAST(16, MSB(op_AND_426), DUP(op_AND_426))), CAST(16, MSB(DUP(op_AND_426)), DUP(op_AND_426)))); + RzILOpPure *op_SUB_431 = SUB(CAST(64, MSB(op_MUL_409), DUP(op_MUL_409)), CAST(64, MSB(op_MUL_429), DUP(op_MUL_429))); + RzILOpPure *op_LSHIFT_433 = SHIFTL0(op_SUB_431, SN(32, 0)); + 
RzILOpPure *op_ADD_434 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_388), DUP(op_AND_388))), CAST(32, MSB(DUP(op_AND_388)), DUP(op_AND_388))), op_LSHIFT_433); + RzILOpPure *cond_512 = ITE(DUP(op_EQ_382), op_ADD_434, VARL("h_tmp370")); + RzILOpPure *op_AND_514 = LOGAND(cond_512, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_518 = SHIFTL0(op_AND_514, SN(32, 0x20)); + RzILOpPure *op_OR_519 = LOGOR(op_AND_269, op_LSHIFT_518); + RzILOpEffect *op_ASSIGN_520 = WRITE_REG(bundle, Rxx_op, op_OR_519); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((R ...; + RzILOpEffect *seq_521 = SEQN(2, seq_511, op_ASSIGN_520); + + RzILOpEffect *instruction_sequence = SEQN(2, seq_261, seq_521); + return instruction_sequence; +} + +// Rdd = vcmpyi(Rss,Rtt):sat +RzILOpEffect *hex_il_op_m2_vcmpy_s0_sat_i(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_150 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) + ((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff))))) << 0x0)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) + ((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff))))) << 0x0))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) ((st32) ((st16) ((Rss >> 0x10) & 
((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) + ((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff))))) << 0x0) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_15 = SHIFTRA(Rss, SN(32, 16)); + RzILOpPure *op_AND_18 = LOGAND(op_RSHIFT_15, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_25 = SHIFTRA(Rtt, SN(32, 0)); + RzILOpPure *op_AND_28 = LOGAND(op_RSHIFT_25, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_31 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_18), DUP(op_AND_18))), CAST(16, MSB(DUP(op_AND_18)), DUP(op_AND_18))), CAST(32, MSB(CAST(16, MSB(op_AND_28), DUP(op_AND_28))), CAST(16, MSB(DUP(op_AND_28)), DUP(op_AND_28)))); + RzILOpPure *op_RSHIFT_36 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_39 = LOGAND(op_RSHIFT_36, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_45 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_48 = LOGAND(op_RSHIFT_45, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_51 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_39), DUP(op_AND_39))), CAST(16, MSB(DUP(op_AND_39)), DUP(op_AND_39))), CAST(32, MSB(CAST(16, MSB(op_AND_48), DUP(op_AND_48))), CAST(16, MSB(DUP(op_AND_48)), DUP(op_AND_48)))); + RzILOpPure *op_ADD_53 = ADD(CAST(64, MSB(op_MUL_31), DUP(op_MUL_31)), CAST(64, MSB(op_MUL_51), DUP(op_MUL_51))); + RzILOpPure *op_LSHIFT_55 = SHIFTL0(op_ADD_53, SN(32, 0)); + RzILOpPure *op_RSHIFT_64 = SHIFTRA(DUP(Rss), SN(32, 16)); + RzILOpPure *op_AND_67 = LOGAND(op_RSHIFT_64, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_73 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_76 = LOGAND(op_RSHIFT_73, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_79 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_67), DUP(op_AND_67))), CAST(16, MSB(DUP(op_AND_67)), DUP(op_AND_67))), CAST(32, MSB(CAST(16, 
MSB(op_AND_76), DUP(op_AND_76))), CAST(16, MSB(DUP(op_AND_76)), DUP(op_AND_76)))); + RzILOpPure *op_RSHIFT_84 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_87 = LOGAND(op_RSHIFT_84, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_93 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_96 = LOGAND(op_RSHIFT_93, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_99 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_87), DUP(op_AND_87))), CAST(16, MSB(DUP(op_AND_87)), DUP(op_AND_87))), CAST(32, MSB(CAST(16, MSB(op_AND_96), DUP(op_AND_96))), CAST(16, MSB(DUP(op_AND_96)), DUP(op_AND_96)))); + RzILOpPure *op_ADD_101 = ADD(CAST(64, MSB(op_MUL_79), DUP(op_MUL_79)), CAST(64, MSB(op_MUL_99), DUP(op_MUL_99))); + RzILOpPure *op_LSHIFT_103 = SHIFTL0(op_ADD_101, SN(32, 0)); + RzILOpPure *op_EQ_104 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_LSHIFT_55), SN(32, 0), SN(32, 0x20)), op_LSHIFT_103); + RzILOpPure *op_RSHIFT_154 = SHIFTRA(DUP(Rss), SN(32, 16)); + RzILOpPure *op_AND_157 = LOGAND(op_RSHIFT_154, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_163 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_166 = LOGAND(op_RSHIFT_163, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_169 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_157), DUP(op_AND_157))), CAST(16, MSB(DUP(op_AND_157)), DUP(op_AND_157))), CAST(32, MSB(CAST(16, MSB(op_AND_166), DUP(op_AND_166))), CAST(16, MSB(DUP(op_AND_166)), DUP(op_AND_166)))); + RzILOpPure *op_RSHIFT_174 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_177 = LOGAND(op_RSHIFT_174, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_183 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_186 = LOGAND(op_RSHIFT_183, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_189 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_177), DUP(op_AND_177))), CAST(16, MSB(DUP(op_AND_177)), DUP(op_AND_177))), CAST(32, MSB(CAST(16, MSB(op_AND_186), DUP(op_AND_186))), 
CAST(16, MSB(DUP(op_AND_186)), DUP(op_AND_186)))); + RzILOpPure *op_ADD_191 = ADD(CAST(64, MSB(op_MUL_169), DUP(op_MUL_169)), CAST(64, MSB(op_MUL_189), DUP(op_MUL_189))); + RzILOpPure *op_LSHIFT_193 = SHIFTL0(op_ADD_191, SN(32, 0)); + RzILOpPure *op_LT_196 = SLT(op_LSHIFT_193, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_201 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_202 = NEG(op_LSHIFT_201); + RzILOpPure *op_LSHIFT_207 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_210 = SUB(op_LSHIFT_207, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_211 = ITE(op_LT_196, op_NEG_202, op_SUB_210); + RzILOpEffect *gcc_expr_212 = BRANCH(op_EQ_104, EMPTY(), set_usr_field_call_150); + + // h_tmp371 = HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) + ((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff))))) << 0x0)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) + ((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff))))) << 0x0))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) + ((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff))))) << 0x0) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_214 = SETL("h_tmp371", cond_211); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16 ...; + RzILOpEffect *seq_215 = SEQN(2, gcc_expr_212, op_ASSIGN_hybrid_tmp_214); + + // Rdd = ((Rdd & (~(0xffffffff << 0x0))) | ((((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) + ((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff))))) << 0x0)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) + ((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff))))) << 0x0)) ? (((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) + ((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff))))) << 0x0) : h_tmp371) & 0xffffffff) << 0x0)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0)); + RzILOpPure *op_NOT_6 = LOGNOT(op_LSHIFT_5); + RzILOpPure *op_AND_7 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_6); + RzILOpPure *op_RSHIFT_108 = SHIFTRA(DUP(Rss), SN(32, 16)); + RzILOpPure *op_AND_111 = LOGAND(op_RSHIFT_108, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_117 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_120 = LOGAND(op_RSHIFT_117, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_123 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_111), DUP(op_AND_111))), CAST(16, MSB(DUP(op_AND_111)), DUP(op_AND_111))), CAST(32, MSB(CAST(16, MSB(op_AND_120), DUP(op_AND_120))), CAST(16, MSB(DUP(op_AND_120)), DUP(op_AND_120)))); + RzILOpPure *op_RSHIFT_128 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_131 = LOGAND(op_RSHIFT_128, CAST(64, MSB(SN(32, 
0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_137 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_140 = LOGAND(op_RSHIFT_137, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_143 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_131), DUP(op_AND_131))), CAST(16, MSB(DUP(op_AND_131)), DUP(op_AND_131))), CAST(32, MSB(CAST(16, MSB(op_AND_140), DUP(op_AND_140))), CAST(16, MSB(DUP(op_AND_140)), DUP(op_AND_140)))); + RzILOpPure *op_ADD_145 = ADD(CAST(64, MSB(op_MUL_123), DUP(op_MUL_123)), CAST(64, MSB(op_MUL_143), DUP(op_MUL_143))); + RzILOpPure *op_LSHIFT_147 = SHIFTL0(op_ADD_145, SN(32, 0)); + RzILOpPure *cond_216 = ITE(DUP(op_EQ_104), op_LSHIFT_147, VARL("h_tmp371")); + RzILOpPure *op_AND_218 = LOGAND(cond_216, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_222 = SHIFTL0(op_AND_218, SN(32, 0)); + RzILOpPure *op_OR_223 = LOGOR(op_AND_7, op_LSHIFT_222); + RzILOpEffect *op_ASSIGN_224 = WRITE_REG(bundle, Rdd_op, op_OR_223); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) (( ...; + RzILOpEffect *seq_225 = SEQN(2, seq_215, op_ASSIGN_224); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_374 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) + ((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff))))) << 0x0)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) + ((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff))))) << 0x0))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 
0xffff))))) + ((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff))))) << 0x0) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_240 = SHIFTRA(DUP(Rss), SN(32, 0x30)); + RzILOpPure *op_AND_243 = LOGAND(op_RSHIFT_240, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_249 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_252 = LOGAND(op_RSHIFT_249, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_255 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_243), DUP(op_AND_243))), CAST(16, MSB(DUP(op_AND_243)), DUP(op_AND_243))), CAST(32, MSB(CAST(16, MSB(op_AND_252), DUP(op_AND_252))), CAST(16, MSB(DUP(op_AND_252)), DUP(op_AND_252)))); + RzILOpPure *op_RSHIFT_260 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_263 = LOGAND(op_RSHIFT_260, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_269 = SHIFTRA(DUP(Rtt), SN(32, 0x30)); + RzILOpPure *op_AND_272 = LOGAND(op_RSHIFT_269, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_275 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_263), DUP(op_AND_263))), CAST(16, MSB(DUP(op_AND_263)), DUP(op_AND_263))), CAST(32, MSB(CAST(16, MSB(op_AND_272), DUP(op_AND_272))), CAST(16, MSB(DUP(op_AND_272)), DUP(op_AND_272)))); + RzILOpPure *op_ADD_277 = ADD(CAST(64, MSB(op_MUL_255), DUP(op_MUL_255)), CAST(64, MSB(op_MUL_275), DUP(op_MUL_275))); + RzILOpPure *op_LSHIFT_279 = SHIFTL0(op_ADD_277, SN(32, 0)); + RzILOpPure *op_RSHIFT_288 = SHIFTRA(DUP(Rss), SN(32, 0x30)); + RzILOpPure *op_AND_291 = LOGAND(op_RSHIFT_288, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_297 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_300 = LOGAND(op_RSHIFT_297, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_303 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_291), DUP(op_AND_291))), CAST(16, MSB(DUP(op_AND_291)), DUP(op_AND_291))), CAST(32, 
MSB(CAST(16, MSB(op_AND_300), DUP(op_AND_300))), CAST(16, MSB(DUP(op_AND_300)), DUP(op_AND_300)))); + RzILOpPure *op_RSHIFT_308 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_311 = LOGAND(op_RSHIFT_308, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_317 = SHIFTRA(DUP(Rtt), SN(32, 0x30)); + RzILOpPure *op_AND_320 = LOGAND(op_RSHIFT_317, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_323 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_311), DUP(op_AND_311))), CAST(16, MSB(DUP(op_AND_311)), DUP(op_AND_311))), CAST(32, MSB(CAST(16, MSB(op_AND_320), DUP(op_AND_320))), CAST(16, MSB(DUP(op_AND_320)), DUP(op_AND_320)))); + RzILOpPure *op_ADD_325 = ADD(CAST(64, MSB(op_MUL_303), DUP(op_MUL_303)), CAST(64, MSB(op_MUL_323), DUP(op_MUL_323))); + RzILOpPure *op_LSHIFT_327 = SHIFTL0(op_ADD_325, SN(32, 0)); + RzILOpPure *op_EQ_328 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_LSHIFT_279), SN(32, 0), SN(32, 0x20)), op_LSHIFT_327); + RzILOpPure *op_RSHIFT_378 = SHIFTRA(DUP(Rss), SN(32, 0x30)); + RzILOpPure *op_AND_381 = LOGAND(op_RSHIFT_378, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_387 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_390 = LOGAND(op_RSHIFT_387, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_393 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_381), DUP(op_AND_381))), CAST(16, MSB(DUP(op_AND_381)), DUP(op_AND_381))), CAST(32, MSB(CAST(16, MSB(op_AND_390), DUP(op_AND_390))), CAST(16, MSB(DUP(op_AND_390)), DUP(op_AND_390)))); + RzILOpPure *op_RSHIFT_398 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_401 = LOGAND(op_RSHIFT_398, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_407 = SHIFTRA(DUP(Rtt), SN(32, 0x30)); + RzILOpPure *op_AND_410 = LOGAND(op_RSHIFT_407, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_413 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_401), DUP(op_AND_401))), CAST(16, MSB(DUP(op_AND_401)), DUP(op_AND_401))), 
CAST(32, MSB(CAST(16, MSB(op_AND_410), DUP(op_AND_410))), CAST(16, MSB(DUP(op_AND_410)), DUP(op_AND_410)))); + RzILOpPure *op_ADD_415 = ADD(CAST(64, MSB(op_MUL_393), DUP(op_MUL_393)), CAST(64, MSB(op_MUL_413), DUP(op_MUL_413))); + RzILOpPure *op_LSHIFT_417 = SHIFTL0(op_ADD_415, SN(32, 0)); + RzILOpPure *op_LT_420 = SLT(op_LSHIFT_417, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_425 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_426 = NEG(op_LSHIFT_425); + RzILOpPure *op_LSHIFT_431 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_434 = SUB(op_LSHIFT_431, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_435 = ITE(op_LT_420, op_NEG_426, op_SUB_434); + RzILOpEffect *gcc_expr_436 = BRANCH(op_EQ_328, EMPTY(), set_usr_field_call_374); + + // h_tmp372 = HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) + ((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff))))) << 0x0)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) + ((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff))))) << 0x0))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) + ((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff))))) << 0x0) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_438 = SETL("h_tmp372", cond_435); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16 ...; + RzILOpEffect *seq_439 = SEQN(2, gcc_expr_436, op_ASSIGN_hybrid_tmp_438); + + // Rdd = ((Rdd & (~(0xffffffff << 0x20))) | ((((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) + ((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff))))) << 0x0)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) + ((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff))))) << 0x0)) ? (((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) + ((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff))))) << 0x0) : h_tmp372) & 0xffffffff) << 0x20)); + RzILOpPure *op_LSHIFT_231 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0x20)); + RzILOpPure *op_NOT_232 = LOGNOT(op_LSHIFT_231); + RzILOpPure *op_AND_233 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_232); + RzILOpPure *op_RSHIFT_332 = SHIFTRA(DUP(Rss), SN(32, 0x30)); + RzILOpPure *op_AND_335 = LOGAND(op_RSHIFT_332, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_341 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_344 = LOGAND(op_RSHIFT_341, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_347 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_335), DUP(op_AND_335))), CAST(16, MSB(DUP(op_AND_335)), DUP(op_AND_335))), CAST(32, MSB(CAST(16, MSB(op_AND_344), DUP(op_AND_344))), CAST(16, MSB(DUP(op_AND_344)), DUP(op_AND_344)))); + RzILOpPure *op_RSHIFT_352 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_355 = 
LOGAND(op_RSHIFT_352, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_361 = SHIFTRA(DUP(Rtt), SN(32, 0x30)); + RzILOpPure *op_AND_364 = LOGAND(op_RSHIFT_361, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_367 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_355), DUP(op_AND_355))), CAST(16, MSB(DUP(op_AND_355)), DUP(op_AND_355))), CAST(32, MSB(CAST(16, MSB(op_AND_364), DUP(op_AND_364))), CAST(16, MSB(DUP(op_AND_364)), DUP(op_AND_364)))); + RzILOpPure *op_ADD_369 = ADD(CAST(64, MSB(op_MUL_347), DUP(op_MUL_347)), CAST(64, MSB(op_MUL_367), DUP(op_MUL_367))); + RzILOpPure *op_LSHIFT_371 = SHIFTL0(op_ADD_369, SN(32, 0)); + RzILOpPure *cond_440 = ITE(DUP(op_EQ_328), op_LSHIFT_371, VARL("h_tmp372")); + RzILOpPure *op_AND_442 = LOGAND(cond_440, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_446 = SHIFTL0(op_AND_442, SN(32, 0x20)); + RzILOpPure *op_OR_447 = LOGOR(op_AND_233, op_LSHIFT_446); + RzILOpEffect *op_ASSIGN_448 = WRITE_REG(bundle, Rdd_op, op_OR_447); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) (( ...; + RzILOpEffect *seq_449 = SEQN(2, seq_439, op_ASSIGN_448); + + RzILOpEffect *instruction_sequence = SEQN(2, seq_225, seq_449); + return instruction_sequence; +} + +// Rdd = vcmpyr(Rss,Rtt):sat +RzILOpEffect *hex_il_op_m2_vcmpy_s0_sat_r(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_150 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 
0xffff))))) - ((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff))))) << 0x0)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) - ((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff))))) << 0x0))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) - ((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff))))) << 0x0) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_15 = SHIFTRA(Rss, SN(32, 0)); + RzILOpPure *op_AND_18 = LOGAND(op_RSHIFT_15, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_25 = SHIFTRA(Rtt, SN(32, 0)); + RzILOpPure *op_AND_28 = LOGAND(op_RSHIFT_25, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_31 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_18), DUP(op_AND_18))), CAST(16, MSB(DUP(op_AND_18)), DUP(op_AND_18))), CAST(32, MSB(CAST(16, MSB(op_AND_28), DUP(op_AND_28))), CAST(16, MSB(DUP(op_AND_28)), DUP(op_AND_28)))); + RzILOpPure *op_RSHIFT_36 = SHIFTRA(DUP(Rss), SN(32, 16)); + RzILOpPure *op_AND_39 = LOGAND(op_RSHIFT_36, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_45 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_48 = LOGAND(op_RSHIFT_45, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_51 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_39), DUP(op_AND_39))), CAST(16, MSB(DUP(op_AND_39)), DUP(op_AND_39))), CAST(32, MSB(CAST(16, MSB(op_AND_48), DUP(op_AND_48))), CAST(16, MSB(DUP(op_AND_48)), DUP(op_AND_48)))); + RzILOpPure *op_SUB_53 = SUB(CAST(64, MSB(op_MUL_31), DUP(op_MUL_31)), CAST(64, MSB(op_MUL_51), DUP(op_MUL_51))); + RzILOpPure 
*op_LSHIFT_55 = SHIFTL0(op_SUB_53, SN(32, 0)); + RzILOpPure *op_RSHIFT_64 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_67 = LOGAND(op_RSHIFT_64, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_73 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_76 = LOGAND(op_RSHIFT_73, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_79 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_67), DUP(op_AND_67))), CAST(16, MSB(DUP(op_AND_67)), DUP(op_AND_67))), CAST(32, MSB(CAST(16, MSB(op_AND_76), DUP(op_AND_76))), CAST(16, MSB(DUP(op_AND_76)), DUP(op_AND_76)))); + RzILOpPure *op_RSHIFT_84 = SHIFTRA(DUP(Rss), SN(32, 16)); + RzILOpPure *op_AND_87 = LOGAND(op_RSHIFT_84, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_93 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_96 = LOGAND(op_RSHIFT_93, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_99 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_87), DUP(op_AND_87))), CAST(16, MSB(DUP(op_AND_87)), DUP(op_AND_87))), CAST(32, MSB(CAST(16, MSB(op_AND_96), DUP(op_AND_96))), CAST(16, MSB(DUP(op_AND_96)), DUP(op_AND_96)))); + RzILOpPure *op_SUB_101 = SUB(CAST(64, MSB(op_MUL_79), DUP(op_MUL_79)), CAST(64, MSB(op_MUL_99), DUP(op_MUL_99))); + RzILOpPure *op_LSHIFT_103 = SHIFTL0(op_SUB_101, SN(32, 0)); + RzILOpPure *op_EQ_104 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_LSHIFT_55), SN(32, 0), SN(32, 0x20)), op_LSHIFT_103); + RzILOpPure *op_RSHIFT_154 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_157 = LOGAND(op_RSHIFT_154, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_163 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_166 = LOGAND(op_RSHIFT_163, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_169 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_157), DUP(op_AND_157))), CAST(16, MSB(DUP(op_AND_157)), DUP(op_AND_157))), CAST(32, MSB(CAST(16, MSB(op_AND_166), DUP(op_AND_166))), CAST(16, MSB(DUP(op_AND_166)), DUP(op_AND_166)))); 
+ RzILOpPure *op_RSHIFT_174 = SHIFTRA(DUP(Rss), SN(32, 16)); + RzILOpPure *op_AND_177 = LOGAND(op_RSHIFT_174, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_183 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_186 = LOGAND(op_RSHIFT_183, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_189 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_177), DUP(op_AND_177))), CAST(16, MSB(DUP(op_AND_177)), DUP(op_AND_177))), CAST(32, MSB(CAST(16, MSB(op_AND_186), DUP(op_AND_186))), CAST(16, MSB(DUP(op_AND_186)), DUP(op_AND_186)))); + RzILOpPure *op_SUB_191 = SUB(CAST(64, MSB(op_MUL_169), DUP(op_MUL_169)), CAST(64, MSB(op_MUL_189), DUP(op_MUL_189))); + RzILOpPure *op_LSHIFT_193 = SHIFTL0(op_SUB_191, SN(32, 0)); + RzILOpPure *op_LT_196 = SLT(op_LSHIFT_193, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_201 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_202 = NEG(op_LSHIFT_201); + RzILOpPure *op_LSHIFT_207 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_210 = SUB(op_LSHIFT_207, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_211 = ITE(op_LT_196, op_NEG_202, op_SUB_210); + RzILOpEffect *gcc_expr_212 = BRANCH(op_EQ_104, EMPTY(), set_usr_field_call_150); + + // h_tmp373 = HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) - ((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff))))) << 0x0)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) - ((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff))))) << 0x0))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) - ((st64) ((st32) ((st16) 
((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff))))) << 0x0) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_214 = SETL("h_tmp373", cond_211); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16 ...; + RzILOpEffect *seq_215 = SEQN(2, gcc_expr_212, op_ASSIGN_hybrid_tmp_214); + + // Rdd = ((Rdd & (~(0xffffffff << 0x0))) | ((((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) - ((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff))))) << 0x0)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) - ((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff))))) << 0x0)) ? (((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) - ((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff))))) << 0x0) : h_tmp373) & 0xffffffff) << 0x0)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0)); + RzILOpPure *op_NOT_6 = LOGNOT(op_LSHIFT_5); + RzILOpPure *op_AND_7 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_6); + RzILOpPure *op_RSHIFT_108 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_111 = LOGAND(op_RSHIFT_108, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_117 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_120 = LOGAND(op_RSHIFT_117, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_123 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_111), DUP(op_AND_111))), CAST(16, MSB(DUP(op_AND_111)), DUP(op_AND_111))), CAST(32, MSB(CAST(16, MSB(op_AND_120), DUP(op_AND_120))), CAST(16, MSB(DUP(op_AND_120)), DUP(op_AND_120)))); + RzILOpPure 
*op_RSHIFT_128 = SHIFTRA(DUP(Rss), SN(32, 16)); + RzILOpPure *op_AND_131 = LOGAND(op_RSHIFT_128, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_137 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_140 = LOGAND(op_RSHIFT_137, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_143 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_131), DUP(op_AND_131))), CAST(16, MSB(DUP(op_AND_131)), DUP(op_AND_131))), CAST(32, MSB(CAST(16, MSB(op_AND_140), DUP(op_AND_140))), CAST(16, MSB(DUP(op_AND_140)), DUP(op_AND_140)))); + RzILOpPure *op_SUB_145 = SUB(CAST(64, MSB(op_MUL_123), DUP(op_MUL_123)), CAST(64, MSB(op_MUL_143), DUP(op_MUL_143))); + RzILOpPure *op_LSHIFT_147 = SHIFTL0(op_SUB_145, SN(32, 0)); + RzILOpPure *cond_216 = ITE(DUP(op_EQ_104), op_LSHIFT_147, VARL("h_tmp373")); + RzILOpPure *op_AND_218 = LOGAND(cond_216, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_222 = SHIFTL0(op_AND_218, SN(32, 0)); + RzILOpPure *op_OR_223 = LOGOR(op_AND_7, op_LSHIFT_222); + RzILOpEffect *op_ASSIGN_224 = WRITE_REG(bundle, Rdd_op, op_OR_223); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) (( ...; + RzILOpEffect *seq_225 = SEQN(2, seq_215, op_ASSIGN_224); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_374 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) - ((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff))))) << 0x0)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) - ((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff))))) << 0x0))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 
0x1))}, (((((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) - ((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff))))) << 0x0) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_240 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_243 = LOGAND(op_RSHIFT_240, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_249 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_252 = LOGAND(op_RSHIFT_249, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_255 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_243), DUP(op_AND_243))), CAST(16, MSB(DUP(op_AND_243)), DUP(op_AND_243))), CAST(32, MSB(CAST(16, MSB(op_AND_252), DUP(op_AND_252))), CAST(16, MSB(DUP(op_AND_252)), DUP(op_AND_252)))); + RzILOpPure *op_RSHIFT_260 = SHIFTRA(DUP(Rss), SN(32, 0x30)); + RzILOpPure *op_AND_263 = LOGAND(op_RSHIFT_260, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_269 = SHIFTRA(DUP(Rtt), SN(32, 0x30)); + RzILOpPure *op_AND_272 = LOGAND(op_RSHIFT_269, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_275 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_263), DUP(op_AND_263))), CAST(16, MSB(DUP(op_AND_263)), DUP(op_AND_263))), CAST(32, MSB(CAST(16, MSB(op_AND_272), DUP(op_AND_272))), CAST(16, MSB(DUP(op_AND_272)), DUP(op_AND_272)))); + RzILOpPure *op_SUB_277 = SUB(CAST(64, MSB(op_MUL_255), DUP(op_MUL_255)), CAST(64, MSB(op_MUL_275), DUP(op_MUL_275))); + RzILOpPure *op_LSHIFT_279 = SHIFTL0(op_SUB_277, SN(32, 0)); + RzILOpPure *op_RSHIFT_288 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_291 = LOGAND(op_RSHIFT_288, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_297 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_300 = LOGAND(op_RSHIFT_297, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_303 = MUL(CAST(32, 
MSB(CAST(16, MSB(op_AND_291), DUP(op_AND_291))), CAST(16, MSB(DUP(op_AND_291)), DUP(op_AND_291))), CAST(32, MSB(CAST(16, MSB(op_AND_300), DUP(op_AND_300))), CAST(16, MSB(DUP(op_AND_300)), DUP(op_AND_300)))); + RzILOpPure *op_RSHIFT_308 = SHIFTRA(DUP(Rss), SN(32, 0x30)); + RzILOpPure *op_AND_311 = LOGAND(op_RSHIFT_308, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_317 = SHIFTRA(DUP(Rtt), SN(32, 0x30)); + RzILOpPure *op_AND_320 = LOGAND(op_RSHIFT_317, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_323 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_311), DUP(op_AND_311))), CAST(16, MSB(DUP(op_AND_311)), DUP(op_AND_311))), CAST(32, MSB(CAST(16, MSB(op_AND_320), DUP(op_AND_320))), CAST(16, MSB(DUP(op_AND_320)), DUP(op_AND_320)))); + RzILOpPure *op_SUB_325 = SUB(CAST(64, MSB(op_MUL_303), DUP(op_MUL_303)), CAST(64, MSB(op_MUL_323), DUP(op_MUL_323))); + RzILOpPure *op_LSHIFT_327 = SHIFTL0(op_SUB_325, SN(32, 0)); + RzILOpPure *op_EQ_328 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_LSHIFT_279), SN(32, 0), SN(32, 0x20)), op_LSHIFT_327); + RzILOpPure *op_RSHIFT_378 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_381 = LOGAND(op_RSHIFT_378, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_387 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_390 = LOGAND(op_RSHIFT_387, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_393 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_381), DUP(op_AND_381))), CAST(16, MSB(DUP(op_AND_381)), DUP(op_AND_381))), CAST(32, MSB(CAST(16, MSB(op_AND_390), DUP(op_AND_390))), CAST(16, MSB(DUP(op_AND_390)), DUP(op_AND_390)))); + RzILOpPure *op_RSHIFT_398 = SHIFTRA(DUP(Rss), SN(32, 0x30)); + RzILOpPure *op_AND_401 = LOGAND(op_RSHIFT_398, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_407 = SHIFTRA(DUP(Rtt), SN(32, 0x30)); + RzILOpPure *op_AND_410 = LOGAND(op_RSHIFT_407, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_413 = 
MUL(CAST(32, MSB(CAST(16, MSB(op_AND_401), DUP(op_AND_401))), CAST(16, MSB(DUP(op_AND_401)), DUP(op_AND_401))), CAST(32, MSB(CAST(16, MSB(op_AND_410), DUP(op_AND_410))), CAST(16, MSB(DUP(op_AND_410)), DUP(op_AND_410)))); + RzILOpPure *op_SUB_415 = SUB(CAST(64, MSB(op_MUL_393), DUP(op_MUL_393)), CAST(64, MSB(op_MUL_413), DUP(op_MUL_413))); + RzILOpPure *op_LSHIFT_417 = SHIFTL0(op_SUB_415, SN(32, 0)); + RzILOpPure *op_LT_420 = SLT(op_LSHIFT_417, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_425 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_426 = NEG(op_LSHIFT_425); + RzILOpPure *op_LSHIFT_431 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_434 = SUB(op_LSHIFT_431, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_435 = ITE(op_LT_420, op_NEG_426, op_SUB_434); + RzILOpEffect *gcc_expr_436 = BRANCH(op_EQ_328, EMPTY(), set_usr_field_call_374); + + // h_tmp374 = HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) - ((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff))))) << 0x0)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) - ((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff))))) << 0x0))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) - ((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff))))) << 0x0) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_438 = SETL("h_tmp374", cond_435); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16 ...; + RzILOpEffect *seq_439 = SEQN(2, gcc_expr_436, op_ASSIGN_hybrid_tmp_438); + + // Rdd = ((Rdd & (~(0xffffffff << 0x20))) | ((((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) - ((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff))))) << 0x0)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) - ((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff))))) << 0x0)) ? (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) - ((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff))))) << 0x0) : h_tmp374) & 0xffffffff) << 0x20)); + RzILOpPure *op_LSHIFT_231 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0x20)); + RzILOpPure *op_NOT_232 = LOGNOT(op_LSHIFT_231); + RzILOpPure *op_AND_233 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_232); + RzILOpPure *op_RSHIFT_332 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_335 = LOGAND(op_RSHIFT_332, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_341 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_344 = LOGAND(op_RSHIFT_341, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_347 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_335), DUP(op_AND_335))), CAST(16, MSB(DUP(op_AND_335)), DUP(op_AND_335))), CAST(32, MSB(CAST(16, MSB(op_AND_344), DUP(op_AND_344))), CAST(16, MSB(DUP(op_AND_344)), DUP(op_AND_344)))); + RzILOpPure *op_RSHIFT_352 = SHIFTRA(DUP(Rss), SN(32, 0x30)); + RzILOpPure *op_AND_355 = 
LOGAND(op_RSHIFT_352, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_361 = SHIFTRA(DUP(Rtt), SN(32, 0x30)); + RzILOpPure *op_AND_364 = LOGAND(op_RSHIFT_361, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_367 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_355), DUP(op_AND_355))), CAST(16, MSB(DUP(op_AND_355)), DUP(op_AND_355))), CAST(32, MSB(CAST(16, MSB(op_AND_364), DUP(op_AND_364))), CAST(16, MSB(DUP(op_AND_364)), DUP(op_AND_364)))); + RzILOpPure *op_SUB_369 = SUB(CAST(64, MSB(op_MUL_347), DUP(op_MUL_347)), CAST(64, MSB(op_MUL_367), DUP(op_MUL_367))); + RzILOpPure *op_LSHIFT_371 = SHIFTL0(op_SUB_369, SN(32, 0)); + RzILOpPure *cond_440 = ITE(DUP(op_EQ_328), op_LSHIFT_371, VARL("h_tmp374")); + RzILOpPure *op_AND_442 = LOGAND(cond_440, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_446 = SHIFTL0(op_AND_442, SN(32, 0x20)); + RzILOpPure *op_OR_447 = LOGOR(op_AND_233, op_LSHIFT_446); + RzILOpEffect *op_ASSIGN_448 = WRITE_REG(bundle, Rdd_op, op_OR_447); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) (( ...; + RzILOpEffect *seq_449 = SEQN(2, seq_439, op_ASSIGN_448); + + RzILOpEffect *instruction_sequence = SEQN(2, seq_225, seq_449); + return instruction_sequence; +} + +// Rdd = vcmpyi(Rss,Rtt):<<1:sat +RzILOpEffect *hex_il_op_m2_vcmpy_s1_sat_i(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_150 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & 
((st64) 0xffff))))) + ((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff))))) << 0x1)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) + ((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff))))) << 0x1))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) + ((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff))))) << 0x1) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_15 = SHIFTRA(Rss, SN(32, 16)); + RzILOpPure *op_AND_18 = LOGAND(op_RSHIFT_15, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_25 = SHIFTRA(Rtt, SN(32, 0)); + RzILOpPure *op_AND_28 = LOGAND(op_RSHIFT_25, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_31 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_18), DUP(op_AND_18))), CAST(16, MSB(DUP(op_AND_18)), DUP(op_AND_18))), CAST(32, MSB(CAST(16, MSB(op_AND_28), DUP(op_AND_28))), CAST(16, MSB(DUP(op_AND_28)), DUP(op_AND_28)))); + RzILOpPure *op_RSHIFT_36 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_39 = LOGAND(op_RSHIFT_36, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_45 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_48 = LOGAND(op_RSHIFT_45, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_51 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_39), DUP(op_AND_39))), CAST(16, MSB(DUP(op_AND_39)), DUP(op_AND_39))), CAST(32, MSB(CAST(16, MSB(op_AND_48), DUP(op_AND_48))), CAST(16, MSB(DUP(op_AND_48)), DUP(op_AND_48)))); + RzILOpPure *op_ADD_53 = ADD(CAST(64, MSB(op_MUL_31), DUP(op_MUL_31)), CAST(64, MSB(op_MUL_51), DUP(op_MUL_51))); + RzILOpPure 
*op_LSHIFT_55 = SHIFTL0(op_ADD_53, SN(32, 1)); + RzILOpPure *op_RSHIFT_64 = SHIFTRA(DUP(Rss), SN(32, 16)); + RzILOpPure *op_AND_67 = LOGAND(op_RSHIFT_64, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_73 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_76 = LOGAND(op_RSHIFT_73, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_79 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_67), DUP(op_AND_67))), CAST(16, MSB(DUP(op_AND_67)), DUP(op_AND_67))), CAST(32, MSB(CAST(16, MSB(op_AND_76), DUP(op_AND_76))), CAST(16, MSB(DUP(op_AND_76)), DUP(op_AND_76)))); + RzILOpPure *op_RSHIFT_84 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_87 = LOGAND(op_RSHIFT_84, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_93 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_96 = LOGAND(op_RSHIFT_93, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_99 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_87), DUP(op_AND_87))), CAST(16, MSB(DUP(op_AND_87)), DUP(op_AND_87))), CAST(32, MSB(CAST(16, MSB(op_AND_96), DUP(op_AND_96))), CAST(16, MSB(DUP(op_AND_96)), DUP(op_AND_96)))); + RzILOpPure *op_ADD_101 = ADD(CAST(64, MSB(op_MUL_79), DUP(op_MUL_79)), CAST(64, MSB(op_MUL_99), DUP(op_MUL_99))); + RzILOpPure *op_LSHIFT_103 = SHIFTL0(op_ADD_101, SN(32, 1)); + RzILOpPure *op_EQ_104 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_LSHIFT_55), SN(32, 0), SN(32, 0x20)), op_LSHIFT_103); + RzILOpPure *op_RSHIFT_154 = SHIFTRA(DUP(Rss), SN(32, 16)); + RzILOpPure *op_AND_157 = LOGAND(op_RSHIFT_154, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_163 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_166 = LOGAND(op_RSHIFT_163, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_169 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_157), DUP(op_AND_157))), CAST(16, MSB(DUP(op_AND_157)), DUP(op_AND_157))), CAST(32, MSB(CAST(16, MSB(op_AND_166), DUP(op_AND_166))), CAST(16, MSB(DUP(op_AND_166)), DUP(op_AND_166)))); 
+ RzILOpPure *op_RSHIFT_174 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_177 = LOGAND(op_RSHIFT_174, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_183 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_186 = LOGAND(op_RSHIFT_183, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_189 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_177), DUP(op_AND_177))), CAST(16, MSB(DUP(op_AND_177)), DUP(op_AND_177))), CAST(32, MSB(CAST(16, MSB(op_AND_186), DUP(op_AND_186))), CAST(16, MSB(DUP(op_AND_186)), DUP(op_AND_186)))); + RzILOpPure *op_ADD_191 = ADD(CAST(64, MSB(op_MUL_169), DUP(op_MUL_169)), CAST(64, MSB(op_MUL_189), DUP(op_MUL_189))); + RzILOpPure *op_LSHIFT_193 = SHIFTL0(op_ADD_191, SN(32, 1)); + RzILOpPure *op_LT_196 = SLT(op_LSHIFT_193, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_201 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_202 = NEG(op_LSHIFT_201); + RzILOpPure *op_LSHIFT_207 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_210 = SUB(op_LSHIFT_207, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_211 = ITE(op_LT_196, op_NEG_202, op_SUB_210); + RzILOpEffect *gcc_expr_212 = BRANCH(op_EQ_104, EMPTY(), set_usr_field_call_150); + + // h_tmp375 = HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) + ((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff))))) << 0x1)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) + ((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff))))) << 0x1))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) + ((st64) ((st32) ((st16) 
((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff))))) << 0x1) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_214 = SETL("h_tmp375", cond_211); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16 ...; + RzILOpEffect *seq_215 = SEQN(2, gcc_expr_212, op_ASSIGN_hybrid_tmp_214); + + // Rdd = ((Rdd & (~(0xffffffff << 0x0))) | ((((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) + ((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff))))) << 0x1)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) + ((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff))))) << 0x1)) ? (((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) + ((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff))))) << 0x1) : h_tmp375) & 0xffffffff) << 0x0)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0)); + RzILOpPure *op_NOT_6 = LOGNOT(op_LSHIFT_5); + RzILOpPure *op_AND_7 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_6); + RzILOpPure *op_RSHIFT_108 = SHIFTRA(DUP(Rss), SN(32, 16)); + RzILOpPure *op_AND_111 = LOGAND(op_RSHIFT_108, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_117 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_120 = LOGAND(op_RSHIFT_117, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_123 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_111), DUP(op_AND_111))), CAST(16, MSB(DUP(op_AND_111)), DUP(op_AND_111))), CAST(32, MSB(CAST(16, MSB(op_AND_120), DUP(op_AND_120))), CAST(16, MSB(DUP(op_AND_120)), DUP(op_AND_120)))); + RzILOpPure 
*op_RSHIFT_128 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_131 = LOGAND(op_RSHIFT_128, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_137 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_140 = LOGAND(op_RSHIFT_137, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_143 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_131), DUP(op_AND_131))), CAST(16, MSB(DUP(op_AND_131)), DUP(op_AND_131))), CAST(32, MSB(CAST(16, MSB(op_AND_140), DUP(op_AND_140))), CAST(16, MSB(DUP(op_AND_140)), DUP(op_AND_140)))); + RzILOpPure *op_ADD_145 = ADD(CAST(64, MSB(op_MUL_123), DUP(op_MUL_123)), CAST(64, MSB(op_MUL_143), DUP(op_MUL_143))); + RzILOpPure *op_LSHIFT_147 = SHIFTL0(op_ADD_145, SN(32, 1)); + RzILOpPure *cond_216 = ITE(DUP(op_EQ_104), op_LSHIFT_147, VARL("h_tmp375")); + RzILOpPure *op_AND_218 = LOGAND(cond_216, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_222 = SHIFTL0(op_AND_218, SN(32, 0)); + RzILOpPure *op_OR_223 = LOGOR(op_AND_7, op_LSHIFT_222); + RzILOpEffect *op_ASSIGN_224 = WRITE_REG(bundle, Rdd_op, op_OR_223); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) (( ...; + RzILOpEffect *seq_225 = SEQN(2, seq_215, op_ASSIGN_224); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_374 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) + ((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff))))) << 0x1)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) + ((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff))))) << 0x1))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 
0x1))}, (((((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) + ((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff))))) << 0x1) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_240 = SHIFTRA(DUP(Rss), SN(32, 0x30)); + RzILOpPure *op_AND_243 = LOGAND(op_RSHIFT_240, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_249 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_252 = LOGAND(op_RSHIFT_249, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_255 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_243), DUP(op_AND_243))), CAST(16, MSB(DUP(op_AND_243)), DUP(op_AND_243))), CAST(32, MSB(CAST(16, MSB(op_AND_252), DUP(op_AND_252))), CAST(16, MSB(DUP(op_AND_252)), DUP(op_AND_252)))); + RzILOpPure *op_RSHIFT_260 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_263 = LOGAND(op_RSHIFT_260, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_269 = SHIFTRA(DUP(Rtt), SN(32, 0x30)); + RzILOpPure *op_AND_272 = LOGAND(op_RSHIFT_269, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_275 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_263), DUP(op_AND_263))), CAST(16, MSB(DUP(op_AND_263)), DUP(op_AND_263))), CAST(32, MSB(CAST(16, MSB(op_AND_272), DUP(op_AND_272))), CAST(16, MSB(DUP(op_AND_272)), DUP(op_AND_272)))); + RzILOpPure *op_ADD_277 = ADD(CAST(64, MSB(op_MUL_255), DUP(op_MUL_255)), CAST(64, MSB(op_MUL_275), DUP(op_MUL_275))); + RzILOpPure *op_LSHIFT_279 = SHIFTL0(op_ADD_277, SN(32, 1)); + RzILOpPure *op_RSHIFT_288 = SHIFTRA(DUP(Rss), SN(32, 0x30)); + RzILOpPure *op_AND_291 = LOGAND(op_RSHIFT_288, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_297 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_300 = LOGAND(op_RSHIFT_297, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_303 = MUL(CAST(32, 
MSB(CAST(16, MSB(op_AND_291), DUP(op_AND_291))), CAST(16, MSB(DUP(op_AND_291)), DUP(op_AND_291))), CAST(32, MSB(CAST(16, MSB(op_AND_300), DUP(op_AND_300))), CAST(16, MSB(DUP(op_AND_300)), DUP(op_AND_300)))); + RzILOpPure *op_RSHIFT_308 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_311 = LOGAND(op_RSHIFT_308, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_317 = SHIFTRA(DUP(Rtt), SN(32, 0x30)); + RzILOpPure *op_AND_320 = LOGAND(op_RSHIFT_317, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_323 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_311), DUP(op_AND_311))), CAST(16, MSB(DUP(op_AND_311)), DUP(op_AND_311))), CAST(32, MSB(CAST(16, MSB(op_AND_320), DUP(op_AND_320))), CAST(16, MSB(DUP(op_AND_320)), DUP(op_AND_320)))); + RzILOpPure *op_ADD_325 = ADD(CAST(64, MSB(op_MUL_303), DUP(op_MUL_303)), CAST(64, MSB(op_MUL_323), DUP(op_MUL_323))); + RzILOpPure *op_LSHIFT_327 = SHIFTL0(op_ADD_325, SN(32, 1)); + RzILOpPure *op_EQ_328 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_LSHIFT_279), SN(32, 0), SN(32, 0x20)), op_LSHIFT_327); + RzILOpPure *op_RSHIFT_378 = SHIFTRA(DUP(Rss), SN(32, 0x30)); + RzILOpPure *op_AND_381 = LOGAND(op_RSHIFT_378, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_387 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_390 = LOGAND(op_RSHIFT_387, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_393 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_381), DUP(op_AND_381))), CAST(16, MSB(DUP(op_AND_381)), DUP(op_AND_381))), CAST(32, MSB(CAST(16, MSB(op_AND_390), DUP(op_AND_390))), CAST(16, MSB(DUP(op_AND_390)), DUP(op_AND_390)))); + RzILOpPure *op_RSHIFT_398 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_401 = LOGAND(op_RSHIFT_398, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_407 = SHIFTRA(DUP(Rtt), SN(32, 0x30)); + RzILOpPure *op_AND_410 = LOGAND(op_RSHIFT_407, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_413 = 
MUL(CAST(32, MSB(CAST(16, MSB(op_AND_401), DUP(op_AND_401))), CAST(16, MSB(DUP(op_AND_401)), DUP(op_AND_401))), CAST(32, MSB(CAST(16, MSB(op_AND_410), DUP(op_AND_410))), CAST(16, MSB(DUP(op_AND_410)), DUP(op_AND_410)))); + RzILOpPure *op_ADD_415 = ADD(CAST(64, MSB(op_MUL_393), DUP(op_MUL_393)), CAST(64, MSB(op_MUL_413), DUP(op_MUL_413))); + RzILOpPure *op_LSHIFT_417 = SHIFTL0(op_ADD_415, SN(32, 1)); + RzILOpPure *op_LT_420 = SLT(op_LSHIFT_417, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_425 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_426 = NEG(op_LSHIFT_425); + RzILOpPure *op_LSHIFT_431 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_434 = SUB(op_LSHIFT_431, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_435 = ITE(op_LT_420, op_NEG_426, op_SUB_434); + RzILOpEffect *gcc_expr_436 = BRANCH(op_EQ_328, EMPTY(), set_usr_field_call_374); + + // h_tmp376 = HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) + ((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff))))) << 0x1)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) + ((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff))))) << 0x1))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) + ((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff))))) << 0x1) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_438 = SETL("h_tmp376", cond_435); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16 ...; + RzILOpEffect *seq_439 = SEQN(2, gcc_expr_436, op_ASSIGN_hybrid_tmp_438); + + // Rdd = ((Rdd & (~(0xffffffff << 0x20))) | ((((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) + ((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff))))) << 0x1)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) + ((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff))))) << 0x1)) ? (((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) + ((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff))))) << 0x1) : h_tmp376) & 0xffffffff) << 0x20)); + RzILOpPure *op_LSHIFT_231 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0x20)); + RzILOpPure *op_NOT_232 = LOGNOT(op_LSHIFT_231); + RzILOpPure *op_AND_233 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_232); + RzILOpPure *op_RSHIFT_332 = SHIFTRA(DUP(Rss), SN(32, 0x30)); + RzILOpPure *op_AND_335 = LOGAND(op_RSHIFT_332, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_341 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_344 = LOGAND(op_RSHIFT_341, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_347 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_335), DUP(op_AND_335))), CAST(16, MSB(DUP(op_AND_335)), DUP(op_AND_335))), CAST(32, MSB(CAST(16, MSB(op_AND_344), DUP(op_AND_344))), CAST(16, MSB(DUP(op_AND_344)), DUP(op_AND_344)))); + RzILOpPure *op_RSHIFT_352 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_355 = 
LOGAND(op_RSHIFT_352, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_361 = SHIFTRA(DUP(Rtt), SN(32, 0x30)); + RzILOpPure *op_AND_364 = LOGAND(op_RSHIFT_361, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_367 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_355), DUP(op_AND_355))), CAST(16, MSB(DUP(op_AND_355)), DUP(op_AND_355))), CAST(32, MSB(CAST(16, MSB(op_AND_364), DUP(op_AND_364))), CAST(16, MSB(DUP(op_AND_364)), DUP(op_AND_364)))); + RzILOpPure *op_ADD_369 = ADD(CAST(64, MSB(op_MUL_347), DUP(op_MUL_347)), CAST(64, MSB(op_MUL_367), DUP(op_MUL_367))); + RzILOpPure *op_LSHIFT_371 = SHIFTL0(op_ADD_369, SN(32, 1)); + RzILOpPure *cond_440 = ITE(DUP(op_EQ_328), op_LSHIFT_371, VARL("h_tmp376")); + RzILOpPure *op_AND_442 = LOGAND(cond_440, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_446 = SHIFTL0(op_AND_442, SN(32, 0x20)); + RzILOpPure *op_OR_447 = LOGOR(op_AND_233, op_LSHIFT_446); + RzILOpEffect *op_ASSIGN_448 = WRITE_REG(bundle, Rdd_op, op_OR_447); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) (( ...; + RzILOpEffect *seq_449 = SEQN(2, seq_439, op_ASSIGN_448); + + RzILOpEffect *instruction_sequence = SEQN(2, seq_225, seq_449); + return instruction_sequence; +} + +// Rdd = vcmpyr(Rss,Rtt):<<1:sat +RzILOpEffect *hex_il_op_m2_vcmpy_s1_sat_r(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_150 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & 
((st64) 0xffff))))) - ((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff))))) << 0x1)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) - ((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff))))) << 0x1))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) - ((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff))))) << 0x1) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_15 = SHIFTRA(Rss, SN(32, 0)); + RzILOpPure *op_AND_18 = LOGAND(op_RSHIFT_15, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_25 = SHIFTRA(Rtt, SN(32, 0)); + RzILOpPure *op_AND_28 = LOGAND(op_RSHIFT_25, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_31 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_18), DUP(op_AND_18))), CAST(16, MSB(DUP(op_AND_18)), DUP(op_AND_18))), CAST(32, MSB(CAST(16, MSB(op_AND_28), DUP(op_AND_28))), CAST(16, MSB(DUP(op_AND_28)), DUP(op_AND_28)))); + RzILOpPure *op_RSHIFT_36 = SHIFTRA(DUP(Rss), SN(32, 16)); + RzILOpPure *op_AND_39 = LOGAND(op_RSHIFT_36, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_45 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_48 = LOGAND(op_RSHIFT_45, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_51 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_39), DUP(op_AND_39))), CAST(16, MSB(DUP(op_AND_39)), DUP(op_AND_39))), CAST(32, MSB(CAST(16, MSB(op_AND_48), DUP(op_AND_48))), CAST(16, MSB(DUP(op_AND_48)), DUP(op_AND_48)))); + RzILOpPure *op_SUB_53 = SUB(CAST(64, MSB(op_MUL_31), DUP(op_MUL_31)), CAST(64, MSB(op_MUL_51), DUP(op_MUL_51))); + RzILOpPure 
*op_LSHIFT_55 = SHIFTL0(op_SUB_53, SN(32, 1)); + RzILOpPure *op_RSHIFT_64 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_67 = LOGAND(op_RSHIFT_64, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_73 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_76 = LOGAND(op_RSHIFT_73, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_79 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_67), DUP(op_AND_67))), CAST(16, MSB(DUP(op_AND_67)), DUP(op_AND_67))), CAST(32, MSB(CAST(16, MSB(op_AND_76), DUP(op_AND_76))), CAST(16, MSB(DUP(op_AND_76)), DUP(op_AND_76)))); + RzILOpPure *op_RSHIFT_84 = SHIFTRA(DUP(Rss), SN(32, 16)); + RzILOpPure *op_AND_87 = LOGAND(op_RSHIFT_84, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_93 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_96 = LOGAND(op_RSHIFT_93, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_99 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_87), DUP(op_AND_87))), CAST(16, MSB(DUP(op_AND_87)), DUP(op_AND_87))), CAST(32, MSB(CAST(16, MSB(op_AND_96), DUP(op_AND_96))), CAST(16, MSB(DUP(op_AND_96)), DUP(op_AND_96)))); + RzILOpPure *op_SUB_101 = SUB(CAST(64, MSB(op_MUL_79), DUP(op_MUL_79)), CAST(64, MSB(op_MUL_99), DUP(op_MUL_99))); + RzILOpPure *op_LSHIFT_103 = SHIFTL0(op_SUB_101, SN(32, 1)); + RzILOpPure *op_EQ_104 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_LSHIFT_55), SN(32, 0), SN(32, 0x20)), op_LSHIFT_103); + RzILOpPure *op_RSHIFT_154 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_157 = LOGAND(op_RSHIFT_154, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_163 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_166 = LOGAND(op_RSHIFT_163, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_169 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_157), DUP(op_AND_157))), CAST(16, MSB(DUP(op_AND_157)), DUP(op_AND_157))), CAST(32, MSB(CAST(16, MSB(op_AND_166), DUP(op_AND_166))), CAST(16, MSB(DUP(op_AND_166)), DUP(op_AND_166)))); 
+ RzILOpPure *op_RSHIFT_174 = SHIFTRA(DUP(Rss), SN(32, 16)); + RzILOpPure *op_AND_177 = LOGAND(op_RSHIFT_174, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_183 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_186 = LOGAND(op_RSHIFT_183, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_189 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_177), DUP(op_AND_177))), CAST(16, MSB(DUP(op_AND_177)), DUP(op_AND_177))), CAST(32, MSB(CAST(16, MSB(op_AND_186), DUP(op_AND_186))), CAST(16, MSB(DUP(op_AND_186)), DUP(op_AND_186)))); + RzILOpPure *op_SUB_191 = SUB(CAST(64, MSB(op_MUL_169), DUP(op_MUL_169)), CAST(64, MSB(op_MUL_189), DUP(op_MUL_189))); + RzILOpPure *op_LSHIFT_193 = SHIFTL0(op_SUB_191, SN(32, 1)); + RzILOpPure *op_LT_196 = SLT(op_LSHIFT_193, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_201 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_202 = NEG(op_LSHIFT_201); + RzILOpPure *op_LSHIFT_207 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_210 = SUB(op_LSHIFT_207, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_211 = ITE(op_LT_196, op_NEG_202, op_SUB_210); + RzILOpEffect *gcc_expr_212 = BRANCH(op_EQ_104, EMPTY(), set_usr_field_call_150); + + // h_tmp377 = HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) - ((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff))))) << 0x1)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) - ((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff))))) << 0x1))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) - ((st64) ((st32) ((st16) 
((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff))))) << 0x1) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_214 = SETL("h_tmp377", cond_211); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16 ...; + RzILOpEffect *seq_215 = SEQN(2, gcc_expr_212, op_ASSIGN_hybrid_tmp_214); + + // Rdd = ((Rdd & (~(0xffffffff << 0x0))) | ((((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) - ((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff))))) << 0x1)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) - ((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff))))) << 0x1)) ? (((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) - ((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff))))) << 0x1) : h_tmp377) & 0xffffffff) << 0x0)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0)); + RzILOpPure *op_NOT_6 = LOGNOT(op_LSHIFT_5); + RzILOpPure *op_AND_7 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_6); + RzILOpPure *op_RSHIFT_108 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_111 = LOGAND(op_RSHIFT_108, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_117 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_120 = LOGAND(op_RSHIFT_117, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_123 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_111), DUP(op_AND_111))), CAST(16, MSB(DUP(op_AND_111)), DUP(op_AND_111))), CAST(32, MSB(CAST(16, MSB(op_AND_120), DUP(op_AND_120))), CAST(16, MSB(DUP(op_AND_120)), DUP(op_AND_120)))); + RzILOpPure 
*op_RSHIFT_128 = SHIFTRA(DUP(Rss), SN(32, 16)); + RzILOpPure *op_AND_131 = LOGAND(op_RSHIFT_128, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_137 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_140 = LOGAND(op_RSHIFT_137, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_143 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_131), DUP(op_AND_131))), CAST(16, MSB(DUP(op_AND_131)), DUP(op_AND_131))), CAST(32, MSB(CAST(16, MSB(op_AND_140), DUP(op_AND_140))), CAST(16, MSB(DUP(op_AND_140)), DUP(op_AND_140)))); + RzILOpPure *op_SUB_145 = SUB(CAST(64, MSB(op_MUL_123), DUP(op_MUL_123)), CAST(64, MSB(op_MUL_143), DUP(op_MUL_143))); + RzILOpPure *op_LSHIFT_147 = SHIFTL0(op_SUB_145, SN(32, 1)); + RzILOpPure *cond_216 = ITE(DUP(op_EQ_104), op_LSHIFT_147, VARL("h_tmp377")); + RzILOpPure *op_AND_218 = LOGAND(cond_216, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_222 = SHIFTL0(op_AND_218, SN(32, 0)); + RzILOpPure *op_OR_223 = LOGOR(op_AND_7, op_LSHIFT_222); + RzILOpEffect *op_ASSIGN_224 = WRITE_REG(bundle, Rdd_op, op_OR_223); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) (( ...; + RzILOpEffect *seq_225 = SEQN(2, seq_215, op_ASSIGN_224); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_374 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) - ((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff))))) << 0x1)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) - ((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff))))) << 0x1))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 
0x1))}, (((((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) - ((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff))))) << 0x1) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_240 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_243 = LOGAND(op_RSHIFT_240, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_249 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_252 = LOGAND(op_RSHIFT_249, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_255 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_243), DUP(op_AND_243))), CAST(16, MSB(DUP(op_AND_243)), DUP(op_AND_243))), CAST(32, MSB(CAST(16, MSB(op_AND_252), DUP(op_AND_252))), CAST(16, MSB(DUP(op_AND_252)), DUP(op_AND_252)))); + RzILOpPure *op_RSHIFT_260 = SHIFTRA(DUP(Rss), SN(32, 0x30)); + RzILOpPure *op_AND_263 = LOGAND(op_RSHIFT_260, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_269 = SHIFTRA(DUP(Rtt), SN(32, 0x30)); + RzILOpPure *op_AND_272 = LOGAND(op_RSHIFT_269, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_275 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_263), DUP(op_AND_263))), CAST(16, MSB(DUP(op_AND_263)), DUP(op_AND_263))), CAST(32, MSB(CAST(16, MSB(op_AND_272), DUP(op_AND_272))), CAST(16, MSB(DUP(op_AND_272)), DUP(op_AND_272)))); + RzILOpPure *op_SUB_277 = SUB(CAST(64, MSB(op_MUL_255), DUP(op_MUL_255)), CAST(64, MSB(op_MUL_275), DUP(op_MUL_275))); + RzILOpPure *op_LSHIFT_279 = SHIFTL0(op_SUB_277, SN(32, 1)); + RzILOpPure *op_RSHIFT_288 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_291 = LOGAND(op_RSHIFT_288, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_297 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_300 = LOGAND(op_RSHIFT_297, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_303 = MUL(CAST(32, 
MSB(CAST(16, MSB(op_AND_291), DUP(op_AND_291))), CAST(16, MSB(DUP(op_AND_291)), DUP(op_AND_291))), CAST(32, MSB(CAST(16, MSB(op_AND_300), DUP(op_AND_300))), CAST(16, MSB(DUP(op_AND_300)), DUP(op_AND_300)))); + RzILOpPure *op_RSHIFT_308 = SHIFTRA(DUP(Rss), SN(32, 0x30)); + RzILOpPure *op_AND_311 = LOGAND(op_RSHIFT_308, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_317 = SHIFTRA(DUP(Rtt), SN(32, 0x30)); + RzILOpPure *op_AND_320 = LOGAND(op_RSHIFT_317, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_323 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_311), DUP(op_AND_311))), CAST(16, MSB(DUP(op_AND_311)), DUP(op_AND_311))), CAST(32, MSB(CAST(16, MSB(op_AND_320), DUP(op_AND_320))), CAST(16, MSB(DUP(op_AND_320)), DUP(op_AND_320)))); + RzILOpPure *op_SUB_325 = SUB(CAST(64, MSB(op_MUL_303), DUP(op_MUL_303)), CAST(64, MSB(op_MUL_323), DUP(op_MUL_323))); + RzILOpPure *op_LSHIFT_327 = SHIFTL0(op_SUB_325, SN(32, 1)); + RzILOpPure *op_EQ_328 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_LSHIFT_279), SN(32, 0), SN(32, 0x20)), op_LSHIFT_327); + RzILOpPure *op_RSHIFT_378 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_381 = LOGAND(op_RSHIFT_378, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_387 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_390 = LOGAND(op_RSHIFT_387, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_393 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_381), DUP(op_AND_381))), CAST(16, MSB(DUP(op_AND_381)), DUP(op_AND_381))), CAST(32, MSB(CAST(16, MSB(op_AND_390), DUP(op_AND_390))), CAST(16, MSB(DUP(op_AND_390)), DUP(op_AND_390)))); + RzILOpPure *op_RSHIFT_398 = SHIFTRA(DUP(Rss), SN(32, 0x30)); + RzILOpPure *op_AND_401 = LOGAND(op_RSHIFT_398, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_407 = SHIFTRA(DUP(Rtt), SN(32, 0x30)); + RzILOpPure *op_AND_410 = LOGAND(op_RSHIFT_407, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_413 = 
MUL(CAST(32, MSB(CAST(16, MSB(op_AND_401), DUP(op_AND_401))), CAST(16, MSB(DUP(op_AND_401)), DUP(op_AND_401))), CAST(32, MSB(CAST(16, MSB(op_AND_410), DUP(op_AND_410))), CAST(16, MSB(DUP(op_AND_410)), DUP(op_AND_410)))); + RzILOpPure *op_SUB_415 = SUB(CAST(64, MSB(op_MUL_393), DUP(op_MUL_393)), CAST(64, MSB(op_MUL_413), DUP(op_MUL_413))); + RzILOpPure *op_LSHIFT_417 = SHIFTL0(op_SUB_415, SN(32, 1)); + RzILOpPure *op_LT_420 = SLT(op_LSHIFT_417, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_425 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_426 = NEG(op_LSHIFT_425); + RzILOpPure *op_LSHIFT_431 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_434 = SUB(op_LSHIFT_431, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_435 = ITE(op_LT_420, op_NEG_426, op_SUB_434); + RzILOpEffect *gcc_expr_436 = BRANCH(op_EQ_328, EMPTY(), set_usr_field_call_374); + + // h_tmp378 = HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) - ((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff))))) << 0x1)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) - ((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff))))) << 0x1))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) - ((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff))))) << 0x1) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_438 = SETL("h_tmp378", cond_435); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16 ...; + RzILOpEffect *seq_439 = SEQN(2, gcc_expr_436, op_ASSIGN_hybrid_tmp_438); + + // Rdd = ((Rdd & (~(0xffffffff << 0x20))) | ((((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) - ((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff))))) << 0x1)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) - ((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff))))) << 0x1)) ? (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) - ((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff))))) << 0x1) : h_tmp378) & 0xffffffff) << 0x20)); + RzILOpPure *op_LSHIFT_231 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0x20)); + RzILOpPure *op_NOT_232 = LOGNOT(op_LSHIFT_231); + RzILOpPure *op_AND_233 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_232); + RzILOpPure *op_RSHIFT_332 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_335 = LOGAND(op_RSHIFT_332, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_341 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_344 = LOGAND(op_RSHIFT_341, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_347 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_335), DUP(op_AND_335))), CAST(16, MSB(DUP(op_AND_335)), DUP(op_AND_335))), CAST(32, MSB(CAST(16, MSB(op_AND_344), DUP(op_AND_344))), CAST(16, MSB(DUP(op_AND_344)), DUP(op_AND_344)))); + RzILOpPure *op_RSHIFT_352 = SHIFTRA(DUP(Rss), SN(32, 0x30)); + RzILOpPure *op_AND_355 = 
LOGAND(op_RSHIFT_352, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff)));
	RzILOpPure *op_RSHIFT_361 = SHIFTRA(DUP(Rtt), SN(32, 0x30));
	RzILOpPure *op_AND_364 = LOGAND(op_RSHIFT_361, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff)));
	RzILOpPure *op_MUL_367 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_355), DUP(op_AND_355))), CAST(16, MSB(DUP(op_AND_355)), DUP(op_AND_355))), CAST(32, MSB(CAST(16, MSB(op_AND_364), DUP(op_AND_364))), CAST(16, MSB(DUP(op_AND_364)), DUP(op_AND_364))));
	RzILOpPure *op_SUB_369 = SUB(CAST(64, MSB(op_MUL_347), DUP(op_MUL_347)), CAST(64, MSB(op_MUL_367), DUP(op_MUL_367)));
	RzILOpPure *op_LSHIFT_371 = SHIFTL0(op_SUB_369, SN(32, 1));
	RzILOpPure *cond_440 = ITE(DUP(op_EQ_328), op_LSHIFT_371, VARL("h_tmp378"));
	RzILOpPure *op_AND_442 = LOGAND(cond_440, SN(64, 0xffffffff));
	RzILOpPure *op_LSHIFT_446 = SHIFTL0(op_AND_442, SN(32, 0x20));
	RzILOpPure *op_OR_447 = LOGOR(op_AND_233, op_LSHIFT_446);
	RzILOpEffect *op_ASSIGN_448 = WRITE_REG(bundle, Rdd_op, op_OR_447);

	// seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) (( ...;
	RzILOpEffect *seq_449 = SEQN(2, seq_439, op_ASSIGN_448);

	RzILOpEffect *instruction_sequence = SEQN(2, seq_225, seq_449);
	return instruction_sequence;
}

// Rxx += vdmpy(Rss,Rtt):sat
//
// Builds the RzIL effect tree for M2_vdmacs_s0: for each 32-bit half of the
// 64-bit accumulator Rxx, add the sum of the two corresponding signed 16x16
// multiplies of Rss and Rtt, saturating each half to 32 bits and setting
// USR.OVF on overflow. The visible structure is: for each half, first an
// overflow-check branch (gcc_expr_*) that conditionally sets USR.OVF and
// computes the saturation bound into an h_tmp* local, then the masked
// write-back of either the exact sum or the saturated bound into Rxx.
// NOTE(review): generated code (see file header) -- op numbering, statement
// order and DUP() usage are produced by the generator and must not be edited
// by hand.
RzILOpEffect *hex_il_op_m2_vdmacs_s0(HexInsnPktBundle *bundle) {
	const HexInsn *hi = bundle->insn;
	HexPkt *pkt = bundle->pkt;
	// READ
	const HexOp *Rxx_op = ISA2REG(hi, 'x', false);

	const HexOp *Rss_op = ISA2REG(hi, 's', false);
	RzILOpPure *Rss = READ_REG(pkt, Rss_op, false);
	const HexOp *Rtt_op = ISA2REG(hi, 't', false);
	RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false);

	// set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1));
	// Overflow effect for the LOW word; executed only on the BRANCH else-arm below.
	RzILOpEffect *set_usr_field_call_183 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1)));

	// Low word overflow check:
	// sum = (st32)Rxx.w[0] + (st16)Rss.h[0]*(st16)Rtt.h[0] + (st16)Rss.h[1]*(st16)Rtt.h[1]
	// op_EQ_126 is true iff the 64-bit sum equals its own value sign-extended
	// from 32 bits, i.e. the result fits in 32 bits (no saturation needed).
	RzILOpPure *op_RSHIFT_14 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0));
	RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(64, 0xffffffff));
	RzILOpPure *op_RSHIFT_23 = SHIFTRA(Rss, SN(32, 0));
	RzILOpPure *op_AND_26 = LOGAND(op_RSHIFT_23, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff)));
	RzILOpPure *op_RSHIFT_33 = SHIFTRA(Rtt, SN(32, 0));
	RzILOpPure *op_AND_36 = LOGAND(op_RSHIFT_33, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff)));
	RzILOpPure *op_MUL_39 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_26), DUP(op_AND_26))), CAST(16, MSB(DUP(op_AND_26)), DUP(op_AND_26))), CAST(32, MSB(CAST(16, MSB(op_AND_36), DUP(op_AND_36))), CAST(16, MSB(DUP(op_AND_36)), DUP(op_AND_36))));
	RzILOpPure *op_LSHIFT_42 = SHIFTL0(CAST(64, MSB(op_MUL_39), DUP(op_MUL_39)), SN(32, 0));
	RzILOpPure *op_ADD_43 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_16), DUP(op_AND_16))), CAST(32, MSB(DUP(op_AND_16)), DUP(op_AND_16))), op_LSHIFT_42);
	RzILOpPure *op_RSHIFT_47 = SHIFTRA(DUP(Rss), SN(32, 16));
	RzILOpPure *op_AND_50 = LOGAND(op_RSHIFT_47, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff)));
	RzILOpPure *op_RSHIFT_56 = SHIFTRA(DUP(Rtt), SN(32, 16));
	RzILOpPure *op_AND_59 = LOGAND(op_RSHIFT_56, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff)));
	RzILOpPure *op_MUL_62 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_50), DUP(op_AND_50))), CAST(16, MSB(DUP(op_AND_50)), DUP(op_AND_50))), CAST(32, MSB(CAST(16, MSB(op_AND_59), DUP(op_AND_59))), CAST(16, MSB(DUP(op_AND_59)), DUP(op_AND_59))));
	RzILOpPure *op_LSHIFT_65 = SHIFTL0(CAST(64, MSB(op_MUL_62), DUP(op_MUL_62)), SN(32, 0));
	RzILOpPure *op_ADD_66 = ADD(op_ADD_43, op_LSHIFT_65);
	// Second, independent re-computation of the same sum (pure ops are trees,
	// not shared values) used as the right-hand side of the EQ comparison.
	RzILOpPure *op_RSHIFT_75 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0));
	RzILOpPure *op_AND_77 = LOGAND(op_RSHIFT_75, SN(64, 0xffffffff));
	RzILOpPure *op_RSHIFT_83 = SHIFTRA(DUP(Rss), SN(32, 0));
	RzILOpPure *op_AND_86 = LOGAND(op_RSHIFT_83, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff)));
	RzILOpPure *op_RSHIFT_92 = SHIFTRA(DUP(Rtt), SN(32, 0));
	RzILOpPure *op_AND_95 = LOGAND(op_RSHIFT_92, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff)));
	RzILOpPure *op_MUL_98 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_86), DUP(op_AND_86))), CAST(16, MSB(DUP(op_AND_86)), DUP(op_AND_86))), CAST(32, MSB(CAST(16, MSB(op_AND_95), DUP(op_AND_95))), CAST(16, MSB(DUP(op_AND_95)), DUP(op_AND_95))));
	RzILOpPure *op_LSHIFT_101 = SHIFTL0(CAST(64, MSB(op_MUL_98), DUP(op_MUL_98)), SN(32, 0));
	RzILOpPure *op_ADD_102 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_77), DUP(op_AND_77))), CAST(32, MSB(DUP(op_AND_77)), DUP(op_AND_77))), op_LSHIFT_101);
	RzILOpPure *op_RSHIFT_106 = SHIFTRA(DUP(Rss), SN(32, 16));
	RzILOpPure *op_AND_109 = LOGAND(op_RSHIFT_106, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff)));
	RzILOpPure *op_RSHIFT_115 = SHIFTRA(DUP(Rtt), SN(32, 16));
	RzILOpPure *op_AND_118 = LOGAND(op_RSHIFT_115, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff)));
	RzILOpPure *op_MUL_121 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_109), DUP(op_AND_109))), CAST(16, MSB(DUP(op_AND_109)), DUP(op_AND_109))), CAST(32, MSB(CAST(16, MSB(op_AND_118), DUP(op_AND_118))), CAST(16, MSB(DUP(op_AND_118)), DUP(op_AND_118))));
	RzILOpPure *op_LSHIFT_124 = SHIFTL0(CAST(64, MSB(op_MUL_121), DUP(op_MUL_121)), SN(32, 0));
	RzILOpPure *op_ADD_125 = ADD(op_ADD_102, op_LSHIFT_124);
	RzILOpPure *op_EQ_126 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_ADD_66), SN(32, 0), SN(32, 0x20)), op_ADD_125);
	// Third re-computation of the sum, used only for the sign test that picks
	// the saturation bound: negative -> INT32_MIN, otherwise INT32_MAX.
	RzILOpPure *op_RSHIFT_187 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0));
	RzILOpPure *op_AND_189 = LOGAND(op_RSHIFT_187, SN(64, 0xffffffff));
	RzILOpPure *op_RSHIFT_195 = SHIFTRA(DUP(Rss), SN(32, 0));
	RzILOpPure *op_AND_198 = LOGAND(op_RSHIFT_195, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff)));
	RzILOpPure *op_RSHIFT_204 = SHIFTRA(DUP(Rtt), SN(32, 0));
	RzILOpPure *op_AND_207 = LOGAND(op_RSHIFT_204, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff)));
	RzILOpPure *op_MUL_210 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_198), DUP(op_AND_198))), CAST(16, MSB(DUP(op_AND_198)), DUP(op_AND_198))), CAST(32, MSB(CAST(16, MSB(op_AND_207), DUP(op_AND_207))), CAST(16, MSB(DUP(op_AND_207)), DUP(op_AND_207))));
	RzILOpPure *op_LSHIFT_213 = SHIFTL0(CAST(64, MSB(op_MUL_210), DUP(op_MUL_210)), SN(32, 0));
	RzILOpPure *op_ADD_214 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_189), DUP(op_AND_189))), CAST(32, MSB(DUP(op_AND_189)), DUP(op_AND_189))), op_LSHIFT_213);
	RzILOpPure *op_RSHIFT_218 = SHIFTRA(DUP(Rss), SN(32, 16));
	RzILOpPure *op_AND_221 = LOGAND(op_RSHIFT_218, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff)));
	RzILOpPure *op_RSHIFT_227 = SHIFTRA(DUP(Rtt), SN(32, 16));
	RzILOpPure *op_AND_230 = LOGAND(op_RSHIFT_227, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff)));
	RzILOpPure *op_MUL_233 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_221), DUP(op_AND_221))), CAST(16, MSB(DUP(op_AND_221)), DUP(op_AND_221))), CAST(32, MSB(CAST(16, MSB(op_AND_230), DUP(op_AND_230))), CAST(16, MSB(DUP(op_AND_230)), DUP(op_AND_230))));
	RzILOpPure *op_LSHIFT_236 = SHIFTL0(CAST(64, MSB(op_MUL_233), DUP(op_MUL_233)), SN(32, 0));
	RzILOpPure *op_ADD_237 = ADD(op_ADD_214, op_LSHIFT_236);
	RzILOpPure *op_LT_240 = SLT(op_ADD_237, CAST(64, MSB(SN(32, 0)), SN(32, 0)));
	RzILOpPure *op_LSHIFT_245 = SHIFTL0(SN(64, 1), SN(32, 31));
	RzILOpPure *op_NEG_246 = NEG(op_LSHIFT_245);
	RzILOpPure *op_LSHIFT_251 = SHIFTL0(SN(64, 1), SN(32, 31));
	RzILOpPure *op_SUB_254 = SUB(op_LSHIFT_251, CAST(64, MSB(SN(32, 1)), SN(32, 1)));
	RzILOpPure *cond_255 = ITE(op_LT_240, op_NEG_246, op_SUB_254);
	// If the low-word sum fits (op_EQ_126), do nothing; otherwise set USR.OVF.
	RzILOpEffect *gcc_expr_256 = BRANCH(op_EQ_126, EMPTY(), set_usr_field_call_183);

	// h_tmp379 = saturation bound for the low word (-(1<<31) or (1<<31)-1).
	RzILOpEffect *op_ASSIGN_hybrid_tmp_258 = SETL("h_tmp379", cond_255);

	// seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx > ...;
	RzILOpEffect *seq_259 = SEQN(2, gcc_expr_256, op_ASSIGN_hybrid_tmp_258);

	// Rxx.w[0] = fits ? exact sum : h_tmp379 (read-modify-write of the low
	// 32 bits of Rxx; the exact sum is rebuilt once more as a fresh tree).
	RzILOpPure *op_LSHIFT_5 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0));
	RzILOpPure *op_NOT_6 = LOGNOT(op_LSHIFT_5);
	RzILOpPure *op_AND_7 = LOGAND(READ_REG(pkt, Rxx_op, false), op_NOT_6);
	RzILOpPure *op_RSHIFT_130 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0));
	RzILOpPure *op_AND_132 = LOGAND(op_RSHIFT_130, SN(64, 0xffffffff));
	RzILOpPure *op_RSHIFT_138 = SHIFTRA(DUP(Rss), SN(32, 0));
	RzILOpPure *op_AND_141 = LOGAND(op_RSHIFT_138, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff)));
	RzILOpPure *op_RSHIFT_147 = SHIFTRA(DUP(Rtt), SN(32, 0));
	RzILOpPure *op_AND_150 = LOGAND(op_RSHIFT_147, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff)));
	RzILOpPure *op_MUL_153 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_141), DUP(op_AND_141))), CAST(16, MSB(DUP(op_AND_141)), DUP(op_AND_141))), CAST(32, MSB(CAST(16, MSB(op_AND_150), DUP(op_AND_150))), CAST(16, MSB(DUP(op_AND_150)), DUP(op_AND_150))));
	RzILOpPure *op_LSHIFT_156 = SHIFTL0(CAST(64, MSB(op_MUL_153), DUP(op_MUL_153)), SN(32, 0));
	RzILOpPure *op_ADD_157 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_132), DUP(op_AND_132))), CAST(32, MSB(DUP(op_AND_132)), DUP(op_AND_132))), op_LSHIFT_156);
	RzILOpPure *op_RSHIFT_161 = SHIFTRA(DUP(Rss), SN(32, 16));
	RzILOpPure *op_AND_164 = LOGAND(op_RSHIFT_161, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff)));
	RzILOpPure *op_RSHIFT_170 = SHIFTRA(DUP(Rtt), SN(32, 16));
	RzILOpPure *op_AND_173 = LOGAND(op_RSHIFT_170, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff)));
	RzILOpPure *op_MUL_176 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_164), DUP(op_AND_164))), CAST(16, MSB(DUP(op_AND_164)), DUP(op_AND_164))), CAST(32, MSB(CAST(16, MSB(op_AND_173), DUP(op_AND_173))), CAST(16, MSB(DUP(op_AND_173)), DUP(op_AND_173))));
	RzILOpPure *op_LSHIFT_179 = SHIFTL0(CAST(64, MSB(op_MUL_176), DUP(op_MUL_176)), SN(32, 0));
	RzILOpPure *op_ADD_180 = ADD(op_ADD_157, op_LSHIFT_179);
	RzILOpPure *cond_260 = ITE(DUP(op_EQ_126), op_ADD_180, VARL("h_tmp379"));
	RzILOpPure *op_AND_262 = LOGAND(cond_260, SN(64, 0xffffffff));
	RzILOpPure *op_LSHIFT_266 = SHIFTL0(op_AND_262, SN(32, 0));
	RzILOpPure *op_OR_267 = LOGOR(op_AND_7, op_LSHIFT_266);
	RzILOpEffect *op_ASSIGN_268 = WRITE_REG(bundle, Rxx_op, op_OR_267);

	// seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((R ...;
	RzILOpEffect *seq_269 = SEQN(2, seq_259, op_ASSIGN_268);

	// set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1));
	// Overflow effect for the HIGH word; same shape as the low-word half but
	// on lanes Rss.h[2]/Rtt.h[2] (>> 0x20) and Rss.h[3]/Rtt.h[3] (>> 0x30).
	RzILOpEffect *set_usr_field_call_451 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1)));

	// High word overflow check:
	// sum = (st32)Rxx.w[1] + (st16)Rss.h[2]*(st16)Rtt.h[2] + (st16)Rss.h[3]*(st16)Rtt.h[3]
	RzILOpPure *op_RSHIFT_284 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20));
	RzILOpPure *op_AND_286 = LOGAND(op_RSHIFT_284, SN(64, 0xffffffff));
	RzILOpPure *op_RSHIFT_292 = SHIFTRA(DUP(Rss), SN(32, 0x20));
	RzILOpPure *op_AND_295 = LOGAND(op_RSHIFT_292, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff)));
	RzILOpPure *op_RSHIFT_301 = SHIFTRA(DUP(Rtt), SN(32, 0x20));
	RzILOpPure *op_AND_304 = LOGAND(op_RSHIFT_301, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff)));
	RzILOpPure *op_MUL_307 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_295), DUP(op_AND_295))), CAST(16, MSB(DUP(op_AND_295)), DUP(op_AND_295))), CAST(32, MSB(CAST(16, MSB(op_AND_304), DUP(op_AND_304))), CAST(16, MSB(DUP(op_AND_304)), DUP(op_AND_304))));
	RzILOpPure *op_LSHIFT_310 = SHIFTL0(CAST(64, MSB(op_MUL_307), DUP(op_MUL_307)), SN(32, 0));
	RzILOpPure *op_ADD_311 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_286), DUP(op_AND_286))), CAST(32, MSB(DUP(op_AND_286)), DUP(op_AND_286))), op_LSHIFT_310);
	RzILOpPure *op_RSHIFT_315 = SHIFTRA(DUP(Rss), SN(32, 0x30));
	RzILOpPure *op_AND_318 = LOGAND(op_RSHIFT_315, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff)));
	RzILOpPure *op_RSHIFT_324 = SHIFTRA(DUP(Rtt), SN(32, 0x30));
	RzILOpPure *op_AND_327 = LOGAND(op_RSHIFT_324, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff)));
	RzILOpPure *op_MUL_330 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_318), DUP(op_AND_318))), CAST(16, MSB(DUP(op_AND_318)), DUP(op_AND_318))), CAST(32, MSB(CAST(16, MSB(op_AND_327), DUP(op_AND_327))), CAST(16, MSB(DUP(op_AND_327)), DUP(op_AND_327))));
	RzILOpPure *op_LSHIFT_333 = SHIFTL0(CAST(64, MSB(op_MUL_330), DUP(op_MUL_330)), SN(32, 0));
	RzILOpPure *op_ADD_334 = ADD(op_ADD_311, op_LSHIFT_333);
	// Independent re-computation of the high-word sum for the EQ comparison.
	RzILOpPure *op_RSHIFT_343 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20));
	RzILOpPure *op_AND_345 = LOGAND(op_RSHIFT_343, SN(64, 0xffffffff));
	RzILOpPure *op_RSHIFT_351 = SHIFTRA(DUP(Rss), SN(32, 0x20));
	RzILOpPure *op_AND_354 = LOGAND(op_RSHIFT_351, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff)));
	RzILOpPure *op_RSHIFT_360 = SHIFTRA(DUP(Rtt), SN(32, 0x20));
	RzILOpPure *op_AND_363 = LOGAND(op_RSHIFT_360, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff)));
	RzILOpPure *op_MUL_366 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_354), DUP(op_AND_354))), CAST(16, MSB(DUP(op_AND_354)), DUP(op_AND_354))), CAST(32, MSB(CAST(16, MSB(op_AND_363), DUP(op_AND_363))), CAST(16, MSB(DUP(op_AND_363)), DUP(op_AND_363))));
	RzILOpPure *op_LSHIFT_369 = SHIFTL0(CAST(64, MSB(op_MUL_366), DUP(op_MUL_366)), SN(32, 0));
	RzILOpPure *op_ADD_370 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_345), DUP(op_AND_345))), CAST(32, MSB(DUP(op_AND_345)), DUP(op_AND_345))), op_LSHIFT_369);
	RzILOpPure *op_RSHIFT_374 = SHIFTRA(DUP(Rss), SN(32, 0x30));
	RzILOpPure *op_AND_377 = LOGAND(op_RSHIFT_374, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff)));
	RzILOpPure *op_RSHIFT_383 = SHIFTRA(DUP(Rtt), SN(32, 0x30));
	RzILOpPure *op_AND_386 = LOGAND(op_RSHIFT_383, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff)));
	RzILOpPure *op_MUL_389 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_377), DUP(op_AND_377))), CAST(16, MSB(DUP(op_AND_377)), DUP(op_AND_377))), CAST(32, MSB(CAST(16, MSB(op_AND_386), DUP(op_AND_386))), CAST(16, MSB(DUP(op_AND_386)), DUP(op_AND_386))));
	RzILOpPure *op_LSHIFT_392 = SHIFTL0(CAST(64, MSB(op_MUL_389), DUP(op_MUL_389)), SN(32, 0));
	RzILOpPure *op_ADD_393 = ADD(op_ADD_370, op_LSHIFT_392);
	RzILOpPure *op_EQ_394 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_ADD_334), SN(32, 0), SN(32, 0x20)), op_ADD_393);
	// Third re-computation, for the sign test that selects the saturation bound.
	RzILOpPure *op_RSHIFT_455 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20));
	RzILOpPure *op_AND_457 = LOGAND(op_RSHIFT_455, SN(64, 0xffffffff));
	RzILOpPure *op_RSHIFT_463 = SHIFTRA(DUP(Rss), SN(32, 0x20));
	RzILOpPure *op_AND_466 = LOGAND(op_RSHIFT_463, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff)));
	RzILOpPure *op_RSHIFT_472 = SHIFTRA(DUP(Rtt), SN(32, 0x20));
	RzILOpPure *op_AND_475 = LOGAND(op_RSHIFT_472, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff)));
	RzILOpPure *op_MUL_478 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_466), DUP(op_AND_466))), CAST(16, MSB(DUP(op_AND_466)), DUP(op_AND_466))), CAST(32, MSB(CAST(16, MSB(op_AND_475), DUP(op_AND_475))), CAST(16, MSB(DUP(op_AND_475)), DUP(op_AND_475))));
	RzILOpPure *op_LSHIFT_481 = SHIFTL0(CAST(64, MSB(op_MUL_478), DUP(op_MUL_478)), SN(32, 0));
	RzILOpPure *op_ADD_482 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_457), DUP(op_AND_457))), CAST(32, MSB(DUP(op_AND_457)), DUP(op_AND_457))), op_LSHIFT_481);
	RzILOpPure *op_RSHIFT_486 = SHIFTRA(DUP(Rss), SN(32, 0x30));
	RzILOpPure *op_AND_489 = LOGAND(op_RSHIFT_486, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff)));
	RzILOpPure *op_RSHIFT_495 = SHIFTRA(DUP(Rtt), SN(32, 0x30));
	RzILOpPure *op_AND_498 = LOGAND(op_RSHIFT_495, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff)));
	RzILOpPure *op_MUL_501 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_489), DUP(op_AND_489))), CAST(16, MSB(DUP(op_AND_489)), DUP(op_AND_489))), CAST(32, MSB(CAST(16, MSB(op_AND_498), DUP(op_AND_498))), CAST(16, MSB(DUP(op_AND_498)), DUP(op_AND_498))));
	RzILOpPure *op_LSHIFT_504 = SHIFTL0(CAST(64, MSB(op_MUL_501), DUP(op_MUL_501)), SN(32, 0));
	RzILOpPure *op_ADD_505 = ADD(op_ADD_482, op_LSHIFT_504);
	RzILOpPure *op_LT_508 = SLT(op_ADD_505, CAST(64, MSB(SN(32, 0)), SN(32, 0)));
	RzILOpPure *op_LSHIFT_513 = SHIFTL0(SN(64, 1), SN(32, 31));
	RzILOpPure *op_NEG_514 = NEG(op_LSHIFT_513);
	RzILOpPure *op_LSHIFT_519 = SHIFTL0(SN(64, 1), SN(32, 31));
	RzILOpPure *op_SUB_522 = SUB(op_LSHIFT_519, CAST(64, MSB(SN(32, 1)), SN(32, 1)));
	RzILOpPure *cond_523 = ITE(op_LT_508, op_NEG_514, op_SUB_522);
	// If the high-word sum fits (op_EQ_394), do nothing; otherwise set USR.OVF.
	RzILOpEffect *gcc_expr_524 = BRANCH(op_EQ_394, EMPTY(), set_usr_field_call_451);

	// h_tmp380 = saturation bound for the high word (-(1<<31) or (1<<31)-1).
	RzILOpEffect *op_ASSIGN_hybrid_tmp_526 = SETL("h_tmp380", cond_523);

	// seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx > ...;
	RzILOpEffect *seq_527 = SEQN(2, gcc_expr_524, op_ASSIGN_hybrid_tmp_526);

	// Rxx.w[1] = fits ? exact sum : h_tmp380 (read-modify-write of the high
	// 32 bits of Rxx).
	RzILOpPure *op_LSHIFT_275 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0x20));
	RzILOpPure *op_NOT_276 = LOGNOT(op_LSHIFT_275);
	RzILOpPure *op_AND_277 = LOGAND(READ_REG(pkt, Rxx_op, false), op_NOT_276);
	RzILOpPure *op_RSHIFT_398 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20));
	RzILOpPure *op_AND_400 = LOGAND(op_RSHIFT_398, SN(64, 0xffffffff));
	RzILOpPure *op_RSHIFT_406 = SHIFTRA(DUP(Rss), SN(32, 0x20));
	RzILOpPure *op_AND_409 = LOGAND(op_RSHIFT_406, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff)));
	RzILOpPure *op_RSHIFT_415 = SHIFTRA(DUP(Rtt), SN(32, 0x20));
	RzILOpPure *op_AND_418 = LOGAND(op_RSHIFT_415, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff)));
	RzILOpPure *op_MUL_421 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_409), DUP(op_AND_409))), CAST(16, MSB(DUP(op_AND_409)), DUP(op_AND_409))), CAST(32, MSB(CAST(16, MSB(op_AND_418), DUP(op_AND_418))), CAST(16, MSB(DUP(op_AND_418)), DUP(op_AND_418))));
	RzILOpPure *op_LSHIFT_424 = SHIFTL0(CAST(64, MSB(op_MUL_421), DUP(op_MUL_421)), SN(32, 0));
	RzILOpPure *op_ADD_425 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_400), DUP(op_AND_400))), CAST(32, MSB(DUP(op_AND_400)), DUP(op_AND_400))), op_LSHIFT_424);
	RzILOpPure *op_RSHIFT_429 = SHIFTRA(DUP(Rss), SN(32, 0x30));
	RzILOpPure *op_AND_432 = LOGAND(op_RSHIFT_429, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff)));
	RzILOpPure *op_RSHIFT_438 = SHIFTRA(DUP(Rtt), SN(32, 0x30));
	RzILOpPure *op_AND_441 = LOGAND(op_RSHIFT_438, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff)));
	RzILOpPure *op_MUL_444 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_432), DUP(op_AND_432))), CAST(16, MSB(DUP(op_AND_432)), DUP(op_AND_432))), CAST(32, MSB(CAST(16, MSB(op_AND_441), DUP(op_AND_441))), CAST(16, MSB(DUP(op_AND_441)), DUP(op_AND_441))));
	RzILOpPure *op_LSHIFT_447 = SHIFTL0(CAST(64, MSB(op_MUL_444), DUP(op_MUL_444)), SN(32, 0));
	RzILOpPure *op_ADD_448 = ADD(op_ADD_425, op_LSHIFT_447);
	RzILOpPure *cond_528 = ITE(DUP(op_EQ_394), op_ADD_448, VARL("h_tmp380"));
	RzILOpPure *op_AND_530 = LOGAND(cond_528, SN(64, 0xffffffff));
	RzILOpPure *op_LSHIFT_534 = SHIFTL0(op_AND_530, SN(32, 0x20));
	RzILOpPure *op_OR_535 = LOGOR(op_AND_277, op_LSHIFT_534);
	RzILOpEffect *op_ASSIGN_536 = WRITE_REG(bundle, Rxx_op, op_OR_535);

	// seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((R ...;
	RzILOpEffect *seq_537 = SEQN(2, seq_527, op_ASSIGN_536);

	RzILOpEffect *instruction_sequence = SEQN(2, seq_269, seq_537);
	return instruction_sequence;
}

// Rxx += vdmpy(Rss,Rtt):<<1:sat
RzILOpEffect *hex_il_op_m2_vdmacs_s1(HexInsnPktBundle *bundle) {
	const HexInsn *hi = bundle->insn;
	HexPkt *pkt = bundle->pkt;
	// READ
	const HexOp *Rxx_op = ISA2REG(hi, 'x', false);

const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_183 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff))))) << 0x1)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff))))) << 0x1))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff))))) << 0x1) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_23 = SHIFTRA(Rss, SN(32, 0)); + RzILOpPure *op_AND_26 = LOGAND(op_RSHIFT_23, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_33 = SHIFTRA(Rtt, SN(32, 0)); + RzILOpPure *op_AND_36 = LOGAND(op_RSHIFT_33, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_39 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_26), DUP(op_AND_26))), CAST(16, MSB(DUP(op_AND_26)), DUP(op_AND_26))), CAST(32, MSB(CAST(16, MSB(op_AND_36), DUP(op_AND_36))), CAST(16, MSB(DUP(op_AND_36)), DUP(op_AND_36)))); + RzILOpPure *op_LSHIFT_42 = SHIFTL0(CAST(64, MSB(op_MUL_39), DUP(op_MUL_39)), SN(32, 1)); + RzILOpPure *op_ADD_43 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_16), DUP(op_AND_16))), CAST(32, MSB(DUP(op_AND_16)), DUP(op_AND_16))), op_LSHIFT_42); + RzILOpPure *op_RSHIFT_47 = SHIFTRA(DUP(Rss), SN(32, 16)); + RzILOpPure *op_AND_50 = LOGAND(op_RSHIFT_47, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_56 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_59 = LOGAND(op_RSHIFT_56, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_62 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_50), DUP(op_AND_50))), CAST(16, MSB(DUP(op_AND_50)), DUP(op_AND_50))), CAST(32, MSB(CAST(16, MSB(op_AND_59), DUP(op_AND_59))), CAST(16, MSB(DUP(op_AND_59)), DUP(op_AND_59)))); + RzILOpPure *op_LSHIFT_65 = SHIFTL0(CAST(64, MSB(op_MUL_62), DUP(op_MUL_62)), SN(32, 1)); + RzILOpPure *op_ADD_66 = ADD(op_ADD_43, op_LSHIFT_65); + RzILOpPure *op_RSHIFT_75 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_77 = LOGAND(op_RSHIFT_75, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_83 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_86 = LOGAND(op_RSHIFT_83, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_92 
= SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_95 = LOGAND(op_RSHIFT_92, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_98 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_86), DUP(op_AND_86))), CAST(16, MSB(DUP(op_AND_86)), DUP(op_AND_86))), CAST(32, MSB(CAST(16, MSB(op_AND_95), DUP(op_AND_95))), CAST(16, MSB(DUP(op_AND_95)), DUP(op_AND_95)))); + RzILOpPure *op_LSHIFT_101 = SHIFTL0(CAST(64, MSB(op_MUL_98), DUP(op_MUL_98)), SN(32, 1)); + RzILOpPure *op_ADD_102 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_77), DUP(op_AND_77))), CAST(32, MSB(DUP(op_AND_77)), DUP(op_AND_77))), op_LSHIFT_101); + RzILOpPure *op_RSHIFT_106 = SHIFTRA(DUP(Rss), SN(32, 16)); + RzILOpPure *op_AND_109 = LOGAND(op_RSHIFT_106, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_115 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_118 = LOGAND(op_RSHIFT_115, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_121 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_109), DUP(op_AND_109))), CAST(16, MSB(DUP(op_AND_109)), DUP(op_AND_109))), CAST(32, MSB(CAST(16, MSB(op_AND_118), DUP(op_AND_118))), CAST(16, MSB(DUP(op_AND_118)), DUP(op_AND_118)))); + RzILOpPure *op_LSHIFT_124 = SHIFTL0(CAST(64, MSB(op_MUL_121), DUP(op_MUL_121)), SN(32, 1)); + RzILOpPure *op_ADD_125 = ADD(op_ADD_102, op_LSHIFT_124); + RzILOpPure *op_EQ_126 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_ADD_66), SN(32, 0), SN(32, 0x20)), op_ADD_125); + RzILOpPure *op_RSHIFT_187 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_189 = LOGAND(op_RSHIFT_187, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_195 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_198 = LOGAND(op_RSHIFT_195, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_204 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_207 = LOGAND(op_RSHIFT_204, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_210 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_198), DUP(op_AND_198))), CAST(16, 
MSB(DUP(op_AND_198)), DUP(op_AND_198))), CAST(32, MSB(CAST(16, MSB(op_AND_207), DUP(op_AND_207))), CAST(16, MSB(DUP(op_AND_207)), DUP(op_AND_207)))); + RzILOpPure *op_LSHIFT_213 = SHIFTL0(CAST(64, MSB(op_MUL_210), DUP(op_MUL_210)), SN(32, 1)); + RzILOpPure *op_ADD_214 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_189), DUP(op_AND_189))), CAST(32, MSB(DUP(op_AND_189)), DUP(op_AND_189))), op_LSHIFT_213); + RzILOpPure *op_RSHIFT_218 = SHIFTRA(DUP(Rss), SN(32, 16)); + RzILOpPure *op_AND_221 = LOGAND(op_RSHIFT_218, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_227 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_230 = LOGAND(op_RSHIFT_227, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_233 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_221), DUP(op_AND_221))), CAST(16, MSB(DUP(op_AND_221)), DUP(op_AND_221))), CAST(32, MSB(CAST(16, MSB(op_AND_230), DUP(op_AND_230))), CAST(16, MSB(DUP(op_AND_230)), DUP(op_AND_230)))); + RzILOpPure *op_LSHIFT_236 = SHIFTL0(CAST(64, MSB(op_MUL_233), DUP(op_MUL_233)), SN(32, 1)); + RzILOpPure *op_ADD_237 = ADD(op_ADD_214, op_LSHIFT_236); + RzILOpPure *op_LT_240 = SLT(op_ADD_237, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_245 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_246 = NEG(op_LSHIFT_245); + RzILOpPure *op_LSHIFT_251 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_254 = SUB(op_LSHIFT_251, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_255 = ITE(op_LT_240, op_NEG_246, op_SUB_254); + RzILOpEffect *gcc_expr_256 = BRANCH(op_EQ_126, EMPTY(), set_usr_field_call_183); + + // h_tmp381 = HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff))))) << 0x1)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x0) & 
0xffffffff))) + (((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff))))) << 0x1))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff))))) << 0x1) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_258 = SETL("h_tmp381", cond_255); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx > ...; + RzILOpEffect *seq_259 = SEQN(2, gcc_expr_256, op_ASSIGN_hybrid_tmp_258); + + // Rxx = ((Rxx & (~(0xffffffff << 0x0))) | ((((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff))))) << 0x1)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff))))) << 0x1)) ? 
((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff))))) << 0x1) : h_tmp381) & 0xffffffff) << 0x0)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0)); + RzILOpPure *op_NOT_6 = LOGNOT(op_LSHIFT_5); + RzILOpPure *op_AND_7 = LOGAND(READ_REG(pkt, Rxx_op, false), op_NOT_6); + RzILOpPure *op_RSHIFT_130 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_132 = LOGAND(op_RSHIFT_130, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_138 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_141 = LOGAND(op_RSHIFT_138, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_147 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_150 = LOGAND(op_RSHIFT_147, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_153 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_141), DUP(op_AND_141))), CAST(16, MSB(DUP(op_AND_141)), DUP(op_AND_141))), CAST(32, MSB(CAST(16, MSB(op_AND_150), DUP(op_AND_150))), CAST(16, MSB(DUP(op_AND_150)), DUP(op_AND_150)))); + RzILOpPure *op_LSHIFT_156 = SHIFTL0(CAST(64, MSB(op_MUL_153), DUP(op_MUL_153)), SN(32, 1)); + RzILOpPure *op_ADD_157 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_132), DUP(op_AND_132))), CAST(32, MSB(DUP(op_AND_132)), DUP(op_AND_132))), op_LSHIFT_156); + RzILOpPure *op_RSHIFT_161 = SHIFTRA(DUP(Rss), SN(32, 16)); + RzILOpPure *op_AND_164 = LOGAND(op_RSHIFT_161, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_170 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_173 = LOGAND(op_RSHIFT_170, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_176 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_164), DUP(op_AND_164))), CAST(16, MSB(DUP(op_AND_164)), DUP(op_AND_164))), CAST(32, MSB(CAST(16, MSB(op_AND_173), DUP(op_AND_173))), 
CAST(16, MSB(DUP(op_AND_173)), DUP(op_AND_173)))); + RzILOpPure *op_LSHIFT_179 = SHIFTL0(CAST(64, MSB(op_MUL_176), DUP(op_MUL_176)), SN(32, 1)); + RzILOpPure *op_ADD_180 = ADD(op_ADD_157, op_LSHIFT_179); + RzILOpPure *cond_260 = ITE(DUP(op_EQ_126), op_ADD_180, VARL("h_tmp381")); + RzILOpPure *op_AND_262 = LOGAND(cond_260, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_266 = SHIFTL0(op_AND_262, SN(32, 0)); + RzILOpPure *op_OR_267 = LOGOR(op_AND_7, op_LSHIFT_266); + RzILOpEffect *op_ASSIGN_268 = WRITE_REG(bundle, Rxx_op, op_OR_267); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((R ...; + RzILOpEffect *seq_269 = SEQN(2, seq_259, op_ASSIGN_268); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_451 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff))))) << 0x1)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff))))) << 0x1))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff))))) << 0x1) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_284 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_286 = LOGAND(op_RSHIFT_284, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_292 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_295 = LOGAND(op_RSHIFT_292, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_301 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_304 = LOGAND(op_RSHIFT_301, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_307 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_295), DUP(op_AND_295))), CAST(16, MSB(DUP(op_AND_295)), DUP(op_AND_295))), CAST(32, MSB(CAST(16, MSB(op_AND_304), DUP(op_AND_304))), CAST(16, MSB(DUP(op_AND_304)), DUP(op_AND_304)))); + RzILOpPure *op_LSHIFT_310 = SHIFTL0(CAST(64, MSB(op_MUL_307), DUP(op_MUL_307)), SN(32, 1)); + RzILOpPure *op_ADD_311 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_286), DUP(op_AND_286))), CAST(32, MSB(DUP(op_AND_286)), DUP(op_AND_286))), op_LSHIFT_310); + RzILOpPure *op_RSHIFT_315 = SHIFTRA(DUP(Rss), SN(32, 0x30)); + RzILOpPure *op_AND_318 = LOGAND(op_RSHIFT_315, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_324 = SHIFTRA(DUP(Rtt), SN(32, 0x30)); + RzILOpPure *op_AND_327 = LOGAND(op_RSHIFT_324, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_330 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_318), DUP(op_AND_318))), CAST(16, MSB(DUP(op_AND_318)), DUP(op_AND_318))), CAST(32, MSB(CAST(16, MSB(op_AND_327), DUP(op_AND_327))), CAST(16, MSB(DUP(op_AND_327)), DUP(op_AND_327)))); + RzILOpPure *op_LSHIFT_333 = SHIFTL0(CAST(64, MSB(op_MUL_330), DUP(op_MUL_330)), SN(32, 1)); + RzILOpPure *op_ADD_334 = ADD(op_ADD_311, op_LSHIFT_333); + RzILOpPure *op_RSHIFT_343 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_345 = LOGAND(op_RSHIFT_343, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_351 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_354 = 
LOGAND(op_RSHIFT_351, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_360 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_363 = LOGAND(op_RSHIFT_360, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_366 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_354), DUP(op_AND_354))), CAST(16, MSB(DUP(op_AND_354)), DUP(op_AND_354))), CAST(32, MSB(CAST(16, MSB(op_AND_363), DUP(op_AND_363))), CAST(16, MSB(DUP(op_AND_363)), DUP(op_AND_363)))); + RzILOpPure *op_LSHIFT_369 = SHIFTL0(CAST(64, MSB(op_MUL_366), DUP(op_MUL_366)), SN(32, 1)); + RzILOpPure *op_ADD_370 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_345), DUP(op_AND_345))), CAST(32, MSB(DUP(op_AND_345)), DUP(op_AND_345))), op_LSHIFT_369); + RzILOpPure *op_RSHIFT_374 = SHIFTRA(DUP(Rss), SN(32, 0x30)); + RzILOpPure *op_AND_377 = LOGAND(op_RSHIFT_374, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_383 = SHIFTRA(DUP(Rtt), SN(32, 0x30)); + RzILOpPure *op_AND_386 = LOGAND(op_RSHIFT_383, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_389 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_377), DUP(op_AND_377))), CAST(16, MSB(DUP(op_AND_377)), DUP(op_AND_377))), CAST(32, MSB(CAST(16, MSB(op_AND_386), DUP(op_AND_386))), CAST(16, MSB(DUP(op_AND_386)), DUP(op_AND_386)))); + RzILOpPure *op_LSHIFT_392 = SHIFTL0(CAST(64, MSB(op_MUL_389), DUP(op_MUL_389)), SN(32, 1)); + RzILOpPure *op_ADD_393 = ADD(op_ADD_370, op_LSHIFT_392); + RzILOpPure *op_EQ_394 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_ADD_334), SN(32, 0), SN(32, 0x20)), op_ADD_393); + RzILOpPure *op_RSHIFT_455 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_457 = LOGAND(op_RSHIFT_455, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_463 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_466 = LOGAND(op_RSHIFT_463, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_472 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_475 = LOGAND(op_RSHIFT_472, CAST(64, 
MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_478 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_466), DUP(op_AND_466))), CAST(16, MSB(DUP(op_AND_466)), DUP(op_AND_466))), CAST(32, MSB(CAST(16, MSB(op_AND_475), DUP(op_AND_475))), CAST(16, MSB(DUP(op_AND_475)), DUP(op_AND_475)))); + RzILOpPure *op_LSHIFT_481 = SHIFTL0(CAST(64, MSB(op_MUL_478), DUP(op_MUL_478)), SN(32, 1)); + RzILOpPure *op_ADD_482 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_457), DUP(op_AND_457))), CAST(32, MSB(DUP(op_AND_457)), DUP(op_AND_457))), op_LSHIFT_481); + RzILOpPure *op_RSHIFT_486 = SHIFTRA(DUP(Rss), SN(32, 0x30)); + RzILOpPure *op_AND_489 = LOGAND(op_RSHIFT_486, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_495 = SHIFTRA(DUP(Rtt), SN(32, 0x30)); + RzILOpPure *op_AND_498 = LOGAND(op_RSHIFT_495, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_501 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_489), DUP(op_AND_489))), CAST(16, MSB(DUP(op_AND_489)), DUP(op_AND_489))), CAST(32, MSB(CAST(16, MSB(op_AND_498), DUP(op_AND_498))), CAST(16, MSB(DUP(op_AND_498)), DUP(op_AND_498)))); + RzILOpPure *op_LSHIFT_504 = SHIFTL0(CAST(64, MSB(op_MUL_501), DUP(op_MUL_501)), SN(32, 1)); + RzILOpPure *op_ADD_505 = ADD(op_ADD_482, op_LSHIFT_504); + RzILOpPure *op_LT_508 = SLT(op_ADD_505, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_513 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_514 = NEG(op_LSHIFT_513); + RzILOpPure *op_LSHIFT_519 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_522 = SUB(op_LSHIFT_519, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_523 = ITE(op_LT_508, op_NEG_514, op_SUB_522); + RzILOpEffect *gcc_expr_524 = BRANCH(op_EQ_394, EMPTY(), set_usr_field_call_451); + + // h_tmp382 = HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) 
((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff))))) << 0x1)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff))))) << 0x1))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff))))) << 0x1) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_526 = SETL("h_tmp382", cond_523); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx > ...; + RzILOpEffect *seq_527 = SEQN(2, gcc_expr_524, op_ASSIGN_hybrid_tmp_526); + + // Rxx = ((Rxx & (~(0xffffffff << 0x20))) | ((((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff))))) << 0x1)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff))))) << 0x1)) ? 
((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff))))) << 0x1) : h_tmp382) & 0xffffffff) << 0x20)); + RzILOpPure *op_LSHIFT_275 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0x20)); + RzILOpPure *op_NOT_276 = LOGNOT(op_LSHIFT_275); + RzILOpPure *op_AND_277 = LOGAND(READ_REG(pkt, Rxx_op, false), op_NOT_276); + RzILOpPure *op_RSHIFT_398 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_400 = LOGAND(op_RSHIFT_398, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_406 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_409 = LOGAND(op_RSHIFT_406, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_415 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_418 = LOGAND(op_RSHIFT_415, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_421 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_409), DUP(op_AND_409))), CAST(16, MSB(DUP(op_AND_409)), DUP(op_AND_409))), CAST(32, MSB(CAST(16, MSB(op_AND_418), DUP(op_AND_418))), CAST(16, MSB(DUP(op_AND_418)), DUP(op_AND_418)))); + RzILOpPure *op_LSHIFT_424 = SHIFTL0(CAST(64, MSB(op_MUL_421), DUP(op_MUL_421)), SN(32, 1)); + RzILOpPure *op_ADD_425 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_400), DUP(op_AND_400))), CAST(32, MSB(DUP(op_AND_400)), DUP(op_AND_400))), op_LSHIFT_424); + RzILOpPure *op_RSHIFT_429 = SHIFTRA(DUP(Rss), SN(32, 0x30)); + RzILOpPure *op_AND_432 = LOGAND(op_RSHIFT_429, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_438 = SHIFTRA(DUP(Rtt), SN(32, 0x30)); + RzILOpPure *op_AND_441 = LOGAND(op_RSHIFT_438, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_444 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_432), DUP(op_AND_432))), CAST(16, MSB(DUP(op_AND_432)), DUP(op_AND_432))), CAST(32, MSB(CAST(16, 
MSB(op_AND_441), DUP(op_AND_441))), CAST(16, MSB(DUP(op_AND_441)), DUP(op_AND_441)))); + RzILOpPure *op_LSHIFT_447 = SHIFTL0(CAST(64, MSB(op_MUL_444), DUP(op_MUL_444)), SN(32, 1)); + RzILOpPure *op_ADD_448 = ADD(op_ADD_425, op_LSHIFT_447); + RzILOpPure *cond_528 = ITE(DUP(op_EQ_394), op_ADD_448, VARL("h_tmp382")); + RzILOpPure *op_AND_530 = LOGAND(cond_528, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_534 = SHIFTL0(op_AND_530, SN(32, 0x20)); + RzILOpPure *op_OR_535 = LOGOR(op_AND_277, op_LSHIFT_534); + RzILOpEffect *op_ASSIGN_536 = WRITE_REG(bundle, Rxx_op, op_OR_535); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((R ...; + RzILOpEffect *seq_537 = SEQN(2, seq_527, op_ASSIGN_536); + + RzILOpEffect *instruction_sequence = SEQN(2, seq_269, seq_537); + return instruction_sequence; +} + +// Rd = vdmpy(Rss,Rtt):rnd:sat +RzILOpEffect *hex_il_op_m2_vdmpyrs_s0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_166 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) << 0x0) + (((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff))))) << 0x0) + ((st64) 0x8000)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) << 0x0) + (((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff))))) << 0x0) 
+ ((st64) 0x8000))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) << 0x0) + (((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff))))) << 0x0) + ((st64) 0x8000) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_16 = SHIFTRA(Rss, SN(32, 0)); + RzILOpPure *op_AND_19 = LOGAND(op_RSHIFT_16, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_26 = SHIFTRA(Rtt, SN(32, 0)); + RzILOpPure *op_AND_29 = LOGAND(op_RSHIFT_26, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_32 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_19), DUP(op_AND_19))), CAST(16, MSB(DUP(op_AND_19)), DUP(op_AND_19))), CAST(32, MSB(CAST(16, MSB(op_AND_29), DUP(op_AND_29))), CAST(16, MSB(DUP(op_AND_29)), DUP(op_AND_29)))); + RzILOpPure *op_LSHIFT_35 = SHIFTL0(CAST(64, MSB(op_MUL_32), DUP(op_MUL_32)), SN(32, 0)); + RzILOpPure *op_RSHIFT_39 = SHIFTRA(DUP(Rss), SN(32, 16)); + RzILOpPure *op_AND_42 = LOGAND(op_RSHIFT_39, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_48 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_51 = LOGAND(op_RSHIFT_48, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_54 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_42), DUP(op_AND_42))), CAST(16, MSB(DUP(op_AND_42)), DUP(op_AND_42))), CAST(32, MSB(CAST(16, MSB(op_AND_51), DUP(op_AND_51))), CAST(16, MSB(DUP(op_AND_51)), DUP(op_AND_51)))); + RzILOpPure *op_LSHIFT_57 = SHIFTL0(CAST(64, MSB(op_MUL_54), DUP(op_MUL_54)), SN(32, 0)); + RzILOpPure *op_ADD_58 = ADD(op_LSHIFT_35, op_LSHIFT_57); + RzILOpPure *op_ADD_61 = ADD(op_ADD_58, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_70 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_73 = LOGAND(op_RSHIFT_70, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); 
+ RzILOpPure *op_RSHIFT_79 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_82 = LOGAND(op_RSHIFT_79, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_85 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_73), DUP(op_AND_73))), CAST(16, MSB(DUP(op_AND_73)), DUP(op_AND_73))), CAST(32, MSB(CAST(16, MSB(op_AND_82), DUP(op_AND_82))), CAST(16, MSB(DUP(op_AND_82)), DUP(op_AND_82)))); + RzILOpPure *op_LSHIFT_88 = SHIFTL0(CAST(64, MSB(op_MUL_85), DUP(op_MUL_85)), SN(32, 0)); + RzILOpPure *op_RSHIFT_92 = SHIFTRA(DUP(Rss), SN(32, 16)); + RzILOpPure *op_AND_95 = LOGAND(op_RSHIFT_92, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_101 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_104 = LOGAND(op_RSHIFT_101, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_107 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_95), DUP(op_AND_95))), CAST(16, MSB(DUP(op_AND_95)), DUP(op_AND_95))), CAST(32, MSB(CAST(16, MSB(op_AND_104), DUP(op_AND_104))), CAST(16, MSB(DUP(op_AND_104)), DUP(op_AND_104)))); + RzILOpPure *op_LSHIFT_110 = SHIFTL0(CAST(64, MSB(op_MUL_107), DUP(op_MUL_107)), SN(32, 0)); + RzILOpPure *op_ADD_111 = ADD(op_LSHIFT_88, op_LSHIFT_110); + RzILOpPure *op_ADD_114 = ADD(op_ADD_111, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_EQ_115 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_ADD_61), SN(32, 0), SN(32, 0x20)), op_ADD_114); + RzILOpPure *op_RSHIFT_170 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_173 = LOGAND(op_RSHIFT_170, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_179 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_182 = LOGAND(op_RSHIFT_179, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_185 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_173), DUP(op_AND_173))), CAST(16, MSB(DUP(op_AND_173)), DUP(op_AND_173))), CAST(32, MSB(CAST(16, MSB(op_AND_182), DUP(op_AND_182))), CAST(16, MSB(DUP(op_AND_182)), DUP(op_AND_182)))); + RzILOpPure *op_LSHIFT_188 = 
SHIFTL0(CAST(64, MSB(op_MUL_185), DUP(op_MUL_185)), SN(32, 0)); + RzILOpPure *op_RSHIFT_192 = SHIFTRA(DUP(Rss), SN(32, 16)); + RzILOpPure *op_AND_195 = LOGAND(op_RSHIFT_192, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_201 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_204 = LOGAND(op_RSHIFT_201, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_207 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_195), DUP(op_AND_195))), CAST(16, MSB(DUP(op_AND_195)), DUP(op_AND_195))), CAST(32, MSB(CAST(16, MSB(op_AND_204), DUP(op_AND_204))), CAST(16, MSB(DUP(op_AND_204)), DUP(op_AND_204)))); + RzILOpPure *op_LSHIFT_210 = SHIFTL0(CAST(64, MSB(op_MUL_207), DUP(op_MUL_207)), SN(32, 0)); + RzILOpPure *op_ADD_211 = ADD(op_LSHIFT_188, op_LSHIFT_210); + RzILOpPure *op_ADD_214 = ADD(op_ADD_211, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_LT_217 = SLT(op_ADD_214, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_222 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_223 = NEG(op_LSHIFT_222); + RzILOpPure *op_LSHIFT_228 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_231 = SUB(op_LSHIFT_228, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_232 = ITE(op_LT_217, op_NEG_223, op_SUB_231); + RzILOpEffect *gcc_expr_233 = BRANCH(op_EQ_115, EMPTY(), set_usr_field_call_166); + + // h_tmp383 = HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) << 0x0) + (((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff))))) << 0x0) + ((st64) 0x8000)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) << 0x0) + (((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff))))) << 0x0) + ((st64) 0x8000))) {{}} else {set_usr_field(bundle, 
HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) << 0x0) + (((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff))))) << 0x0) + ((st64) 0x8000) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_235 = SETL("h_tmp383", cond_232); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16 ...; + RzILOpEffect *seq_236 = SEQN(2, gcc_expr_233, op_ASSIGN_hybrid_tmp_235); + + // Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xffff << 0x0)))) | (((ut64) (((st32) ((st16) ((((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) << 0x0) + (((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff))))) << 0x0) + ((st64) 0x8000)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) << 0x0) + (((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff))))) << 0x0) + ((st64) 0x8000)) ? 
(((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) << 0x0) + (((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff))))) << 0x0) + ((st64) 0x8000) : h_tmp383) >> 0x10) & ((st64) 0xffff)))) & 0xffff)) << 0x0))); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(SN(64, 0xffff), SN(32, 0)); + RzILOpPure *op_NOT_6 = LOGNOT(op_LSHIFT_5); + RzILOpPure *op_AND_8 = LOGAND(CAST(64, MSB(READ_REG(pkt, Rd_op, true)), READ_REG(pkt, Rd_op, true)), op_NOT_6); + RzILOpPure *op_RSHIFT_119 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_122 = LOGAND(op_RSHIFT_119, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_128 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_131 = LOGAND(op_RSHIFT_128, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_134 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_122), DUP(op_AND_122))), CAST(16, MSB(DUP(op_AND_122)), DUP(op_AND_122))), CAST(32, MSB(CAST(16, MSB(op_AND_131), DUP(op_AND_131))), CAST(16, MSB(DUP(op_AND_131)), DUP(op_AND_131)))); + RzILOpPure *op_LSHIFT_137 = SHIFTL0(CAST(64, MSB(op_MUL_134), DUP(op_MUL_134)), SN(32, 0)); + RzILOpPure *op_RSHIFT_141 = SHIFTRA(DUP(Rss), SN(32, 16)); + RzILOpPure *op_AND_144 = LOGAND(op_RSHIFT_141, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_150 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_153 = LOGAND(op_RSHIFT_150, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_156 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_144), DUP(op_AND_144))), CAST(16, MSB(DUP(op_AND_144)), DUP(op_AND_144))), CAST(32, MSB(CAST(16, MSB(op_AND_153), DUP(op_AND_153))), CAST(16, MSB(DUP(op_AND_153)), DUP(op_AND_153)))); + RzILOpPure *op_LSHIFT_159 = SHIFTL0(CAST(64, MSB(op_MUL_156), DUP(op_MUL_156)), SN(32, 0)); + RzILOpPure *op_ADD_160 = ADD(op_LSHIFT_137, op_LSHIFT_159); + RzILOpPure *op_ADD_163 = ADD(op_ADD_160, CAST(64, MSB(SN(32, 
0x8000)), SN(32, 0x8000))); + RzILOpPure *cond_237 = ITE(DUP(op_EQ_115), op_ADD_163, VARL("h_tmp383")); + RzILOpPure *op_RSHIFT_241 = SHIFTRA(cond_237, SN(32, 16)); + RzILOpPure *op_AND_244 = LOGAND(op_RSHIFT_241, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_AND_248 = LOGAND(CAST(32, MSB(CAST(16, MSB(op_AND_244), DUP(op_AND_244))), CAST(16, MSB(DUP(op_AND_244)), DUP(op_AND_244))), SN(32, 0xffff)); + RzILOpPure *op_LSHIFT_253 = SHIFTL0(CAST(64, IL_FALSE, op_AND_248), SN(32, 0)); + RzILOpPure *op_OR_255 = LOGOR(CAST(64, IL_FALSE, op_AND_8), op_LSHIFT_253); + RzILOpEffect *op_ASSIGN_257 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, op_OR_255)); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) (( ...; + RzILOpEffect *seq_258 = SEQN(2, seq_236, op_ASSIGN_257); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_423 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) << 0x0) + (((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff))))) << 0x0) + ((st64) 0x8000)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) << 0x0) + (((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff))))) << 0x0) + ((st64) 0x8000))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) << 0x0) + (((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff))))) << 0x0) + ((st64) 0x8000) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_274 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_277 = LOGAND(op_RSHIFT_274, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_283 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_286 = LOGAND(op_RSHIFT_283, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_289 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_277), DUP(op_AND_277))), CAST(16, MSB(DUP(op_AND_277)), DUP(op_AND_277))), CAST(32, MSB(CAST(16, MSB(op_AND_286), DUP(op_AND_286))), CAST(16, MSB(DUP(op_AND_286)), DUP(op_AND_286)))); + RzILOpPure *op_LSHIFT_292 = SHIFTL0(CAST(64, MSB(op_MUL_289), DUP(op_MUL_289)), SN(32, 0)); + RzILOpPure *op_RSHIFT_296 = SHIFTRA(DUP(Rss), SN(32, 0x30)); + RzILOpPure *op_AND_299 = LOGAND(op_RSHIFT_296, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_305 = SHIFTRA(DUP(Rtt), SN(32, 0x30)); + RzILOpPure *op_AND_308 = LOGAND(op_RSHIFT_305, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_311 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_299), DUP(op_AND_299))), CAST(16, MSB(DUP(op_AND_299)), DUP(op_AND_299))), CAST(32, MSB(CAST(16, MSB(op_AND_308), DUP(op_AND_308))), CAST(16, MSB(DUP(op_AND_308)), DUP(op_AND_308)))); + RzILOpPure *op_LSHIFT_314 = SHIFTL0(CAST(64, MSB(op_MUL_311), DUP(op_MUL_311)), SN(32, 0)); + RzILOpPure *op_ADD_315 = ADD(op_LSHIFT_292, op_LSHIFT_314); + RzILOpPure *op_ADD_318 = ADD(op_ADD_315, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_327 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_330 = LOGAND(op_RSHIFT_327, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_336 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_339 = LOGAND(op_RSHIFT_336, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_342 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_330), DUP(op_AND_330))), CAST(16, MSB(DUP(op_AND_330)), DUP(op_AND_330))), CAST(32, 
MSB(CAST(16, MSB(op_AND_339), DUP(op_AND_339))), CAST(16, MSB(DUP(op_AND_339)), DUP(op_AND_339)))); + RzILOpPure *op_LSHIFT_345 = SHIFTL0(CAST(64, MSB(op_MUL_342), DUP(op_MUL_342)), SN(32, 0)); + RzILOpPure *op_RSHIFT_349 = SHIFTRA(DUP(Rss), SN(32, 0x30)); + RzILOpPure *op_AND_352 = LOGAND(op_RSHIFT_349, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_358 = SHIFTRA(DUP(Rtt), SN(32, 0x30)); + RzILOpPure *op_AND_361 = LOGAND(op_RSHIFT_358, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_364 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_352), DUP(op_AND_352))), CAST(16, MSB(DUP(op_AND_352)), DUP(op_AND_352))), CAST(32, MSB(CAST(16, MSB(op_AND_361), DUP(op_AND_361))), CAST(16, MSB(DUP(op_AND_361)), DUP(op_AND_361)))); + RzILOpPure *op_LSHIFT_367 = SHIFTL0(CAST(64, MSB(op_MUL_364), DUP(op_MUL_364)), SN(32, 0)); + RzILOpPure *op_ADD_368 = ADD(op_LSHIFT_345, op_LSHIFT_367); + RzILOpPure *op_ADD_371 = ADD(op_ADD_368, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_EQ_372 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_ADD_318), SN(32, 0), SN(32, 0x20)), op_ADD_371); + RzILOpPure *op_RSHIFT_427 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_430 = LOGAND(op_RSHIFT_427, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_436 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_439 = LOGAND(op_RSHIFT_436, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_442 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_430), DUP(op_AND_430))), CAST(16, MSB(DUP(op_AND_430)), DUP(op_AND_430))), CAST(32, MSB(CAST(16, MSB(op_AND_439), DUP(op_AND_439))), CAST(16, MSB(DUP(op_AND_439)), DUP(op_AND_439)))); + RzILOpPure *op_LSHIFT_445 = SHIFTL0(CAST(64, MSB(op_MUL_442), DUP(op_MUL_442)), SN(32, 0)); + RzILOpPure *op_RSHIFT_449 = SHIFTRA(DUP(Rss), SN(32, 0x30)); + RzILOpPure *op_AND_452 = LOGAND(op_RSHIFT_449, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_458 = SHIFTRA(DUP(Rtt), SN(32, 
0x30)); + RzILOpPure *op_AND_461 = LOGAND(op_RSHIFT_458, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_464 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_452), DUP(op_AND_452))), CAST(16, MSB(DUP(op_AND_452)), DUP(op_AND_452))), CAST(32, MSB(CAST(16, MSB(op_AND_461), DUP(op_AND_461))), CAST(16, MSB(DUP(op_AND_461)), DUP(op_AND_461)))); + RzILOpPure *op_LSHIFT_467 = SHIFTL0(CAST(64, MSB(op_MUL_464), DUP(op_MUL_464)), SN(32, 0)); + RzILOpPure *op_ADD_468 = ADD(op_LSHIFT_445, op_LSHIFT_467); + RzILOpPure *op_ADD_471 = ADD(op_ADD_468, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_LT_474 = SLT(op_ADD_471, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_479 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_480 = NEG(op_LSHIFT_479); + RzILOpPure *op_LSHIFT_485 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_488 = SUB(op_LSHIFT_485, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_489 = ITE(op_LT_474, op_NEG_480, op_SUB_488); + RzILOpEffect *gcc_expr_490 = BRANCH(op_EQ_372, EMPTY(), set_usr_field_call_423); + + // h_tmp384 = HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) << 0x0) + (((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff))))) << 0x0) + ((st64) 0x8000)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) << 0x0) + (((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff))))) << 0x0) + ((st64) 0x8000))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) << 0x0) + (((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 
0xffff))))) << 0x0) + ((st64) 0x8000) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_492 = SETL("h_tmp384", cond_489); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16 ...; + RzILOpEffect *seq_493 = SEQN(2, gcc_expr_490, op_ASSIGN_hybrid_tmp_492); + + // Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xffff << 0x10)))) | (((ut64) (((st32) ((st16) ((((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) << 0x0) + (((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff))))) << 0x0) + ((st64) 0x8000)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) << 0x0) + (((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff))))) << 0x0) + ((st64) 0x8000)) ? 
(((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) << 0x0) + (((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff))))) << 0x0) + ((st64) 0x8000) : h_tmp384) >> 0x10) & ((st64) 0xffff)))) & 0xffff)) << 0x10))); + RzILOpPure *op_LSHIFT_264 = SHIFTL0(SN(64, 0xffff), SN(32, 16)); + RzILOpPure *op_NOT_265 = LOGNOT(op_LSHIFT_264); + RzILOpPure *op_AND_267 = LOGAND(CAST(64, MSB(READ_REG(pkt, Rd_op, true)), READ_REG(pkt, Rd_op, true)), op_NOT_265); + RzILOpPure *op_RSHIFT_376 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_379 = LOGAND(op_RSHIFT_376, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_385 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_388 = LOGAND(op_RSHIFT_385, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_391 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_379), DUP(op_AND_379))), CAST(16, MSB(DUP(op_AND_379)), DUP(op_AND_379))), CAST(32, MSB(CAST(16, MSB(op_AND_388), DUP(op_AND_388))), CAST(16, MSB(DUP(op_AND_388)), DUP(op_AND_388)))); + RzILOpPure *op_LSHIFT_394 = SHIFTL0(CAST(64, MSB(op_MUL_391), DUP(op_MUL_391)), SN(32, 0)); + RzILOpPure *op_RSHIFT_398 = SHIFTRA(DUP(Rss), SN(32, 0x30)); + RzILOpPure *op_AND_401 = LOGAND(op_RSHIFT_398, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_407 = SHIFTRA(DUP(Rtt), SN(32, 0x30)); + RzILOpPure *op_AND_410 = LOGAND(op_RSHIFT_407, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_413 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_401), DUP(op_AND_401))), CAST(16, MSB(DUP(op_AND_401)), DUP(op_AND_401))), CAST(32, MSB(CAST(16, MSB(op_AND_410), DUP(op_AND_410))), CAST(16, MSB(DUP(op_AND_410)), DUP(op_AND_410)))); + RzILOpPure *op_LSHIFT_416 = SHIFTL0(CAST(64, MSB(op_MUL_413), DUP(op_MUL_413)), SN(32, 0)); + RzILOpPure *op_ADD_417 = ADD(op_LSHIFT_394, op_LSHIFT_416); + RzILOpPure *op_ADD_420 = ADD(op_ADD_417, 
CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *cond_494 = ITE(DUP(op_EQ_372), op_ADD_420, VARL("h_tmp384")); + RzILOpPure *op_RSHIFT_498 = SHIFTRA(cond_494, SN(32, 16)); + RzILOpPure *op_AND_501 = LOGAND(op_RSHIFT_498, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_AND_505 = LOGAND(CAST(32, MSB(CAST(16, MSB(op_AND_501), DUP(op_AND_501))), CAST(16, MSB(DUP(op_AND_501)), DUP(op_AND_501))), SN(32, 0xffff)); + RzILOpPure *op_LSHIFT_510 = SHIFTL0(CAST(64, IL_FALSE, op_AND_505), SN(32, 16)); + RzILOpPure *op_OR_512 = LOGOR(CAST(64, IL_FALSE, op_AND_267), op_LSHIFT_510); + RzILOpEffect *op_ASSIGN_514 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, op_OR_512)); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) (( ...; + RzILOpEffect *seq_515 = SEQN(2, seq_493, op_ASSIGN_514); + + RzILOpEffect *instruction_sequence = SEQN(2, seq_258, seq_515); + return instruction_sequence; +} + +// Rd = vdmpy(Rss,Rtt):<<1:rnd:sat +RzILOpEffect *hex_il_op_m2_vdmpyrs_s1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_166 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff))))) << 0x1) + ((st64) 0x8000)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) << 0x1) + 
(((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff))))) << 0x1) + ((st64) 0x8000))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff))))) << 0x1) + ((st64) 0x8000) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_16 = SHIFTRA(Rss, SN(32, 0)); + RzILOpPure *op_AND_19 = LOGAND(op_RSHIFT_16, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_26 = SHIFTRA(Rtt, SN(32, 0)); + RzILOpPure *op_AND_29 = LOGAND(op_RSHIFT_26, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_32 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_19), DUP(op_AND_19))), CAST(16, MSB(DUP(op_AND_19)), DUP(op_AND_19))), CAST(32, MSB(CAST(16, MSB(op_AND_29), DUP(op_AND_29))), CAST(16, MSB(DUP(op_AND_29)), DUP(op_AND_29)))); + RzILOpPure *op_LSHIFT_35 = SHIFTL0(CAST(64, MSB(op_MUL_32), DUP(op_MUL_32)), SN(32, 1)); + RzILOpPure *op_RSHIFT_39 = SHIFTRA(DUP(Rss), SN(32, 16)); + RzILOpPure *op_AND_42 = LOGAND(op_RSHIFT_39, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_48 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_51 = LOGAND(op_RSHIFT_48, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_54 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_42), DUP(op_AND_42))), CAST(16, MSB(DUP(op_AND_42)), DUP(op_AND_42))), CAST(32, MSB(CAST(16, MSB(op_AND_51), DUP(op_AND_51))), CAST(16, MSB(DUP(op_AND_51)), DUP(op_AND_51)))); + RzILOpPure *op_LSHIFT_57 = SHIFTL0(CAST(64, MSB(op_MUL_54), DUP(op_MUL_54)), SN(32, 1)); + RzILOpPure *op_ADD_58 = ADD(op_LSHIFT_35, op_LSHIFT_57); + RzILOpPure *op_ADD_61 = ADD(op_ADD_58, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_70 = 
SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_73 = LOGAND(op_RSHIFT_70, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_79 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_82 = LOGAND(op_RSHIFT_79, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_85 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_73), DUP(op_AND_73))), CAST(16, MSB(DUP(op_AND_73)), DUP(op_AND_73))), CAST(32, MSB(CAST(16, MSB(op_AND_82), DUP(op_AND_82))), CAST(16, MSB(DUP(op_AND_82)), DUP(op_AND_82)))); + RzILOpPure *op_LSHIFT_88 = SHIFTL0(CAST(64, MSB(op_MUL_85), DUP(op_MUL_85)), SN(32, 1)); + RzILOpPure *op_RSHIFT_92 = SHIFTRA(DUP(Rss), SN(32, 16)); + RzILOpPure *op_AND_95 = LOGAND(op_RSHIFT_92, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_101 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_104 = LOGAND(op_RSHIFT_101, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_107 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_95), DUP(op_AND_95))), CAST(16, MSB(DUP(op_AND_95)), DUP(op_AND_95))), CAST(32, MSB(CAST(16, MSB(op_AND_104), DUP(op_AND_104))), CAST(16, MSB(DUP(op_AND_104)), DUP(op_AND_104)))); + RzILOpPure *op_LSHIFT_110 = SHIFTL0(CAST(64, MSB(op_MUL_107), DUP(op_MUL_107)), SN(32, 1)); + RzILOpPure *op_ADD_111 = ADD(op_LSHIFT_88, op_LSHIFT_110); + RzILOpPure *op_ADD_114 = ADD(op_ADD_111, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_EQ_115 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_ADD_61), SN(32, 0), SN(32, 0x20)), op_ADD_114); + RzILOpPure *op_RSHIFT_170 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_173 = LOGAND(op_RSHIFT_170, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_179 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_182 = LOGAND(op_RSHIFT_179, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_185 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_173), DUP(op_AND_173))), CAST(16, MSB(DUP(op_AND_173)), DUP(op_AND_173))), CAST(32, MSB(CAST(16, 
MSB(op_AND_182), DUP(op_AND_182))), CAST(16, MSB(DUP(op_AND_182)), DUP(op_AND_182)))); + RzILOpPure *op_LSHIFT_188 = SHIFTL0(CAST(64, MSB(op_MUL_185), DUP(op_MUL_185)), SN(32, 1)); + RzILOpPure *op_RSHIFT_192 = SHIFTRA(DUP(Rss), SN(32, 16)); + RzILOpPure *op_AND_195 = LOGAND(op_RSHIFT_192, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_201 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_204 = LOGAND(op_RSHIFT_201, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_207 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_195), DUP(op_AND_195))), CAST(16, MSB(DUP(op_AND_195)), DUP(op_AND_195))), CAST(32, MSB(CAST(16, MSB(op_AND_204), DUP(op_AND_204))), CAST(16, MSB(DUP(op_AND_204)), DUP(op_AND_204)))); + RzILOpPure *op_LSHIFT_210 = SHIFTL0(CAST(64, MSB(op_MUL_207), DUP(op_MUL_207)), SN(32, 1)); + RzILOpPure *op_ADD_211 = ADD(op_LSHIFT_188, op_LSHIFT_210); + RzILOpPure *op_ADD_214 = ADD(op_ADD_211, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_LT_217 = SLT(op_ADD_214, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_222 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_223 = NEG(op_LSHIFT_222); + RzILOpPure *op_LSHIFT_228 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_231 = SUB(op_LSHIFT_228, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_232 = ITE(op_LT_217, op_NEG_223, op_SUB_231); + RzILOpEffect *gcc_expr_233 = BRANCH(op_EQ_115, EMPTY(), set_usr_field_call_166); + + // h_tmp385 = HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff))))) << 0x1) + ((st64) 0x8000)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * 
((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff))))) << 0x1) + ((st64) 0x8000))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff))))) << 0x1) + ((st64) 0x8000) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_235 = SETL("h_tmp385", cond_232); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16 ...; + RzILOpEffect *seq_236 = SEQN(2, gcc_expr_233, op_ASSIGN_hybrid_tmp_235); + + // Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xffff << 0x0)))) | (((ut64) (((st32) ((st16) ((((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff))))) << 0x1) + ((st64) 0x8000)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff))))) << 0x1) + ((st64) 0x8000)) ? 
(((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff))))) << 0x1) + ((st64) 0x8000) : h_tmp385) >> 0x10) & ((st64) 0xffff)))) & 0xffff)) << 0x0))); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(SN(64, 0xffff), SN(32, 0)); + RzILOpPure *op_NOT_6 = LOGNOT(op_LSHIFT_5); + RzILOpPure *op_AND_8 = LOGAND(CAST(64, MSB(READ_REG(pkt, Rd_op, true)), READ_REG(pkt, Rd_op, true)), op_NOT_6); + RzILOpPure *op_RSHIFT_119 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_122 = LOGAND(op_RSHIFT_119, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_128 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_131 = LOGAND(op_RSHIFT_128, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_134 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_122), DUP(op_AND_122))), CAST(16, MSB(DUP(op_AND_122)), DUP(op_AND_122))), CAST(32, MSB(CAST(16, MSB(op_AND_131), DUP(op_AND_131))), CAST(16, MSB(DUP(op_AND_131)), DUP(op_AND_131)))); + RzILOpPure *op_LSHIFT_137 = SHIFTL0(CAST(64, MSB(op_MUL_134), DUP(op_MUL_134)), SN(32, 1)); + RzILOpPure *op_RSHIFT_141 = SHIFTRA(DUP(Rss), SN(32, 16)); + RzILOpPure *op_AND_144 = LOGAND(op_RSHIFT_141, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_150 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_153 = LOGAND(op_RSHIFT_150, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_156 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_144), DUP(op_AND_144))), CAST(16, MSB(DUP(op_AND_144)), DUP(op_AND_144))), CAST(32, MSB(CAST(16, MSB(op_AND_153), DUP(op_AND_153))), CAST(16, MSB(DUP(op_AND_153)), DUP(op_AND_153)))); + RzILOpPure *op_LSHIFT_159 = SHIFTL0(CAST(64, MSB(op_MUL_156), DUP(op_MUL_156)), SN(32, 1)); + RzILOpPure *op_ADD_160 = ADD(op_LSHIFT_137, op_LSHIFT_159); + RzILOpPure *op_ADD_163 = ADD(op_ADD_160, CAST(64, MSB(SN(32, 
0x8000)), SN(32, 0x8000))); + RzILOpPure *cond_237 = ITE(DUP(op_EQ_115), op_ADD_163, VARL("h_tmp385")); + RzILOpPure *op_RSHIFT_241 = SHIFTRA(cond_237, SN(32, 16)); + RzILOpPure *op_AND_244 = LOGAND(op_RSHIFT_241, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_AND_248 = LOGAND(CAST(32, MSB(CAST(16, MSB(op_AND_244), DUP(op_AND_244))), CAST(16, MSB(DUP(op_AND_244)), DUP(op_AND_244))), SN(32, 0xffff)); + RzILOpPure *op_LSHIFT_253 = SHIFTL0(CAST(64, IL_FALSE, op_AND_248), SN(32, 0)); + RzILOpPure *op_OR_255 = LOGOR(CAST(64, IL_FALSE, op_AND_8), op_LSHIFT_253); + RzILOpEffect *op_ASSIGN_257 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, op_OR_255)); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) (( ...; + RzILOpEffect *seq_258 = SEQN(2, seq_236, op_ASSIGN_257); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_423 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff))))) << 0x1) + ((st64) 0x8000)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff))))) << 0x1) + ((st64) 0x8000))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff))))) << 0x1) + ((st64) 0x8000) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_274 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_277 = LOGAND(op_RSHIFT_274, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_283 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_286 = LOGAND(op_RSHIFT_283, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_289 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_277), DUP(op_AND_277))), CAST(16, MSB(DUP(op_AND_277)), DUP(op_AND_277))), CAST(32, MSB(CAST(16, MSB(op_AND_286), DUP(op_AND_286))), CAST(16, MSB(DUP(op_AND_286)), DUP(op_AND_286)))); + RzILOpPure *op_LSHIFT_292 = SHIFTL0(CAST(64, MSB(op_MUL_289), DUP(op_MUL_289)), SN(32, 1)); + RzILOpPure *op_RSHIFT_296 = SHIFTRA(DUP(Rss), SN(32, 0x30)); + RzILOpPure *op_AND_299 = LOGAND(op_RSHIFT_296, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_305 = SHIFTRA(DUP(Rtt), SN(32, 0x30)); + RzILOpPure *op_AND_308 = LOGAND(op_RSHIFT_305, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_311 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_299), DUP(op_AND_299))), CAST(16, MSB(DUP(op_AND_299)), DUP(op_AND_299))), CAST(32, MSB(CAST(16, MSB(op_AND_308), DUP(op_AND_308))), CAST(16, MSB(DUP(op_AND_308)), DUP(op_AND_308)))); + RzILOpPure *op_LSHIFT_314 = SHIFTL0(CAST(64, MSB(op_MUL_311), DUP(op_MUL_311)), SN(32, 1)); + RzILOpPure *op_ADD_315 = ADD(op_LSHIFT_292, op_LSHIFT_314); + RzILOpPure *op_ADD_318 = ADD(op_ADD_315, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_327 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_330 = LOGAND(op_RSHIFT_327, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_336 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_339 = LOGAND(op_RSHIFT_336, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_342 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_330), DUP(op_AND_330))), CAST(16, MSB(DUP(op_AND_330)), DUP(op_AND_330))), CAST(32, 
MSB(CAST(16, MSB(op_AND_339), DUP(op_AND_339))), CAST(16, MSB(DUP(op_AND_339)), DUP(op_AND_339)))); + RzILOpPure *op_LSHIFT_345 = SHIFTL0(CAST(64, MSB(op_MUL_342), DUP(op_MUL_342)), SN(32, 1)); + RzILOpPure *op_RSHIFT_349 = SHIFTRA(DUP(Rss), SN(32, 0x30)); + RzILOpPure *op_AND_352 = LOGAND(op_RSHIFT_349, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_358 = SHIFTRA(DUP(Rtt), SN(32, 0x30)); + RzILOpPure *op_AND_361 = LOGAND(op_RSHIFT_358, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_364 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_352), DUP(op_AND_352))), CAST(16, MSB(DUP(op_AND_352)), DUP(op_AND_352))), CAST(32, MSB(CAST(16, MSB(op_AND_361), DUP(op_AND_361))), CAST(16, MSB(DUP(op_AND_361)), DUP(op_AND_361)))); + RzILOpPure *op_LSHIFT_367 = SHIFTL0(CAST(64, MSB(op_MUL_364), DUP(op_MUL_364)), SN(32, 1)); + RzILOpPure *op_ADD_368 = ADD(op_LSHIFT_345, op_LSHIFT_367); + RzILOpPure *op_ADD_371 = ADD(op_ADD_368, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_EQ_372 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_ADD_318), SN(32, 0), SN(32, 0x20)), op_ADD_371); + RzILOpPure *op_RSHIFT_427 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_430 = LOGAND(op_RSHIFT_427, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_436 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_439 = LOGAND(op_RSHIFT_436, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_442 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_430), DUP(op_AND_430))), CAST(16, MSB(DUP(op_AND_430)), DUP(op_AND_430))), CAST(32, MSB(CAST(16, MSB(op_AND_439), DUP(op_AND_439))), CAST(16, MSB(DUP(op_AND_439)), DUP(op_AND_439)))); + RzILOpPure *op_LSHIFT_445 = SHIFTL0(CAST(64, MSB(op_MUL_442), DUP(op_MUL_442)), SN(32, 1)); + RzILOpPure *op_RSHIFT_449 = SHIFTRA(DUP(Rss), SN(32, 0x30)); + RzILOpPure *op_AND_452 = LOGAND(op_RSHIFT_449, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_458 = SHIFTRA(DUP(Rtt), SN(32, 
0x30)); + RzILOpPure *op_AND_461 = LOGAND(op_RSHIFT_458, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_464 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_452), DUP(op_AND_452))), CAST(16, MSB(DUP(op_AND_452)), DUP(op_AND_452))), CAST(32, MSB(CAST(16, MSB(op_AND_461), DUP(op_AND_461))), CAST(16, MSB(DUP(op_AND_461)), DUP(op_AND_461)))); + RzILOpPure *op_LSHIFT_467 = SHIFTL0(CAST(64, MSB(op_MUL_464), DUP(op_MUL_464)), SN(32, 1)); + RzILOpPure *op_ADD_468 = ADD(op_LSHIFT_445, op_LSHIFT_467); + RzILOpPure *op_ADD_471 = ADD(op_ADD_468, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_LT_474 = SLT(op_ADD_471, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_479 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_480 = NEG(op_LSHIFT_479); + RzILOpPure *op_LSHIFT_485 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_488 = SUB(op_LSHIFT_485, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_489 = ITE(op_LT_474, op_NEG_480, op_SUB_488); + RzILOpEffect *gcc_expr_490 = BRANCH(op_EQ_372, EMPTY(), set_usr_field_call_423); + + // h_tmp386 = HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff))))) << 0x1) + ((st64) 0x8000)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff))))) << 0x1) + ((st64) 0x8000))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 
0xffff))))) << 0x1) + ((st64) 0x8000) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_492 = SETL("h_tmp386", cond_489); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16 ...; + RzILOpEffect *seq_493 = SEQN(2, gcc_expr_490, op_ASSIGN_hybrid_tmp_492); + + // Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xffff << 0x10)))) | (((ut64) (((st32) ((st16) ((((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff))))) << 0x1) + ((st64) 0x8000)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff))))) << 0x1) + ((st64) 0x8000)) ? 
(((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff))))) << 0x1) + ((st64) 0x8000) : h_tmp386) >> 0x10) & ((st64) 0xffff)))) & 0xffff)) << 0x10))); + RzILOpPure *op_LSHIFT_264 = SHIFTL0(SN(64, 0xffff), SN(32, 16)); + RzILOpPure *op_NOT_265 = LOGNOT(op_LSHIFT_264); + RzILOpPure *op_AND_267 = LOGAND(CAST(64, MSB(READ_REG(pkt, Rd_op, true)), READ_REG(pkt, Rd_op, true)), op_NOT_265); + RzILOpPure *op_RSHIFT_376 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_379 = LOGAND(op_RSHIFT_376, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_385 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_388 = LOGAND(op_RSHIFT_385, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_391 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_379), DUP(op_AND_379))), CAST(16, MSB(DUP(op_AND_379)), DUP(op_AND_379))), CAST(32, MSB(CAST(16, MSB(op_AND_388), DUP(op_AND_388))), CAST(16, MSB(DUP(op_AND_388)), DUP(op_AND_388)))); + RzILOpPure *op_LSHIFT_394 = SHIFTL0(CAST(64, MSB(op_MUL_391), DUP(op_MUL_391)), SN(32, 1)); + RzILOpPure *op_RSHIFT_398 = SHIFTRA(DUP(Rss), SN(32, 0x30)); + RzILOpPure *op_AND_401 = LOGAND(op_RSHIFT_398, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_407 = SHIFTRA(DUP(Rtt), SN(32, 0x30)); + RzILOpPure *op_AND_410 = LOGAND(op_RSHIFT_407, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_413 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_401), DUP(op_AND_401))), CAST(16, MSB(DUP(op_AND_401)), DUP(op_AND_401))), CAST(32, MSB(CAST(16, MSB(op_AND_410), DUP(op_AND_410))), CAST(16, MSB(DUP(op_AND_410)), DUP(op_AND_410)))); + RzILOpPure *op_LSHIFT_416 = SHIFTL0(CAST(64, MSB(op_MUL_413), DUP(op_MUL_413)), SN(32, 1)); + RzILOpPure *op_ADD_417 = ADD(op_LSHIFT_394, op_LSHIFT_416); + RzILOpPure *op_ADD_420 = ADD(op_ADD_417, 
CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *cond_494 = ITE(DUP(op_EQ_372), op_ADD_420, VARL("h_tmp386")); + RzILOpPure *op_RSHIFT_498 = SHIFTRA(cond_494, SN(32, 16)); + RzILOpPure *op_AND_501 = LOGAND(op_RSHIFT_498, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_AND_505 = LOGAND(CAST(32, MSB(CAST(16, MSB(op_AND_501), DUP(op_AND_501))), CAST(16, MSB(DUP(op_AND_501)), DUP(op_AND_501))), SN(32, 0xffff)); + RzILOpPure *op_LSHIFT_510 = SHIFTL0(CAST(64, IL_FALSE, op_AND_505), SN(32, 16)); + RzILOpPure *op_OR_512 = LOGOR(CAST(64, IL_FALSE, op_AND_267), op_LSHIFT_510); + RzILOpEffect *op_ASSIGN_514 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, op_OR_512)); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) (( ...; + RzILOpEffect *seq_515 = SEQN(2, seq_493, op_ASSIGN_514); + + RzILOpEffect *instruction_sequence = SEQN(2, seq_258, seq_515); + return instruction_sequence; +} + +// Rdd = vdmpy(Rss,Rtt):sat +RzILOpEffect *hex_il_op_m2_vdmpys_s0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_156 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) << 0x0) + (((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff))))) << 0x0)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) << 0x0) + (((st64) ((st32) ((st16) 
((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff))))) << 0x0))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) << 0x0) + (((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff))))) << 0x0) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_15 = SHIFTRA(Rss, SN(32, 0)); + RzILOpPure *op_AND_18 = LOGAND(op_RSHIFT_15, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_25 = SHIFTRA(Rtt, SN(32, 0)); + RzILOpPure *op_AND_28 = LOGAND(op_RSHIFT_25, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_31 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_18), DUP(op_AND_18))), CAST(16, MSB(DUP(op_AND_18)), DUP(op_AND_18))), CAST(32, MSB(CAST(16, MSB(op_AND_28), DUP(op_AND_28))), CAST(16, MSB(DUP(op_AND_28)), DUP(op_AND_28)))); + RzILOpPure *op_LSHIFT_34 = SHIFTL0(CAST(64, MSB(op_MUL_31), DUP(op_MUL_31)), SN(32, 0)); + RzILOpPure *op_RSHIFT_38 = SHIFTRA(DUP(Rss), SN(32, 16)); + RzILOpPure *op_AND_41 = LOGAND(op_RSHIFT_38, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_47 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_50 = LOGAND(op_RSHIFT_47, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_53 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_41), DUP(op_AND_41))), CAST(16, MSB(DUP(op_AND_41)), DUP(op_AND_41))), CAST(32, MSB(CAST(16, MSB(op_AND_50), DUP(op_AND_50))), CAST(16, MSB(DUP(op_AND_50)), DUP(op_AND_50)))); + RzILOpPure *op_LSHIFT_56 = SHIFTL0(CAST(64, MSB(op_MUL_53), DUP(op_MUL_53)), SN(32, 0)); + RzILOpPure *op_ADD_57 = ADD(op_LSHIFT_34, op_LSHIFT_56); + RzILOpPure *op_RSHIFT_66 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_69 = LOGAND(op_RSHIFT_66, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_75 
= SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_78 = LOGAND(op_RSHIFT_75, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_81 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_69), DUP(op_AND_69))), CAST(16, MSB(DUP(op_AND_69)), DUP(op_AND_69))), CAST(32, MSB(CAST(16, MSB(op_AND_78), DUP(op_AND_78))), CAST(16, MSB(DUP(op_AND_78)), DUP(op_AND_78)))); + RzILOpPure *op_LSHIFT_84 = SHIFTL0(CAST(64, MSB(op_MUL_81), DUP(op_MUL_81)), SN(32, 0)); + RzILOpPure *op_RSHIFT_88 = SHIFTRA(DUP(Rss), SN(32, 16)); + RzILOpPure *op_AND_91 = LOGAND(op_RSHIFT_88, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_97 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_100 = LOGAND(op_RSHIFT_97, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_103 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_91), DUP(op_AND_91))), CAST(16, MSB(DUP(op_AND_91)), DUP(op_AND_91))), CAST(32, MSB(CAST(16, MSB(op_AND_100), DUP(op_AND_100))), CAST(16, MSB(DUP(op_AND_100)), DUP(op_AND_100)))); + RzILOpPure *op_LSHIFT_106 = SHIFTL0(CAST(64, MSB(op_MUL_103), DUP(op_MUL_103)), SN(32, 0)); + RzILOpPure *op_ADD_107 = ADD(op_LSHIFT_84, op_LSHIFT_106); + RzILOpPure *op_EQ_108 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_ADD_57), SN(32, 0), SN(32, 0x20)), op_ADD_107); + RzILOpPure *op_RSHIFT_160 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_163 = LOGAND(op_RSHIFT_160, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_169 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_172 = LOGAND(op_RSHIFT_169, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_175 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_163), DUP(op_AND_163))), CAST(16, MSB(DUP(op_AND_163)), DUP(op_AND_163))), CAST(32, MSB(CAST(16, MSB(op_AND_172), DUP(op_AND_172))), CAST(16, MSB(DUP(op_AND_172)), DUP(op_AND_172)))); + RzILOpPure *op_LSHIFT_178 = SHIFTL0(CAST(64, MSB(op_MUL_175), DUP(op_MUL_175)), SN(32, 0)); + RzILOpPure *op_RSHIFT_182 = SHIFTRA(DUP(Rss), SN(32, 16)); + 
RzILOpPure *op_AND_185 = LOGAND(op_RSHIFT_182, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_191 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_194 = LOGAND(op_RSHIFT_191, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_197 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_185), DUP(op_AND_185))), CAST(16, MSB(DUP(op_AND_185)), DUP(op_AND_185))), CAST(32, MSB(CAST(16, MSB(op_AND_194), DUP(op_AND_194))), CAST(16, MSB(DUP(op_AND_194)), DUP(op_AND_194)))); + RzILOpPure *op_LSHIFT_200 = SHIFTL0(CAST(64, MSB(op_MUL_197), DUP(op_MUL_197)), SN(32, 0)); + RzILOpPure *op_ADD_201 = ADD(op_LSHIFT_178, op_LSHIFT_200); + RzILOpPure *op_LT_204 = SLT(op_ADD_201, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_209 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_210 = NEG(op_LSHIFT_209); + RzILOpPure *op_LSHIFT_215 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_218 = SUB(op_LSHIFT_215, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_219 = ITE(op_LT_204, op_NEG_210, op_SUB_218); + RzILOpEffect *gcc_expr_220 = BRANCH(op_EQ_108, EMPTY(), set_usr_field_call_156); + + // h_tmp387 = HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) << 0x0) + (((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff))))) << 0x0)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) << 0x0) + (((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff))))) << 0x0))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) << 0x0) + (((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 
0x10) & ((st64) 0xffff))))) << 0x0) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_222 = SETL("h_tmp387", cond_219); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16 ...; + RzILOpEffect *seq_223 = SEQN(2, gcc_expr_220, op_ASSIGN_hybrid_tmp_222); + + // Rdd = ((Rdd & (~(0xffffffff << 0x0))) | ((((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) << 0x0) + (((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff))))) << 0x0)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) << 0x0) + (((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff))))) << 0x0)) ? (((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) << 0x0) + (((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff))))) << 0x0) : h_tmp387) & 0xffffffff) << 0x0)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0)); + RzILOpPure *op_NOT_6 = LOGNOT(op_LSHIFT_5); + RzILOpPure *op_AND_7 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_6); + RzILOpPure *op_RSHIFT_112 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_115 = LOGAND(op_RSHIFT_112, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_121 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_124 = LOGAND(op_RSHIFT_121, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_127 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_115), DUP(op_AND_115))), CAST(16, MSB(DUP(op_AND_115)), DUP(op_AND_115))), CAST(32, MSB(CAST(16, MSB(op_AND_124), DUP(op_AND_124))), CAST(16, MSB(DUP(op_AND_124)), DUP(op_AND_124)))); + RzILOpPure *op_LSHIFT_130 = SHIFTL0(CAST(64, 
MSB(op_MUL_127), DUP(op_MUL_127)), SN(32, 0)); + RzILOpPure *op_RSHIFT_134 = SHIFTRA(DUP(Rss), SN(32, 16)); + RzILOpPure *op_AND_137 = LOGAND(op_RSHIFT_134, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_143 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_146 = LOGAND(op_RSHIFT_143, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_149 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_137), DUP(op_AND_137))), CAST(16, MSB(DUP(op_AND_137)), DUP(op_AND_137))), CAST(32, MSB(CAST(16, MSB(op_AND_146), DUP(op_AND_146))), CAST(16, MSB(DUP(op_AND_146)), DUP(op_AND_146)))); + RzILOpPure *op_LSHIFT_152 = SHIFTL0(CAST(64, MSB(op_MUL_149), DUP(op_MUL_149)), SN(32, 0)); + RzILOpPure *op_ADD_153 = ADD(op_LSHIFT_130, op_LSHIFT_152); + RzILOpPure *cond_224 = ITE(DUP(op_EQ_108), op_ADD_153, VARL("h_tmp387")); + RzILOpPure *op_AND_226 = LOGAND(cond_224, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_230 = SHIFTL0(op_AND_226, SN(32, 0)); + RzILOpPure *op_OR_231 = LOGOR(op_AND_7, op_LSHIFT_230); + RzILOpEffect *op_ASSIGN_232 = WRITE_REG(bundle, Rdd_op, op_OR_231); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) (( ...; + RzILOpEffect *seq_233 = SEQN(2, seq_223, op_ASSIGN_232); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_388 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) << 0x0) + (((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff))))) << 0x0)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) << 0x0) + (((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff))))) << 0x0))) {{}} else 
{set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) << 0x0) + (((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff))))) << 0x0) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_248 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_251 = LOGAND(op_RSHIFT_248, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_257 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_260 = LOGAND(op_RSHIFT_257, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_263 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_251), DUP(op_AND_251))), CAST(16, MSB(DUP(op_AND_251)), DUP(op_AND_251))), CAST(32, MSB(CAST(16, MSB(op_AND_260), DUP(op_AND_260))), CAST(16, MSB(DUP(op_AND_260)), DUP(op_AND_260)))); + RzILOpPure *op_LSHIFT_266 = SHIFTL0(CAST(64, MSB(op_MUL_263), DUP(op_MUL_263)), SN(32, 0)); + RzILOpPure *op_RSHIFT_270 = SHIFTRA(DUP(Rss), SN(32, 0x30)); + RzILOpPure *op_AND_273 = LOGAND(op_RSHIFT_270, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_279 = SHIFTRA(DUP(Rtt), SN(32, 0x30)); + RzILOpPure *op_AND_282 = LOGAND(op_RSHIFT_279, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_285 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_273), DUP(op_AND_273))), CAST(16, MSB(DUP(op_AND_273)), DUP(op_AND_273))), CAST(32, MSB(CAST(16, MSB(op_AND_282), DUP(op_AND_282))), CAST(16, MSB(DUP(op_AND_282)), DUP(op_AND_282)))); + RzILOpPure *op_LSHIFT_288 = SHIFTL0(CAST(64, MSB(op_MUL_285), DUP(op_MUL_285)), SN(32, 0)); + RzILOpPure *op_ADD_289 = ADD(op_LSHIFT_266, op_LSHIFT_288); + RzILOpPure *op_RSHIFT_298 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_301 = LOGAND(op_RSHIFT_298, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_307 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + 
RzILOpPure *op_AND_310 = LOGAND(op_RSHIFT_307, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_313 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_301), DUP(op_AND_301))), CAST(16, MSB(DUP(op_AND_301)), DUP(op_AND_301))), CAST(32, MSB(CAST(16, MSB(op_AND_310), DUP(op_AND_310))), CAST(16, MSB(DUP(op_AND_310)), DUP(op_AND_310)))); + RzILOpPure *op_LSHIFT_316 = SHIFTL0(CAST(64, MSB(op_MUL_313), DUP(op_MUL_313)), SN(32, 0)); + RzILOpPure *op_RSHIFT_320 = SHIFTRA(DUP(Rss), SN(32, 0x30)); + RzILOpPure *op_AND_323 = LOGAND(op_RSHIFT_320, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_329 = SHIFTRA(DUP(Rtt), SN(32, 0x30)); + RzILOpPure *op_AND_332 = LOGAND(op_RSHIFT_329, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_335 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_323), DUP(op_AND_323))), CAST(16, MSB(DUP(op_AND_323)), DUP(op_AND_323))), CAST(32, MSB(CAST(16, MSB(op_AND_332), DUP(op_AND_332))), CAST(16, MSB(DUP(op_AND_332)), DUP(op_AND_332)))); + RzILOpPure *op_LSHIFT_338 = SHIFTL0(CAST(64, MSB(op_MUL_335), DUP(op_MUL_335)), SN(32, 0)); + RzILOpPure *op_ADD_339 = ADD(op_LSHIFT_316, op_LSHIFT_338); + RzILOpPure *op_EQ_340 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_ADD_289), SN(32, 0), SN(32, 0x20)), op_ADD_339); + RzILOpPure *op_RSHIFT_392 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_395 = LOGAND(op_RSHIFT_392, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_401 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_404 = LOGAND(op_RSHIFT_401, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_407 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_395), DUP(op_AND_395))), CAST(16, MSB(DUP(op_AND_395)), DUP(op_AND_395))), CAST(32, MSB(CAST(16, MSB(op_AND_404), DUP(op_AND_404))), CAST(16, MSB(DUP(op_AND_404)), DUP(op_AND_404)))); + RzILOpPure *op_LSHIFT_410 = SHIFTL0(CAST(64, MSB(op_MUL_407), DUP(op_MUL_407)), SN(32, 0)); + RzILOpPure *op_RSHIFT_414 = SHIFTRA(DUP(Rss), SN(32, 0x30)); + 
RzILOpPure *op_AND_417 = LOGAND(op_RSHIFT_414, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_423 = SHIFTRA(DUP(Rtt), SN(32, 0x30)); + RzILOpPure *op_AND_426 = LOGAND(op_RSHIFT_423, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_429 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_417), DUP(op_AND_417))), CAST(16, MSB(DUP(op_AND_417)), DUP(op_AND_417))), CAST(32, MSB(CAST(16, MSB(op_AND_426), DUP(op_AND_426))), CAST(16, MSB(DUP(op_AND_426)), DUP(op_AND_426)))); + RzILOpPure *op_LSHIFT_432 = SHIFTL0(CAST(64, MSB(op_MUL_429), DUP(op_MUL_429)), SN(32, 0)); + RzILOpPure *op_ADD_433 = ADD(op_LSHIFT_410, op_LSHIFT_432); + RzILOpPure *op_LT_436 = SLT(op_ADD_433, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_441 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_442 = NEG(op_LSHIFT_441); + RzILOpPure *op_LSHIFT_447 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_450 = SUB(op_LSHIFT_447, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_451 = ITE(op_LT_436, op_NEG_442, op_SUB_450); + RzILOpEffect *gcc_expr_452 = BRANCH(op_EQ_340, EMPTY(), set_usr_field_call_388); + + // h_tmp388 = HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) << 0x0) + (((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff))))) << 0x0)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) << 0x0) + (((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff))))) << 0x0))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) << 0x0) + (((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt 
>> 0x30) & ((st64) 0xffff))))) << 0x0) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_454 = SETL("h_tmp388", cond_451); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16 ...; + RzILOpEffect *seq_455 = SEQN(2, gcc_expr_452, op_ASSIGN_hybrid_tmp_454); + + // Rdd = ((Rdd & (~(0xffffffff << 0x20))) | ((((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) << 0x0) + (((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff))))) << 0x0)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) << 0x0) + (((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff))))) << 0x0)) ? (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) << 0x0) + (((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff))))) << 0x0) : h_tmp388) & 0xffffffff) << 0x20)); + RzILOpPure *op_LSHIFT_239 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0x20)); + RzILOpPure *op_NOT_240 = LOGNOT(op_LSHIFT_239); + RzILOpPure *op_AND_241 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_240); + RzILOpPure *op_RSHIFT_344 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_347 = LOGAND(op_RSHIFT_344, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_353 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_356 = LOGAND(op_RSHIFT_353, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_359 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_347), DUP(op_AND_347))), CAST(16, MSB(DUP(op_AND_347)), DUP(op_AND_347))), CAST(32, MSB(CAST(16, MSB(op_AND_356), DUP(op_AND_356))), CAST(16, MSB(DUP(op_AND_356)), DUP(op_AND_356)))); + RzILOpPure 
*op_LSHIFT_362 = SHIFTL0(CAST(64, MSB(op_MUL_359), DUP(op_MUL_359)), SN(32, 0)); + RzILOpPure *op_RSHIFT_366 = SHIFTRA(DUP(Rss), SN(32, 0x30)); + RzILOpPure *op_AND_369 = LOGAND(op_RSHIFT_366, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_375 = SHIFTRA(DUP(Rtt), SN(32, 0x30)); + RzILOpPure *op_AND_378 = LOGAND(op_RSHIFT_375, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_381 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_369), DUP(op_AND_369))), CAST(16, MSB(DUP(op_AND_369)), DUP(op_AND_369))), CAST(32, MSB(CAST(16, MSB(op_AND_378), DUP(op_AND_378))), CAST(16, MSB(DUP(op_AND_378)), DUP(op_AND_378)))); + RzILOpPure *op_LSHIFT_384 = SHIFTL0(CAST(64, MSB(op_MUL_381), DUP(op_MUL_381)), SN(32, 0)); + RzILOpPure *op_ADD_385 = ADD(op_LSHIFT_362, op_LSHIFT_384); + RzILOpPure *cond_456 = ITE(DUP(op_EQ_340), op_ADD_385, VARL("h_tmp388")); + RzILOpPure *op_AND_458 = LOGAND(cond_456, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_462 = SHIFTL0(op_AND_458, SN(32, 0x20)); + RzILOpPure *op_OR_463 = LOGOR(op_AND_241, op_LSHIFT_462); + RzILOpEffect *op_ASSIGN_464 = WRITE_REG(bundle, Rdd_op, op_OR_463); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) (( ...; + RzILOpEffect *seq_465 = SEQN(2, seq_455, op_ASSIGN_464); + + RzILOpEffect *instruction_sequence = SEQN(2, seq_233, seq_465); + return instruction_sequence; +} + +// Rdd = vdmpy(Rss,Rtt):<<1:sat +RzILOpEffect *hex_il_op_m2_vdmpys_s1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_156 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // 
HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff))))) << 0x1)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff))))) << 0x1))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff))))) << 0x1) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_15 = SHIFTRA(Rss, SN(32, 0)); + RzILOpPure *op_AND_18 = LOGAND(op_RSHIFT_15, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_25 = SHIFTRA(Rtt, SN(32, 0)); + RzILOpPure *op_AND_28 = LOGAND(op_RSHIFT_25, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_31 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_18), DUP(op_AND_18))), CAST(16, MSB(DUP(op_AND_18)), DUP(op_AND_18))), CAST(32, MSB(CAST(16, MSB(op_AND_28), DUP(op_AND_28))), CAST(16, MSB(DUP(op_AND_28)), DUP(op_AND_28)))); + RzILOpPure *op_LSHIFT_34 = SHIFTL0(CAST(64, MSB(op_MUL_31), DUP(op_MUL_31)), SN(32, 1)); + RzILOpPure *op_RSHIFT_38 = SHIFTRA(DUP(Rss), SN(32, 16)); + RzILOpPure *op_AND_41 = LOGAND(op_RSHIFT_38, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_47 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_50 = LOGAND(op_RSHIFT_47, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_53 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_41), DUP(op_AND_41))), CAST(16, MSB(DUP(op_AND_41)), 
DUP(op_AND_41))), CAST(32, MSB(CAST(16, MSB(op_AND_50), DUP(op_AND_50))), CAST(16, MSB(DUP(op_AND_50)), DUP(op_AND_50)))); + RzILOpPure *op_LSHIFT_56 = SHIFTL0(CAST(64, MSB(op_MUL_53), DUP(op_MUL_53)), SN(32, 1)); + RzILOpPure *op_ADD_57 = ADD(op_LSHIFT_34, op_LSHIFT_56); + RzILOpPure *op_RSHIFT_66 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_69 = LOGAND(op_RSHIFT_66, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_75 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_78 = LOGAND(op_RSHIFT_75, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_81 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_69), DUP(op_AND_69))), CAST(16, MSB(DUP(op_AND_69)), DUP(op_AND_69))), CAST(32, MSB(CAST(16, MSB(op_AND_78), DUP(op_AND_78))), CAST(16, MSB(DUP(op_AND_78)), DUP(op_AND_78)))); + RzILOpPure *op_LSHIFT_84 = SHIFTL0(CAST(64, MSB(op_MUL_81), DUP(op_MUL_81)), SN(32, 1)); + RzILOpPure *op_RSHIFT_88 = SHIFTRA(DUP(Rss), SN(32, 16)); + RzILOpPure *op_AND_91 = LOGAND(op_RSHIFT_88, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_97 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_100 = LOGAND(op_RSHIFT_97, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_103 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_91), DUP(op_AND_91))), CAST(16, MSB(DUP(op_AND_91)), DUP(op_AND_91))), CAST(32, MSB(CAST(16, MSB(op_AND_100), DUP(op_AND_100))), CAST(16, MSB(DUP(op_AND_100)), DUP(op_AND_100)))); + RzILOpPure *op_LSHIFT_106 = SHIFTL0(CAST(64, MSB(op_MUL_103), DUP(op_MUL_103)), SN(32, 1)); + RzILOpPure *op_ADD_107 = ADD(op_LSHIFT_84, op_LSHIFT_106); + RzILOpPure *op_EQ_108 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_ADD_57), SN(32, 0), SN(32, 0x20)), op_ADD_107); + RzILOpPure *op_RSHIFT_160 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_163 = LOGAND(op_RSHIFT_160, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_169 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_172 = LOGAND(op_RSHIFT_169, 
CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_175 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_163), DUP(op_AND_163))), CAST(16, MSB(DUP(op_AND_163)), DUP(op_AND_163))), CAST(32, MSB(CAST(16, MSB(op_AND_172), DUP(op_AND_172))), CAST(16, MSB(DUP(op_AND_172)), DUP(op_AND_172)))); + RzILOpPure *op_LSHIFT_178 = SHIFTL0(CAST(64, MSB(op_MUL_175), DUP(op_MUL_175)), SN(32, 1)); + RzILOpPure *op_RSHIFT_182 = SHIFTRA(DUP(Rss), SN(32, 16)); + RzILOpPure *op_AND_185 = LOGAND(op_RSHIFT_182, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_191 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_194 = LOGAND(op_RSHIFT_191, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_197 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_185), DUP(op_AND_185))), CAST(16, MSB(DUP(op_AND_185)), DUP(op_AND_185))), CAST(32, MSB(CAST(16, MSB(op_AND_194), DUP(op_AND_194))), CAST(16, MSB(DUP(op_AND_194)), DUP(op_AND_194)))); + RzILOpPure *op_LSHIFT_200 = SHIFTL0(CAST(64, MSB(op_MUL_197), DUP(op_MUL_197)), SN(32, 1)); + RzILOpPure *op_ADD_201 = ADD(op_LSHIFT_178, op_LSHIFT_200); + RzILOpPure *op_LT_204 = SLT(op_ADD_201, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_209 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_210 = NEG(op_LSHIFT_209); + RzILOpPure *op_LSHIFT_215 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_218 = SUB(op_LSHIFT_215, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_219 = ITE(op_LT_204, op_NEG_210, op_SUB_218); + RzILOpEffect *gcc_expr_220 = BRANCH(op_EQ_108, EMPTY(), set_usr_field_call_156); + + // h_tmp389 = HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff))))) << 0x1)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 
0x0) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff))))) << 0x1))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff))))) << 0x1) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_222 = SETL("h_tmp389", cond_219); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16 ...; + RzILOpEffect *seq_223 = SEQN(2, gcc_expr_220, op_ASSIGN_hybrid_tmp_222); + + // Rdd = ((Rdd & (~(0xffffffff << 0x0))) | ((((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff))))) << 0x1)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff))))) << 0x1)) ? 
(((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff))))) << 0x1) : h_tmp389) & 0xffffffff) << 0x0)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0)); + RzILOpPure *op_NOT_6 = LOGNOT(op_LSHIFT_5); + RzILOpPure *op_AND_7 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_6); + RzILOpPure *op_RSHIFT_112 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_115 = LOGAND(op_RSHIFT_112, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_121 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_124 = LOGAND(op_RSHIFT_121, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_127 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_115), DUP(op_AND_115))), CAST(16, MSB(DUP(op_AND_115)), DUP(op_AND_115))), CAST(32, MSB(CAST(16, MSB(op_AND_124), DUP(op_AND_124))), CAST(16, MSB(DUP(op_AND_124)), DUP(op_AND_124)))); + RzILOpPure *op_LSHIFT_130 = SHIFTL0(CAST(64, MSB(op_MUL_127), DUP(op_MUL_127)), SN(32, 1)); + RzILOpPure *op_RSHIFT_134 = SHIFTRA(DUP(Rss), SN(32, 16)); + RzILOpPure *op_AND_137 = LOGAND(op_RSHIFT_134, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_143 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_146 = LOGAND(op_RSHIFT_143, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_149 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_137), DUP(op_AND_137))), CAST(16, MSB(DUP(op_AND_137)), DUP(op_AND_137))), CAST(32, MSB(CAST(16, MSB(op_AND_146), DUP(op_AND_146))), CAST(16, MSB(DUP(op_AND_146)), DUP(op_AND_146)))); + RzILOpPure *op_LSHIFT_152 = SHIFTL0(CAST(64, MSB(op_MUL_149), DUP(op_MUL_149)), SN(32, 1)); + RzILOpPure *op_ADD_153 = ADD(op_LSHIFT_130, op_LSHIFT_152); + RzILOpPure *cond_224 = ITE(DUP(op_EQ_108), op_ADD_153, VARL("h_tmp389")); + RzILOpPure *op_AND_226 = LOGAND(cond_224, SN(64, 0xffffffff)); + RzILOpPure 
*op_LSHIFT_230 = SHIFTL0(op_AND_226, SN(32, 0)); + RzILOpPure *op_OR_231 = LOGOR(op_AND_7, op_LSHIFT_230); + RzILOpEffect *op_ASSIGN_232 = WRITE_REG(bundle, Rdd_op, op_OR_231); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) (( ...; + RzILOpEffect *seq_233 = SEQN(2, seq_223, op_ASSIGN_232); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_388 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff))))) << 0x1)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff))))) << 0x1))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff))))) << 0x1) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_248 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_251 = LOGAND(op_RSHIFT_248, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_257 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_260 = LOGAND(op_RSHIFT_257, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_263 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_251), DUP(op_AND_251))), CAST(16, MSB(DUP(op_AND_251)), DUP(op_AND_251))), CAST(32, MSB(CAST(16, MSB(op_AND_260), DUP(op_AND_260))), CAST(16, MSB(DUP(op_AND_260)), DUP(op_AND_260)))); + RzILOpPure *op_LSHIFT_266 = SHIFTL0(CAST(64, MSB(op_MUL_263), DUP(op_MUL_263)), SN(32, 1)); + RzILOpPure *op_RSHIFT_270 = SHIFTRA(DUP(Rss), SN(32, 0x30)); + RzILOpPure *op_AND_273 = LOGAND(op_RSHIFT_270, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_279 = SHIFTRA(DUP(Rtt), SN(32, 0x30)); + RzILOpPure *op_AND_282 = LOGAND(op_RSHIFT_279, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_285 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_273), DUP(op_AND_273))), CAST(16, MSB(DUP(op_AND_273)), DUP(op_AND_273))), CAST(32, MSB(CAST(16, MSB(op_AND_282), DUP(op_AND_282))), CAST(16, MSB(DUP(op_AND_282)), DUP(op_AND_282)))); + RzILOpPure *op_LSHIFT_288 = SHIFTL0(CAST(64, MSB(op_MUL_285), DUP(op_MUL_285)), SN(32, 1)); + RzILOpPure *op_ADD_289 = ADD(op_LSHIFT_266, op_LSHIFT_288); + RzILOpPure *op_RSHIFT_298 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_301 = LOGAND(op_RSHIFT_298, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_307 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_310 = LOGAND(op_RSHIFT_307, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_313 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_301), DUP(op_AND_301))), CAST(16, MSB(DUP(op_AND_301)), DUP(op_AND_301))), CAST(32, MSB(CAST(16, MSB(op_AND_310), DUP(op_AND_310))), CAST(16, MSB(DUP(op_AND_310)), 
DUP(op_AND_310)))); + RzILOpPure *op_LSHIFT_316 = SHIFTL0(CAST(64, MSB(op_MUL_313), DUP(op_MUL_313)), SN(32, 1)); + RzILOpPure *op_RSHIFT_320 = SHIFTRA(DUP(Rss), SN(32, 0x30)); + RzILOpPure *op_AND_323 = LOGAND(op_RSHIFT_320, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_329 = SHIFTRA(DUP(Rtt), SN(32, 0x30)); + RzILOpPure *op_AND_332 = LOGAND(op_RSHIFT_329, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_335 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_323), DUP(op_AND_323))), CAST(16, MSB(DUP(op_AND_323)), DUP(op_AND_323))), CAST(32, MSB(CAST(16, MSB(op_AND_332), DUP(op_AND_332))), CAST(16, MSB(DUP(op_AND_332)), DUP(op_AND_332)))); + RzILOpPure *op_LSHIFT_338 = SHIFTL0(CAST(64, MSB(op_MUL_335), DUP(op_MUL_335)), SN(32, 1)); + RzILOpPure *op_ADD_339 = ADD(op_LSHIFT_316, op_LSHIFT_338); + RzILOpPure *op_EQ_340 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_ADD_289), SN(32, 0), SN(32, 0x20)), op_ADD_339); + RzILOpPure *op_RSHIFT_392 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_395 = LOGAND(op_RSHIFT_392, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_401 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_404 = LOGAND(op_RSHIFT_401, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_407 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_395), DUP(op_AND_395))), CAST(16, MSB(DUP(op_AND_395)), DUP(op_AND_395))), CAST(32, MSB(CAST(16, MSB(op_AND_404), DUP(op_AND_404))), CAST(16, MSB(DUP(op_AND_404)), DUP(op_AND_404)))); + RzILOpPure *op_LSHIFT_410 = SHIFTL0(CAST(64, MSB(op_MUL_407), DUP(op_MUL_407)), SN(32, 1)); + RzILOpPure *op_RSHIFT_414 = SHIFTRA(DUP(Rss), SN(32, 0x30)); + RzILOpPure *op_AND_417 = LOGAND(op_RSHIFT_414, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_423 = SHIFTRA(DUP(Rtt), SN(32, 0x30)); + RzILOpPure *op_AND_426 = LOGAND(op_RSHIFT_423, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_429 = MUL(CAST(32, MSB(CAST(16, 
MSB(op_AND_417), DUP(op_AND_417))), CAST(16, MSB(DUP(op_AND_417)), DUP(op_AND_417))), CAST(32, MSB(CAST(16, MSB(op_AND_426), DUP(op_AND_426))), CAST(16, MSB(DUP(op_AND_426)), DUP(op_AND_426)))); + RzILOpPure *op_LSHIFT_432 = SHIFTL0(CAST(64, MSB(op_MUL_429), DUP(op_MUL_429)), SN(32, 1)); + RzILOpPure *op_ADD_433 = ADD(op_LSHIFT_410, op_LSHIFT_432); + RzILOpPure *op_LT_436 = SLT(op_ADD_433, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_441 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_442 = NEG(op_LSHIFT_441); + RzILOpPure *op_LSHIFT_447 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_450 = SUB(op_LSHIFT_447, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_451 = ITE(op_LT_436, op_NEG_442, op_SUB_450); + RzILOpEffect *gcc_expr_452 = BRANCH(op_EQ_340, EMPTY(), set_usr_field_call_388); + + // h_tmp390 = HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff))))) << 0x1)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff))))) << 0x1))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff))))) << 0x1) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_454 = SETL("h_tmp390", cond_451); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16 ...; + RzILOpEffect *seq_455 = SEQN(2, gcc_expr_452, op_ASSIGN_hybrid_tmp_454); + + // Rdd = ((Rdd & (~(0xffffffff << 0x20))) | ((((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff))))) << 0x1)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff))))) << 0x1)) ? (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff))))) << 0x1) : h_tmp390) & 0xffffffff) << 0x20)); + RzILOpPure *op_LSHIFT_239 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0x20)); + RzILOpPure *op_NOT_240 = LOGNOT(op_LSHIFT_239); + RzILOpPure *op_AND_241 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_240); + RzILOpPure *op_RSHIFT_344 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_347 = LOGAND(op_RSHIFT_344, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_353 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_356 = LOGAND(op_RSHIFT_353, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_359 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_347), DUP(op_AND_347))), CAST(16, MSB(DUP(op_AND_347)), DUP(op_AND_347))), CAST(32, MSB(CAST(16, MSB(op_AND_356), DUP(op_AND_356))), CAST(16, MSB(DUP(op_AND_356)), DUP(op_AND_356)))); + RzILOpPure *op_LSHIFT_362 = SHIFTL0(CAST(64, MSB(op_MUL_359), DUP(op_MUL_359)), 
SN(32, 1)); + RzILOpPure *op_RSHIFT_366 = SHIFTRA(DUP(Rss), SN(32, 0x30)); + RzILOpPure *op_AND_369 = LOGAND(op_RSHIFT_366, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_375 = SHIFTRA(DUP(Rtt), SN(32, 0x30)); + RzILOpPure *op_AND_378 = LOGAND(op_RSHIFT_375, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_381 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_369), DUP(op_AND_369))), CAST(16, MSB(DUP(op_AND_369)), DUP(op_AND_369))), CAST(32, MSB(CAST(16, MSB(op_AND_378), DUP(op_AND_378))), CAST(16, MSB(DUP(op_AND_378)), DUP(op_AND_378)))); + RzILOpPure *op_LSHIFT_384 = SHIFTL0(CAST(64, MSB(op_MUL_381), DUP(op_MUL_381)), SN(32, 1)); + RzILOpPure *op_ADD_385 = ADD(op_LSHIFT_362, op_LSHIFT_384); + RzILOpPure *cond_456 = ITE(DUP(op_EQ_340), op_ADD_385, VARL("h_tmp390")); + RzILOpPure *op_AND_458 = LOGAND(cond_456, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_462 = SHIFTL0(op_AND_458, SN(32, 0x20)); + RzILOpPure *op_OR_463 = LOGOR(op_AND_241, op_LSHIFT_462); + RzILOpEffect *op_ASSIGN_464 = WRITE_REG(bundle, Rdd_op, op_OR_463); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) (( ...; + RzILOpEffect *seq_465 = SEQN(2, seq_455, op_ASSIGN_464); + + RzILOpEffect *instruction_sequence = SEQN(2, seq_233, seq_465); + return instruction_sequence; +} + +// Rxx += vmpyh(Rs,Rt) +RzILOpEffect *hex_il_op_m2_vmac2(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rxx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rxx = ((Rxx & (~(0xffffffff << 0x0))) | ((((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) & 0xffffffff) << 0x0)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0)); + 
RzILOpPure *op_NOT_6 = LOGNOT(op_LSHIFT_5); + RzILOpPure *op_AND_7 = LOGAND(READ_REG(pkt, Rxx_op, false), op_NOT_6); + RzILOpPure *op_RSHIFT_11 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_13 = LOGAND(op_RSHIFT_11, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_20 = SHIFTRA(Rs, SN(32, 0)); + RzILOpPure *op_AND_22 = LOGAND(op_RSHIFT_20, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_29 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_31 = LOGAND(op_RSHIFT_29, SN(32, 0xffff)); + RzILOpPure *op_MUL_34 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_22), DUP(op_AND_22))), CAST(16, MSB(DUP(op_AND_22)), DUP(op_AND_22))), CAST(32, MSB(CAST(16, MSB(op_AND_31), DUP(op_AND_31))), CAST(16, MSB(DUP(op_AND_31)), DUP(op_AND_31)))); + RzILOpPure *op_ADD_36 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_13), DUP(op_AND_13))), CAST(32, MSB(DUP(op_AND_13)), DUP(op_AND_13))), CAST(64, MSB(op_MUL_34), DUP(op_MUL_34))); + RzILOpPure *op_AND_38 = LOGAND(op_ADD_36, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_42 = SHIFTL0(op_AND_38, SN(32, 0)); + RzILOpPure *op_OR_43 = LOGOR(op_AND_7, op_LSHIFT_42); + RzILOpEffect *op_ASSIGN_44 = WRITE_REG(bundle, Rxx_op, op_OR_43); + + // Rxx = ((Rxx & (~(0xffffffff << 0x20))) | ((((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + ((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) & 0xffffffff) << 0x20)); + RzILOpPure *op_LSHIFT_50 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0x20)); + RzILOpPure *op_NOT_51 = LOGNOT(op_LSHIFT_50); + RzILOpPure *op_AND_52 = LOGAND(READ_REG(pkt, Rxx_op, false), op_NOT_51); + RzILOpPure *op_RSHIFT_56 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_58 = LOGAND(op_RSHIFT_56, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_64 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_66 = LOGAND(op_RSHIFT_64, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_72 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_74 = LOGAND(op_RSHIFT_72, SN(32, 0xffff)); + RzILOpPure *op_MUL_77 = 
MUL(CAST(32, MSB(CAST(16, MSB(op_AND_66), DUP(op_AND_66))), CAST(16, MSB(DUP(op_AND_66)), DUP(op_AND_66))), CAST(32, MSB(CAST(16, MSB(op_AND_74), DUP(op_AND_74))), CAST(16, MSB(DUP(op_AND_74)), DUP(op_AND_74)))); + RzILOpPure *op_ADD_79 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_58), DUP(op_AND_58))), CAST(32, MSB(DUP(op_AND_58)), DUP(op_AND_58))), CAST(64, MSB(op_MUL_77), DUP(op_MUL_77))); + RzILOpPure *op_AND_81 = LOGAND(op_ADD_79, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_85 = SHIFTL0(op_AND_81, SN(32, 0x20)); + RzILOpPure *op_OR_86 = LOGOR(op_AND_52, op_LSHIFT_85); + RzILOpEffect *op_ASSIGN_87 = WRITE_REG(bundle, Rxx_op, op_OR_86); + + RzILOpEffect *instruction_sequence = SEQN(2, op_ASSIGN_44, op_ASSIGN_87); + return instruction_sequence; +} + +// Rxx += vmpyeh(Rss,Rtt) +RzILOpEffect *hex_il_op_m2_vmac2es(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rxx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // Rxx = ((Rxx & (~(0xffffffff << 0x0))) | ((((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) & 0xffffffff) << 0x0)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0)); + RzILOpPure *op_NOT_6 = LOGNOT(op_LSHIFT_5); + RzILOpPure *op_AND_7 = LOGAND(READ_REG(pkt, Rxx_op, false), op_NOT_6); + RzILOpPure *op_RSHIFT_11 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_13 = LOGAND(op_RSHIFT_11, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_20 = SHIFTRA(Rss, SN(32, 0)); + RzILOpPure *op_AND_23 = LOGAND(op_RSHIFT_20, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_30 = SHIFTRA(Rtt, SN(32, 0)); + RzILOpPure *op_AND_33 = LOGAND(op_RSHIFT_30, CAST(64, 
MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_36 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_23), DUP(op_AND_23))), CAST(16, MSB(DUP(op_AND_23)), DUP(op_AND_23))), CAST(32, MSB(CAST(16, MSB(op_AND_33), DUP(op_AND_33))), CAST(16, MSB(DUP(op_AND_33)), DUP(op_AND_33)))); + RzILOpPure *op_ADD_38 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_13), DUP(op_AND_13))), CAST(32, MSB(DUP(op_AND_13)), DUP(op_AND_13))), CAST(64, MSB(op_MUL_36), DUP(op_MUL_36))); + RzILOpPure *op_AND_40 = LOGAND(op_ADD_38, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_44 = SHIFTL0(op_AND_40, SN(32, 0)); + RzILOpPure *op_OR_45 = LOGOR(op_AND_7, op_LSHIFT_44); + RzILOpEffect *op_ASSIGN_46 = WRITE_REG(bundle, Rxx_op, op_OR_45); + + // Rxx = ((Rxx & (~(0xffffffff << 0x20))) | ((((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + ((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) & 0xffffffff) << 0x20)); + RzILOpPure *op_LSHIFT_52 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0x20)); + RzILOpPure *op_NOT_53 = LOGNOT(op_LSHIFT_52); + RzILOpPure *op_AND_54 = LOGAND(READ_REG(pkt, Rxx_op, false), op_NOT_53); + RzILOpPure *op_RSHIFT_58 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_60 = LOGAND(op_RSHIFT_58, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_66 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_69 = LOGAND(op_RSHIFT_66, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_75 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_78 = LOGAND(op_RSHIFT_75, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_81 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_69), DUP(op_AND_69))), CAST(16, MSB(DUP(op_AND_69)), DUP(op_AND_69))), CAST(32, MSB(CAST(16, MSB(op_AND_78), DUP(op_AND_78))), CAST(16, MSB(DUP(op_AND_78)), DUP(op_AND_78)))); + RzILOpPure *op_ADD_83 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_60), DUP(op_AND_60))), CAST(32, MSB(DUP(op_AND_60)), DUP(op_AND_60))), CAST(64, MSB(op_MUL_81), 
DUP(op_MUL_81))); + RzILOpPure *op_AND_85 = LOGAND(op_ADD_83, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_89 = SHIFTL0(op_AND_85, SN(32, 0x20)); + RzILOpPure *op_OR_90 = LOGOR(op_AND_54, op_LSHIFT_89); + RzILOpEffect *op_ASSIGN_91 = WRITE_REG(bundle, Rxx_op, op_OR_90); + + RzILOpEffect *instruction_sequence = SEQN(2, op_ASSIGN_46, op_ASSIGN_91); + return instruction_sequence; +} + +// Rxx += vmpyeh(Rss,Rtt):sat +RzILOpEffect *hex_il_op_m2_vmac2es_s0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rxx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_114 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) << 0x0)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) << 0x0))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) << 0x0) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_23 = SHIFTRA(Rss, SN(32, 0)); + RzILOpPure *op_AND_26 = LOGAND(op_RSHIFT_23, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_33 = SHIFTRA(Rtt, SN(32, 0)); + RzILOpPure *op_AND_36 = LOGAND(op_RSHIFT_33, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_39 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_26), DUP(op_AND_26))), CAST(16, MSB(DUP(op_AND_26)), DUP(op_AND_26))), CAST(32, MSB(CAST(16, MSB(op_AND_36), DUP(op_AND_36))), CAST(16, MSB(DUP(op_AND_36)), DUP(op_AND_36)))); + RzILOpPure *op_LSHIFT_42 = SHIFTL0(CAST(64, MSB(op_MUL_39), DUP(op_MUL_39)), SN(32, 0)); + RzILOpPure *op_ADD_43 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_16), DUP(op_AND_16))), CAST(32, MSB(DUP(op_AND_16)), DUP(op_AND_16))), op_LSHIFT_42); + RzILOpPure *op_RSHIFT_52 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_54 = LOGAND(op_RSHIFT_52, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_60 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_63 = LOGAND(op_RSHIFT_60, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_69 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_72 = LOGAND(op_RSHIFT_69, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_75 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_63), DUP(op_AND_63))), CAST(16, MSB(DUP(op_AND_63)), DUP(op_AND_63))), CAST(32, MSB(CAST(16, MSB(op_AND_72), DUP(op_AND_72))), CAST(16, MSB(DUP(op_AND_72)), DUP(op_AND_72)))); + RzILOpPure *op_LSHIFT_78 = SHIFTL0(CAST(64, MSB(op_MUL_75), DUP(op_MUL_75)), SN(32, 0)); + RzILOpPure *op_ADD_79 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_54), DUP(op_AND_54))), CAST(32, MSB(DUP(op_AND_54)), DUP(op_AND_54))), op_LSHIFT_78); + RzILOpPure *op_EQ_80 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_ADD_43), SN(32, 0), SN(32, 
0x20)), op_ADD_79); + RzILOpPure *op_RSHIFT_118 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_120 = LOGAND(op_RSHIFT_118, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_126 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_129 = LOGAND(op_RSHIFT_126, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_135 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_138 = LOGAND(op_RSHIFT_135, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_141 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_129), DUP(op_AND_129))), CAST(16, MSB(DUP(op_AND_129)), DUP(op_AND_129))), CAST(32, MSB(CAST(16, MSB(op_AND_138), DUP(op_AND_138))), CAST(16, MSB(DUP(op_AND_138)), DUP(op_AND_138)))); + RzILOpPure *op_LSHIFT_144 = SHIFTL0(CAST(64, MSB(op_MUL_141), DUP(op_MUL_141)), SN(32, 0)); + RzILOpPure *op_ADD_145 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_120), DUP(op_AND_120))), CAST(32, MSB(DUP(op_AND_120)), DUP(op_AND_120))), op_LSHIFT_144); + RzILOpPure *op_LT_148 = SLT(op_ADD_145, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_153 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_154 = NEG(op_LSHIFT_153); + RzILOpPure *op_LSHIFT_159 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_162 = SUB(op_LSHIFT_159, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_163 = ITE(op_LT_148, op_NEG_154, op_SUB_162); + RzILOpEffect *gcc_expr_164 = BRANCH(op_EQ_80, EMPTY(), set_usr_field_call_114); + + // h_tmp391 = HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) << 0x0)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) << 0x0))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + 
(((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) << 0x0) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_166 = SETL("h_tmp391", cond_163); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx > ...; + RzILOpEffect *seq_167 = SEQN(2, gcc_expr_164, op_ASSIGN_hybrid_tmp_166); + + // Rxx = ((Rxx & (~(0xffffffff << 0x0))) | ((((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) << 0x0)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) << 0x0)) ? ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) << 0x0) : h_tmp391) & 0xffffffff) << 0x0)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0)); + RzILOpPure *op_NOT_6 = LOGNOT(op_LSHIFT_5); + RzILOpPure *op_AND_7 = LOGAND(READ_REG(pkt, Rxx_op, false), op_NOT_6); + RzILOpPure *op_RSHIFT_84 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_86 = LOGAND(op_RSHIFT_84, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_92 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_95 = LOGAND(op_RSHIFT_92, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_101 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_104 = LOGAND(op_RSHIFT_101, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_107 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_95), DUP(op_AND_95))), CAST(16, MSB(DUP(op_AND_95)), DUP(op_AND_95))), CAST(32, MSB(CAST(16, MSB(op_AND_104), DUP(op_AND_104))), CAST(16, MSB(DUP(op_AND_104)), DUP(op_AND_104)))); + RzILOpPure *op_LSHIFT_110 = SHIFTL0(CAST(64, 
MSB(op_MUL_107), DUP(op_MUL_107)), SN(32, 0)); + RzILOpPure *op_ADD_111 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_86), DUP(op_AND_86))), CAST(32, MSB(DUP(op_AND_86)), DUP(op_AND_86))), op_LSHIFT_110); + RzILOpPure *cond_168 = ITE(DUP(op_EQ_80), op_ADD_111, VARL("h_tmp391")); + RzILOpPure *op_AND_170 = LOGAND(cond_168, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_174 = SHIFTL0(op_AND_170, SN(32, 0)); + RzILOpPure *op_OR_175 = LOGOR(op_AND_7, op_LSHIFT_174); + RzILOpEffect *op_ASSIGN_176 = WRITE_REG(bundle, Rxx_op, op_OR_175); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((R ...; + RzILOpEffect *seq_177 = SEQN(2, seq_167, op_ASSIGN_176); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_290 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) << 0x0)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) << 0x0))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) << 0x0) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_192 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_194 = LOGAND(op_RSHIFT_192, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_200 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_203 = LOGAND(op_RSHIFT_200, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_209 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_212 = LOGAND(op_RSHIFT_209, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_215 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_203), DUP(op_AND_203))), CAST(16, MSB(DUP(op_AND_203)), DUP(op_AND_203))), CAST(32, MSB(CAST(16, MSB(op_AND_212), DUP(op_AND_212))), CAST(16, MSB(DUP(op_AND_212)), DUP(op_AND_212)))); + RzILOpPure *op_LSHIFT_218 = SHIFTL0(CAST(64, MSB(op_MUL_215), DUP(op_MUL_215)), SN(32, 0)); + RzILOpPure *op_ADD_219 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_194), DUP(op_AND_194))), CAST(32, MSB(DUP(op_AND_194)), DUP(op_AND_194))), op_LSHIFT_218); + RzILOpPure *op_RSHIFT_228 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_230 = LOGAND(op_RSHIFT_228, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_236 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_239 = LOGAND(op_RSHIFT_236, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_245 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_248 = LOGAND(op_RSHIFT_245, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_251 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_239), DUP(op_AND_239))), CAST(16, MSB(DUP(op_AND_239)), DUP(op_AND_239))), CAST(32, MSB(CAST(16, MSB(op_AND_248), DUP(op_AND_248))), CAST(16, MSB(DUP(op_AND_248)), DUP(op_AND_248)))); + RzILOpPure *op_LSHIFT_254 = SHIFTL0(CAST(64, MSB(op_MUL_251), DUP(op_MUL_251)), SN(32, 0)); + RzILOpPure *op_ADD_255 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_230), DUP(op_AND_230))), CAST(32, MSB(DUP(op_AND_230)), DUP(op_AND_230))), op_LSHIFT_254); + 
RzILOpPure *op_EQ_256 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_ADD_219), SN(32, 0), SN(32, 0x20)), op_ADD_255); + RzILOpPure *op_RSHIFT_294 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_296 = LOGAND(op_RSHIFT_294, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_302 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_305 = LOGAND(op_RSHIFT_302, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_311 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_314 = LOGAND(op_RSHIFT_311, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_317 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_305), DUP(op_AND_305))), CAST(16, MSB(DUP(op_AND_305)), DUP(op_AND_305))), CAST(32, MSB(CAST(16, MSB(op_AND_314), DUP(op_AND_314))), CAST(16, MSB(DUP(op_AND_314)), DUP(op_AND_314)))); + RzILOpPure *op_LSHIFT_320 = SHIFTL0(CAST(64, MSB(op_MUL_317), DUP(op_MUL_317)), SN(32, 0)); + RzILOpPure *op_ADD_321 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_296), DUP(op_AND_296))), CAST(32, MSB(DUP(op_AND_296)), DUP(op_AND_296))), op_LSHIFT_320); + RzILOpPure *op_LT_324 = SLT(op_ADD_321, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_329 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_330 = NEG(op_LSHIFT_329); + RzILOpPure *op_LSHIFT_335 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_338 = SUB(op_LSHIFT_335, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_339 = ITE(op_LT_324, op_NEG_330, op_SUB_338); + RzILOpEffect *gcc_expr_340 = BRANCH(op_EQ_256, EMPTY(), set_usr_field_call_290); + + // h_tmp392 = HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) << 0x0)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) << 0x0))) {{}} else 
{set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) << 0x0) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_342 = SETL("h_tmp392", cond_339); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx > ...; + RzILOpEffect *seq_343 = SEQN(2, gcc_expr_340, op_ASSIGN_hybrid_tmp_342); + + // Rxx = ((Rxx & (~(0xffffffff << 0x20))) | ((((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) << 0x0)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) << 0x0)) ? ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) << 0x0) : h_tmp392) & 0xffffffff) << 0x20)); + RzILOpPure *op_LSHIFT_183 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0x20)); + RzILOpPure *op_NOT_184 = LOGNOT(op_LSHIFT_183); + RzILOpPure *op_AND_185 = LOGAND(READ_REG(pkt, Rxx_op, false), op_NOT_184); + RzILOpPure *op_RSHIFT_260 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_262 = LOGAND(op_RSHIFT_260, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_268 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_271 = LOGAND(op_RSHIFT_268, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_277 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_280 = LOGAND(op_RSHIFT_277, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_283 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_271), DUP(op_AND_271))), CAST(16, MSB(DUP(op_AND_271)), DUP(op_AND_271))), CAST(32, 
MSB(CAST(16, MSB(op_AND_280), DUP(op_AND_280))), CAST(16, MSB(DUP(op_AND_280)), DUP(op_AND_280)))); + RzILOpPure *op_LSHIFT_286 = SHIFTL0(CAST(64, MSB(op_MUL_283), DUP(op_MUL_283)), SN(32, 0)); + RzILOpPure *op_ADD_287 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_262), DUP(op_AND_262))), CAST(32, MSB(DUP(op_AND_262)), DUP(op_AND_262))), op_LSHIFT_286); + RzILOpPure *cond_344 = ITE(DUP(op_EQ_256), op_ADD_287, VARL("h_tmp392")); + RzILOpPure *op_AND_346 = LOGAND(cond_344, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_350 = SHIFTL0(op_AND_346, SN(32, 0x20)); + RzILOpPure *op_OR_351 = LOGOR(op_AND_185, op_LSHIFT_350); + RzILOpEffect *op_ASSIGN_352 = WRITE_REG(bundle, Rxx_op, op_OR_351); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((R ...; + RzILOpEffect *seq_353 = SEQN(2, seq_343, op_ASSIGN_352); + + RzILOpEffect *instruction_sequence = SEQN(2, seq_177, seq_353); + return instruction_sequence; +} + +// Rxx += vmpyeh(Rss,Rtt):<<1:sat +RzILOpEffect *hex_il_op_m2_vmac2es_s1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rxx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_114 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) << 0x1)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) << 0x1))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, 
((ut32) 0x1))}, ((((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) << 0x1) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_23 = SHIFTRA(Rss, SN(32, 0)); + RzILOpPure *op_AND_26 = LOGAND(op_RSHIFT_23, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_33 = SHIFTRA(Rtt, SN(32, 0)); + RzILOpPure *op_AND_36 = LOGAND(op_RSHIFT_33, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_39 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_26), DUP(op_AND_26))), CAST(16, MSB(DUP(op_AND_26)), DUP(op_AND_26))), CAST(32, MSB(CAST(16, MSB(op_AND_36), DUP(op_AND_36))), CAST(16, MSB(DUP(op_AND_36)), DUP(op_AND_36)))); + RzILOpPure *op_LSHIFT_42 = SHIFTL0(CAST(64, MSB(op_MUL_39), DUP(op_MUL_39)), SN(32, 1)); + RzILOpPure *op_ADD_43 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_16), DUP(op_AND_16))), CAST(32, MSB(DUP(op_AND_16)), DUP(op_AND_16))), op_LSHIFT_42); + RzILOpPure *op_RSHIFT_52 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_54 = LOGAND(op_RSHIFT_52, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_60 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_63 = LOGAND(op_RSHIFT_60, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_69 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_72 = LOGAND(op_RSHIFT_69, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_75 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_63), DUP(op_AND_63))), CAST(16, MSB(DUP(op_AND_63)), DUP(op_AND_63))), CAST(32, MSB(CAST(16, MSB(op_AND_72), DUP(op_AND_72))), CAST(16, MSB(DUP(op_AND_72)), DUP(op_AND_72)))); + RzILOpPure *op_LSHIFT_78 = SHIFTL0(CAST(64, MSB(op_MUL_75), DUP(op_MUL_75)), SN(32, 1)); + RzILOpPure *op_ADD_79 = 
ADD(CAST(64, MSB(CAST(32, MSB(op_AND_54), DUP(op_AND_54))), CAST(32, MSB(DUP(op_AND_54)), DUP(op_AND_54))), op_LSHIFT_78); + RzILOpPure *op_EQ_80 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_ADD_43), SN(32, 0), SN(32, 0x20)), op_ADD_79); + RzILOpPure *op_RSHIFT_118 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_120 = LOGAND(op_RSHIFT_118, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_126 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_129 = LOGAND(op_RSHIFT_126, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_135 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_138 = LOGAND(op_RSHIFT_135, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_141 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_129), DUP(op_AND_129))), CAST(16, MSB(DUP(op_AND_129)), DUP(op_AND_129))), CAST(32, MSB(CAST(16, MSB(op_AND_138), DUP(op_AND_138))), CAST(16, MSB(DUP(op_AND_138)), DUP(op_AND_138)))); + RzILOpPure *op_LSHIFT_144 = SHIFTL0(CAST(64, MSB(op_MUL_141), DUP(op_MUL_141)), SN(32, 1)); + RzILOpPure *op_ADD_145 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_120), DUP(op_AND_120))), CAST(32, MSB(DUP(op_AND_120)), DUP(op_AND_120))), op_LSHIFT_144); + RzILOpPure *op_LT_148 = SLT(op_ADD_145, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_153 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_154 = NEG(op_LSHIFT_153); + RzILOpPure *op_LSHIFT_159 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_162 = SUB(op_LSHIFT_159, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_163 = ITE(op_LT_148, op_NEG_154, op_SUB_162); + RzILOpEffect *gcc_expr_164 = BRANCH(op_EQ_80, EMPTY(), set_usr_field_call_114); + + // h_tmp393 = HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) << 0x1)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rss >> 
0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) << 0x1))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) << 0x1) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_166 = SETL("h_tmp393", cond_163); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx > ...; + RzILOpEffect *seq_167 = SEQN(2, gcc_expr_164, op_ASSIGN_hybrid_tmp_166); + + // Rxx = ((Rxx & (~(0xffffffff << 0x0))) | ((((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) << 0x1)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) << 0x1)) ? 
((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) << 0x1) : h_tmp393) & 0xffffffff) << 0x0)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0)); + RzILOpPure *op_NOT_6 = LOGNOT(op_LSHIFT_5); + RzILOpPure *op_AND_7 = LOGAND(READ_REG(pkt, Rxx_op, false), op_NOT_6); + RzILOpPure *op_RSHIFT_84 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_86 = LOGAND(op_RSHIFT_84, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_92 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_95 = LOGAND(op_RSHIFT_92, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_101 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_104 = LOGAND(op_RSHIFT_101, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_107 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_95), DUP(op_AND_95))), CAST(16, MSB(DUP(op_AND_95)), DUP(op_AND_95))), CAST(32, MSB(CAST(16, MSB(op_AND_104), DUP(op_AND_104))), CAST(16, MSB(DUP(op_AND_104)), DUP(op_AND_104)))); + RzILOpPure *op_LSHIFT_110 = SHIFTL0(CAST(64, MSB(op_MUL_107), DUP(op_MUL_107)), SN(32, 1)); + RzILOpPure *op_ADD_111 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_86), DUP(op_AND_86))), CAST(32, MSB(DUP(op_AND_86)), DUP(op_AND_86))), op_LSHIFT_110); + RzILOpPure *cond_168 = ITE(DUP(op_EQ_80), op_ADD_111, VARL("h_tmp393")); + RzILOpPure *op_AND_170 = LOGAND(cond_168, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_174 = SHIFTL0(op_AND_170, SN(32, 0)); + RzILOpPure *op_OR_175 = LOGOR(op_AND_7, op_LSHIFT_174); + RzILOpEffect *op_ASSIGN_176 = WRITE_REG(bundle, Rxx_op, op_OR_175); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((R ...; + RzILOpEffect *seq_177 = SEQN(2, seq_167, op_ASSIGN_176); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_290 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + 
// HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) << 0x1)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) << 0x1))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) << 0x1) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_192 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_194 = LOGAND(op_RSHIFT_192, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_200 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_203 = LOGAND(op_RSHIFT_200, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_209 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_212 = LOGAND(op_RSHIFT_209, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_215 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_203), DUP(op_AND_203))), CAST(16, MSB(DUP(op_AND_203)), DUP(op_AND_203))), CAST(32, MSB(CAST(16, MSB(op_AND_212), DUP(op_AND_212))), CAST(16, MSB(DUP(op_AND_212)), DUP(op_AND_212)))); + RzILOpPure *op_LSHIFT_218 = SHIFTL0(CAST(64, MSB(op_MUL_215), DUP(op_MUL_215)), SN(32, 1)); + RzILOpPure *op_ADD_219 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_194), DUP(op_AND_194))), CAST(32, MSB(DUP(op_AND_194)), DUP(op_AND_194))), op_LSHIFT_218); + RzILOpPure *op_RSHIFT_228 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_230 = LOGAND(op_RSHIFT_228, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_236 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_239 = LOGAND(op_RSHIFT_236, CAST(64, MSB(SN(32, 0xffff)), SN(32, 
0xffff))); + RzILOpPure *op_RSHIFT_245 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_248 = LOGAND(op_RSHIFT_245, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_251 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_239), DUP(op_AND_239))), CAST(16, MSB(DUP(op_AND_239)), DUP(op_AND_239))), CAST(32, MSB(CAST(16, MSB(op_AND_248), DUP(op_AND_248))), CAST(16, MSB(DUP(op_AND_248)), DUP(op_AND_248)))); + RzILOpPure *op_LSHIFT_254 = SHIFTL0(CAST(64, MSB(op_MUL_251), DUP(op_MUL_251)), SN(32, 1)); + RzILOpPure *op_ADD_255 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_230), DUP(op_AND_230))), CAST(32, MSB(DUP(op_AND_230)), DUP(op_AND_230))), op_LSHIFT_254); + RzILOpPure *op_EQ_256 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_ADD_219), SN(32, 0), SN(32, 0x20)), op_ADD_255); + RzILOpPure *op_RSHIFT_294 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_296 = LOGAND(op_RSHIFT_294, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_302 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_305 = LOGAND(op_RSHIFT_302, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_311 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_314 = LOGAND(op_RSHIFT_311, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_317 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_305), DUP(op_AND_305))), CAST(16, MSB(DUP(op_AND_305)), DUP(op_AND_305))), CAST(32, MSB(CAST(16, MSB(op_AND_314), DUP(op_AND_314))), CAST(16, MSB(DUP(op_AND_314)), DUP(op_AND_314)))); + RzILOpPure *op_LSHIFT_320 = SHIFTL0(CAST(64, MSB(op_MUL_317), DUP(op_MUL_317)), SN(32, 1)); + RzILOpPure *op_ADD_321 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_296), DUP(op_AND_296))), CAST(32, MSB(DUP(op_AND_296)), DUP(op_AND_296))), op_LSHIFT_320); + RzILOpPure *op_LT_324 = SLT(op_ADD_321, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_329 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_330 = NEG(op_LSHIFT_329); + RzILOpPure *op_LSHIFT_335 = SHIFTL0(SN(64, 1), SN(32, 31)); 
+ RzILOpPure *op_SUB_338 = SUB(op_LSHIFT_335, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_339 = ITE(op_LT_324, op_NEG_330, op_SUB_338); + RzILOpEffect *gcc_expr_340 = BRANCH(op_EQ_256, EMPTY(), set_usr_field_call_290); + + // h_tmp394 = HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) << 0x1)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) << 0x1))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) << 0x1) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_342 = SETL("h_tmp394", cond_339); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx > ...; + RzILOpEffect *seq_343 = SEQN(2, gcc_expr_340, op_ASSIGN_hybrid_tmp_342); + + // Rxx = ((Rxx & (~(0xffffffff << 0x20))) | ((((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) << 0x1)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) << 0x1)) ? 
((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) << 0x1) : h_tmp394) & 0xffffffff) << 0x20)); + RzILOpPure *op_LSHIFT_183 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0x20)); + RzILOpPure *op_NOT_184 = LOGNOT(op_LSHIFT_183); + RzILOpPure *op_AND_185 = LOGAND(READ_REG(pkt, Rxx_op, false), op_NOT_184); + RzILOpPure *op_RSHIFT_260 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_262 = LOGAND(op_RSHIFT_260, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_268 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_271 = LOGAND(op_RSHIFT_268, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_277 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_280 = LOGAND(op_RSHIFT_277, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_283 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_271), DUP(op_AND_271))), CAST(16, MSB(DUP(op_AND_271)), DUP(op_AND_271))), CAST(32, MSB(CAST(16, MSB(op_AND_280), DUP(op_AND_280))), CAST(16, MSB(DUP(op_AND_280)), DUP(op_AND_280)))); + RzILOpPure *op_LSHIFT_286 = SHIFTL0(CAST(64, MSB(op_MUL_283), DUP(op_MUL_283)), SN(32, 1)); + RzILOpPure *op_ADD_287 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_262), DUP(op_AND_262))), CAST(32, MSB(DUP(op_AND_262)), DUP(op_AND_262))), op_LSHIFT_286); + RzILOpPure *cond_344 = ITE(DUP(op_EQ_256), op_ADD_287, VARL("h_tmp394")); + RzILOpPure *op_AND_346 = LOGAND(cond_344, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_350 = SHIFTL0(op_AND_346, SN(32, 0x20)); + RzILOpPure *op_OR_351 = LOGOR(op_AND_185, op_LSHIFT_350); + RzILOpEffect *op_ASSIGN_352 = WRITE_REG(bundle, Rxx_op, op_OR_351); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((R ...; + RzILOpEffect *seq_353 = SEQN(2, seq_343, op_ASSIGN_352); + + RzILOpEffect *instruction_sequence = SEQN(2, seq_177, seq_353); + return instruction_sequence; +} + +// Rxx += vmpyh(Rs,Rt):sat +RzILOpEffect 
*hex_il_op_m2_vmac2s_s0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rxx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_108 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_23 = SHIFTRA(Rs, SN(32, 0)); + RzILOpPure *op_AND_25 = LOGAND(op_RSHIFT_23, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_32 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_34 = LOGAND(op_RSHIFT_32, SN(32, 0xffff)); + RzILOpPure *op_MUL_37 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_25), DUP(op_AND_25))), CAST(16, MSB(DUP(op_AND_25)), DUP(op_AND_25))), CAST(32, MSB(CAST(16, MSB(op_AND_34), DUP(op_AND_34))), CAST(16, MSB(DUP(op_AND_34)), DUP(op_AND_34)))); + RzILOpPure *op_LSHIFT_40 = SHIFTL0(CAST(64, MSB(op_MUL_37), DUP(op_MUL_37)), SN(32, 0)); + RzILOpPure *op_ADD_41 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_16), DUP(op_AND_16))), CAST(32, MSB(DUP(op_AND_16)), DUP(op_AND_16))), op_LSHIFT_40); + RzILOpPure *op_RSHIFT_50 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_52 = LOGAND(op_RSHIFT_50, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_58 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_60 = LOGAND(op_RSHIFT_58, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_66 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_68 = LOGAND(op_RSHIFT_66, SN(32, 0xffff)); + RzILOpPure *op_MUL_71 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_60), DUP(op_AND_60))), CAST(16, MSB(DUP(op_AND_60)), DUP(op_AND_60))), CAST(32, MSB(CAST(16, MSB(op_AND_68), DUP(op_AND_68))), CAST(16, MSB(DUP(op_AND_68)), DUP(op_AND_68)))); + RzILOpPure *op_LSHIFT_74 = SHIFTL0(CAST(64, MSB(op_MUL_71), DUP(op_MUL_71)), SN(32, 0)); + RzILOpPure *op_ADD_75 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_52), DUP(op_AND_52))), CAST(32, MSB(DUP(op_AND_52)), DUP(op_AND_52))), op_LSHIFT_74); + RzILOpPure *op_EQ_76 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_ADD_41), SN(32, 0), SN(32, 0x20)), op_ADD_75); + RzILOpPure *op_RSHIFT_112 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_114 = 
LOGAND(op_RSHIFT_112, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_120 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_122 = LOGAND(op_RSHIFT_120, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_128 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_130 = LOGAND(op_RSHIFT_128, SN(32, 0xffff)); + RzILOpPure *op_MUL_133 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_122), DUP(op_AND_122))), CAST(16, MSB(DUP(op_AND_122)), DUP(op_AND_122))), CAST(32, MSB(CAST(16, MSB(op_AND_130), DUP(op_AND_130))), CAST(16, MSB(DUP(op_AND_130)), DUP(op_AND_130)))); + RzILOpPure *op_LSHIFT_136 = SHIFTL0(CAST(64, MSB(op_MUL_133), DUP(op_MUL_133)), SN(32, 0)); + RzILOpPure *op_ADD_137 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_114), DUP(op_AND_114))), CAST(32, MSB(DUP(op_AND_114)), DUP(op_AND_114))), op_LSHIFT_136); + RzILOpPure *op_LT_140 = SLT(op_ADD_137, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_145 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_146 = NEG(op_LSHIFT_145); + RzILOpPure *op_LSHIFT_151 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_154 = SUB(op_LSHIFT_151, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_155 = ITE(op_LT_140, op_NEG_146, op_SUB_154); + RzILOpEffect *gcc_expr_156 = BRANCH(op_EQ_76, EMPTY(), set_usr_field_call_108); + + // h_tmp395 = HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_158 = SETL("h_tmp395", cond_155); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx > ...; + RzILOpEffect *seq_159 = SEQN(2, gcc_expr_156, op_ASSIGN_hybrid_tmp_158); + + // Rxx = ((Rxx & (~(0xffffffff << 0x0))) | ((((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0)) ? ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) : h_tmp395) & 0xffffffff) << 0x0)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0)); + RzILOpPure *op_NOT_6 = LOGNOT(op_LSHIFT_5); + RzILOpPure *op_AND_7 = LOGAND(READ_REG(pkt, Rxx_op, false), op_NOT_6); + RzILOpPure *op_RSHIFT_80 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_82 = LOGAND(op_RSHIFT_80, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_88 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_90 = LOGAND(op_RSHIFT_88, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_96 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_98 = LOGAND(op_RSHIFT_96, SN(32, 0xffff)); + RzILOpPure *op_MUL_101 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_90), DUP(op_AND_90))), CAST(16, MSB(DUP(op_AND_90)), DUP(op_AND_90))), CAST(32, MSB(CAST(16, MSB(op_AND_98), DUP(op_AND_98))), CAST(16, MSB(DUP(op_AND_98)), DUP(op_AND_98)))); + RzILOpPure *op_LSHIFT_104 = SHIFTL0(CAST(64, MSB(op_MUL_101), DUP(op_MUL_101)), SN(32, 0)); + RzILOpPure *op_ADD_105 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_82), DUP(op_AND_82))), CAST(32, MSB(DUP(op_AND_82)), DUP(op_AND_82))), op_LSHIFT_104); + RzILOpPure *cond_160 = ITE(DUP(op_EQ_76), op_ADD_105, VARL("h_tmp395")); + RzILOpPure 
*op_AND_162 = LOGAND(cond_160, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_166 = SHIFTL0(op_AND_162, SN(32, 0)); + RzILOpPure *op_OR_167 = LOGOR(op_AND_7, op_LSHIFT_166); + RzILOpEffect *op_ASSIGN_168 = WRITE_REG(bundle, Rxx_op, op_OR_167); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((R ...; + RzILOpEffect *seq_169 = SEQN(2, seq_159, op_ASSIGN_168); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_276 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_184 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_186 = LOGAND(op_RSHIFT_184, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_192 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_194 = LOGAND(op_RSHIFT_192, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_200 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_202 = LOGAND(op_RSHIFT_200, SN(32, 0xffff)); + RzILOpPure *op_MUL_205 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_194), DUP(op_AND_194))), CAST(16, MSB(DUP(op_AND_194)), DUP(op_AND_194))), CAST(32, MSB(CAST(16, MSB(op_AND_202), DUP(op_AND_202))), CAST(16, MSB(DUP(op_AND_202)), DUP(op_AND_202)))); + RzILOpPure *op_LSHIFT_208 = SHIFTL0(CAST(64, MSB(op_MUL_205), DUP(op_MUL_205)), SN(32, 0)); + RzILOpPure *op_ADD_209 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_186), DUP(op_AND_186))), CAST(32, MSB(DUP(op_AND_186)), DUP(op_AND_186))), op_LSHIFT_208); + RzILOpPure *op_RSHIFT_218 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_220 = LOGAND(op_RSHIFT_218, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_226 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_228 = LOGAND(op_RSHIFT_226, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_234 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_236 = LOGAND(op_RSHIFT_234, SN(32, 0xffff)); + RzILOpPure *op_MUL_239 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_228), DUP(op_AND_228))), CAST(16, MSB(DUP(op_AND_228)), DUP(op_AND_228))), CAST(32, MSB(CAST(16, MSB(op_AND_236), DUP(op_AND_236))), CAST(16, MSB(DUP(op_AND_236)), DUP(op_AND_236)))); + RzILOpPure *op_LSHIFT_242 = SHIFTL0(CAST(64, MSB(op_MUL_239), DUP(op_MUL_239)), SN(32, 0)); + RzILOpPure *op_ADD_243 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_220), DUP(op_AND_220))), CAST(32, MSB(DUP(op_AND_220)), DUP(op_AND_220))), op_LSHIFT_242); + RzILOpPure *op_EQ_244 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_ADD_209), SN(32, 0), SN(32, 0x20)), op_ADD_243); + RzILOpPure *op_RSHIFT_280 = 
SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_282 = LOGAND(op_RSHIFT_280, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_288 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_290 = LOGAND(op_RSHIFT_288, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_296 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_298 = LOGAND(op_RSHIFT_296, SN(32, 0xffff)); + RzILOpPure *op_MUL_301 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_290), DUP(op_AND_290))), CAST(16, MSB(DUP(op_AND_290)), DUP(op_AND_290))), CAST(32, MSB(CAST(16, MSB(op_AND_298), DUP(op_AND_298))), CAST(16, MSB(DUP(op_AND_298)), DUP(op_AND_298)))); + RzILOpPure *op_LSHIFT_304 = SHIFTL0(CAST(64, MSB(op_MUL_301), DUP(op_MUL_301)), SN(32, 0)); + RzILOpPure *op_ADD_305 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_282), DUP(op_AND_282))), CAST(32, MSB(DUP(op_AND_282)), DUP(op_AND_282))), op_LSHIFT_304); + RzILOpPure *op_LT_308 = SLT(op_ADD_305, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_313 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_314 = NEG(op_LSHIFT_313); + RzILOpPure *op_LSHIFT_319 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_322 = SUB(op_LSHIFT_319, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_323 = ITE(op_LT_308, op_NEG_314, op_SUB_322); + RzILOpEffect *gcc_expr_324 = BRANCH(op_EQ_244, EMPTY(), set_usr_field_call_276); + + // h_tmp396 = HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_326 = SETL("h_tmp396", cond_323); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx > ...; + RzILOpEffect *seq_327 = SEQN(2, gcc_expr_324, op_ASSIGN_hybrid_tmp_326); + + // Rxx = ((Rxx & (~(0xffffffff << 0x20))) | ((((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0)) ? ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0) : h_tmp396) & 0xffffffff) << 0x20)); + RzILOpPure *op_LSHIFT_175 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0x20)); + RzILOpPure *op_NOT_176 = LOGNOT(op_LSHIFT_175); + RzILOpPure *op_AND_177 = LOGAND(READ_REG(pkt, Rxx_op, false), op_NOT_176); + RzILOpPure *op_RSHIFT_248 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_250 = LOGAND(op_RSHIFT_248, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_256 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_258 = LOGAND(op_RSHIFT_256, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_264 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_266 = LOGAND(op_RSHIFT_264, SN(32, 0xffff)); + RzILOpPure *op_MUL_269 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_258), DUP(op_AND_258))), CAST(16, MSB(DUP(op_AND_258)), DUP(op_AND_258))), CAST(32, MSB(CAST(16, MSB(op_AND_266), DUP(op_AND_266))), CAST(16, MSB(DUP(op_AND_266)), DUP(op_AND_266)))); + RzILOpPure *op_LSHIFT_272 = SHIFTL0(CAST(64, MSB(op_MUL_269), DUP(op_MUL_269)), SN(32, 0)); + RzILOpPure *op_ADD_273 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_250), DUP(op_AND_250))), CAST(32, MSB(DUP(op_AND_250)), DUP(op_AND_250))), op_LSHIFT_272); + RzILOpPure *cond_328 = 
ITE(DUP(op_EQ_244), op_ADD_273, VARL("h_tmp396")); + RzILOpPure *op_AND_330 = LOGAND(cond_328, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_334 = SHIFTL0(op_AND_330, SN(32, 0x20)); + RzILOpPure *op_OR_335 = LOGOR(op_AND_177, op_LSHIFT_334); + RzILOpEffect *op_ASSIGN_336 = WRITE_REG(bundle, Rxx_op, op_OR_335); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((R ...; + RzILOpEffect *seq_337 = SEQN(2, seq_327, op_ASSIGN_336); + + RzILOpEffect *instruction_sequence = SEQN(2, seq_169, seq_337); + return instruction_sequence; +} + +// Rxx += vmpyh(Rs,Rt):<<1:sat +RzILOpEffect *hex_il_op_m2_vmac2s_s1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rxx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_108 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_23 = SHIFTRA(Rs, SN(32, 0)); + RzILOpPure *op_AND_25 = LOGAND(op_RSHIFT_23, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_32 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_34 = LOGAND(op_RSHIFT_32, SN(32, 0xffff)); + RzILOpPure *op_MUL_37 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_25), DUP(op_AND_25))), CAST(16, MSB(DUP(op_AND_25)), DUP(op_AND_25))), CAST(32, MSB(CAST(16, MSB(op_AND_34), DUP(op_AND_34))), CAST(16, MSB(DUP(op_AND_34)), DUP(op_AND_34)))); + RzILOpPure *op_LSHIFT_40 = SHIFTL0(CAST(64, MSB(op_MUL_37), DUP(op_MUL_37)), SN(32, 1)); + RzILOpPure *op_ADD_41 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_16), DUP(op_AND_16))), CAST(32, MSB(DUP(op_AND_16)), DUP(op_AND_16))), op_LSHIFT_40); + RzILOpPure *op_RSHIFT_50 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_52 = LOGAND(op_RSHIFT_50, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_58 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_60 = LOGAND(op_RSHIFT_58, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_66 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_68 = LOGAND(op_RSHIFT_66, SN(32, 0xffff)); + RzILOpPure *op_MUL_71 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_60), DUP(op_AND_60))), CAST(16, MSB(DUP(op_AND_60)), DUP(op_AND_60))), CAST(32, MSB(CAST(16, MSB(op_AND_68), DUP(op_AND_68))), CAST(16, MSB(DUP(op_AND_68)), DUP(op_AND_68)))); + RzILOpPure *op_LSHIFT_74 = SHIFTL0(CAST(64, MSB(op_MUL_71), DUP(op_MUL_71)), SN(32, 1)); + RzILOpPure *op_ADD_75 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_52), DUP(op_AND_52))), CAST(32, MSB(DUP(op_AND_52)), DUP(op_AND_52))), op_LSHIFT_74); + RzILOpPure *op_EQ_76 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_ADD_41), SN(32, 0), SN(32, 0x20)), op_ADD_75); + RzILOpPure *op_RSHIFT_112 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_114 = 
LOGAND(op_RSHIFT_112, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_120 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_122 = LOGAND(op_RSHIFT_120, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_128 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_130 = LOGAND(op_RSHIFT_128, SN(32, 0xffff)); + RzILOpPure *op_MUL_133 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_122), DUP(op_AND_122))), CAST(16, MSB(DUP(op_AND_122)), DUP(op_AND_122))), CAST(32, MSB(CAST(16, MSB(op_AND_130), DUP(op_AND_130))), CAST(16, MSB(DUP(op_AND_130)), DUP(op_AND_130)))); + RzILOpPure *op_LSHIFT_136 = SHIFTL0(CAST(64, MSB(op_MUL_133), DUP(op_MUL_133)), SN(32, 1)); + RzILOpPure *op_ADD_137 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_114), DUP(op_AND_114))), CAST(32, MSB(DUP(op_AND_114)), DUP(op_AND_114))), op_LSHIFT_136); + RzILOpPure *op_LT_140 = SLT(op_ADD_137, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_145 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_146 = NEG(op_LSHIFT_145); + RzILOpPure *op_LSHIFT_151 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_154 = SUB(op_LSHIFT_151, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_155 = ITE(op_LT_140, op_NEG_146, op_SUB_154); + RzILOpEffect *gcc_expr_156 = BRANCH(op_EQ_76, EMPTY(), set_usr_field_call_108); + + // h_tmp397 = HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_158 = SETL("h_tmp397", cond_155); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx > ...; + RzILOpEffect *seq_159 = SEQN(2, gcc_expr_156, op_ASSIGN_hybrid_tmp_158); + + // Rxx = ((Rxx & (~(0xffffffff << 0x0))) | ((((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1)) ? ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) : h_tmp397) & 0xffffffff) << 0x0)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0)); + RzILOpPure *op_NOT_6 = LOGNOT(op_LSHIFT_5); + RzILOpPure *op_AND_7 = LOGAND(READ_REG(pkt, Rxx_op, false), op_NOT_6); + RzILOpPure *op_RSHIFT_80 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_82 = LOGAND(op_RSHIFT_80, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_88 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_90 = LOGAND(op_RSHIFT_88, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_96 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_98 = LOGAND(op_RSHIFT_96, SN(32, 0xffff)); + RzILOpPure *op_MUL_101 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_90), DUP(op_AND_90))), CAST(16, MSB(DUP(op_AND_90)), DUP(op_AND_90))), CAST(32, MSB(CAST(16, MSB(op_AND_98), DUP(op_AND_98))), CAST(16, MSB(DUP(op_AND_98)), DUP(op_AND_98)))); + RzILOpPure *op_LSHIFT_104 = SHIFTL0(CAST(64, MSB(op_MUL_101), DUP(op_MUL_101)), SN(32, 1)); + RzILOpPure *op_ADD_105 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_82), DUP(op_AND_82))), CAST(32, MSB(DUP(op_AND_82)), DUP(op_AND_82))), op_LSHIFT_104); + RzILOpPure *cond_160 = ITE(DUP(op_EQ_76), op_ADD_105, VARL("h_tmp397")); + RzILOpPure 
*op_AND_162 = LOGAND(cond_160, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_166 = SHIFTL0(op_AND_162, SN(32, 0)); + RzILOpPure *op_OR_167 = LOGOR(op_AND_7, op_LSHIFT_166); + RzILOpEffect *op_ASSIGN_168 = WRITE_REG(bundle, Rxx_op, op_OR_167); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((R ...; + RzILOpEffect *seq_169 = SEQN(2, seq_159, op_ASSIGN_168); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_276 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_184 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_186 = LOGAND(op_RSHIFT_184, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_192 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_194 = LOGAND(op_RSHIFT_192, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_200 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_202 = LOGAND(op_RSHIFT_200, SN(32, 0xffff)); + RzILOpPure *op_MUL_205 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_194), DUP(op_AND_194))), CAST(16, MSB(DUP(op_AND_194)), DUP(op_AND_194))), CAST(32, MSB(CAST(16, MSB(op_AND_202), DUP(op_AND_202))), CAST(16, MSB(DUP(op_AND_202)), DUP(op_AND_202)))); + RzILOpPure *op_LSHIFT_208 = SHIFTL0(CAST(64, MSB(op_MUL_205), DUP(op_MUL_205)), SN(32, 1)); + RzILOpPure *op_ADD_209 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_186), DUP(op_AND_186))), CAST(32, MSB(DUP(op_AND_186)), DUP(op_AND_186))), op_LSHIFT_208); + RzILOpPure *op_RSHIFT_218 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_220 = LOGAND(op_RSHIFT_218, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_226 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_228 = LOGAND(op_RSHIFT_226, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_234 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_236 = LOGAND(op_RSHIFT_234, SN(32, 0xffff)); + RzILOpPure *op_MUL_239 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_228), DUP(op_AND_228))), CAST(16, MSB(DUP(op_AND_228)), DUP(op_AND_228))), CAST(32, MSB(CAST(16, MSB(op_AND_236), DUP(op_AND_236))), CAST(16, MSB(DUP(op_AND_236)), DUP(op_AND_236)))); + RzILOpPure *op_LSHIFT_242 = SHIFTL0(CAST(64, MSB(op_MUL_239), DUP(op_MUL_239)), SN(32, 1)); + RzILOpPure *op_ADD_243 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_220), DUP(op_AND_220))), CAST(32, MSB(DUP(op_AND_220)), DUP(op_AND_220))), op_LSHIFT_242); + RzILOpPure *op_EQ_244 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_ADD_209), SN(32, 0), SN(32, 0x20)), op_ADD_243); + RzILOpPure *op_RSHIFT_280 = 
SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_282 = LOGAND(op_RSHIFT_280, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_288 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_290 = LOGAND(op_RSHIFT_288, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_296 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_298 = LOGAND(op_RSHIFT_296, SN(32, 0xffff)); + RzILOpPure *op_MUL_301 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_290), DUP(op_AND_290))), CAST(16, MSB(DUP(op_AND_290)), DUP(op_AND_290))), CAST(32, MSB(CAST(16, MSB(op_AND_298), DUP(op_AND_298))), CAST(16, MSB(DUP(op_AND_298)), DUP(op_AND_298)))); + RzILOpPure *op_LSHIFT_304 = SHIFTL0(CAST(64, MSB(op_MUL_301), DUP(op_MUL_301)), SN(32, 1)); + RzILOpPure *op_ADD_305 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_282), DUP(op_AND_282))), CAST(32, MSB(DUP(op_AND_282)), DUP(op_AND_282))), op_LSHIFT_304); + RzILOpPure *op_LT_308 = SLT(op_ADD_305, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_313 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_314 = NEG(op_LSHIFT_313); + RzILOpPure *op_LSHIFT_319 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_322 = SUB(op_LSHIFT_319, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_323 = ITE(op_LT_308, op_NEG_314, op_SUB_322); + RzILOpEffect *gcc_expr_324 = BRANCH(op_EQ_244, EMPTY(), set_usr_field_call_276); + + // h_tmp398 = HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_326 = SETL("h_tmp398", cond_323); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx > ...; + RzILOpEffect *seq_327 = SEQN(2, gcc_expr_324, op_ASSIGN_hybrid_tmp_326); + + // Rxx = ((Rxx & (~(0xffffffff << 0x20))) | ((((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1)) ? ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1) : h_tmp398) & 0xffffffff) << 0x20)); + RzILOpPure *op_LSHIFT_175 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0x20)); + RzILOpPure *op_NOT_176 = LOGNOT(op_LSHIFT_175); + RzILOpPure *op_AND_177 = LOGAND(READ_REG(pkt, Rxx_op, false), op_NOT_176); + RzILOpPure *op_RSHIFT_248 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_250 = LOGAND(op_RSHIFT_248, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_256 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_258 = LOGAND(op_RSHIFT_256, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_264 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_266 = LOGAND(op_RSHIFT_264, SN(32, 0xffff)); + RzILOpPure *op_MUL_269 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_258), DUP(op_AND_258))), CAST(16, MSB(DUP(op_AND_258)), DUP(op_AND_258))), CAST(32, MSB(CAST(16, MSB(op_AND_266), DUP(op_AND_266))), CAST(16, MSB(DUP(op_AND_266)), DUP(op_AND_266)))); + RzILOpPure *op_LSHIFT_272 = SHIFTL0(CAST(64, MSB(op_MUL_269), DUP(op_MUL_269)), SN(32, 1)); + RzILOpPure *op_ADD_273 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_250), DUP(op_AND_250))), CAST(32, MSB(DUP(op_AND_250)), DUP(op_AND_250))), op_LSHIFT_272); + RzILOpPure *cond_328 = 
ITE(DUP(op_EQ_244), op_ADD_273, VARL("h_tmp398")); + RzILOpPure *op_AND_330 = LOGAND(cond_328, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_334 = SHIFTL0(op_AND_330, SN(32, 0x20)); + RzILOpPure *op_OR_335 = LOGOR(op_AND_177, op_LSHIFT_334); + RzILOpEffect *op_ASSIGN_336 = WRITE_REG(bundle, Rxx_op, op_OR_335); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((R ...; + RzILOpEffect *seq_337 = SEQN(2, seq_327, op_ASSIGN_336); + + RzILOpEffect *instruction_sequence = SEQN(2, seq_169, seq_337); + return instruction_sequence; +} + +// Rxx += vmpyhsu(Rs,Rt):sat +RzILOpEffect *hex_il_op_m2_vmac2su_s0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rxx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_114 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + (((st64) ((st32) ((ut32) ((st32) ((st16) ((Rs >> 0x0) & 0xffff)))) * ((ut32) ((ut16) ((Rt >> 0x0) & 0xffff))))) << 0x0)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + (((st64) ((st32) ((ut32) ((st32) ((st16) ((Rs >> 0x0) & 0xffff)))) * ((ut32) ((ut16) ((Rt >> 0x0) & 0xffff))))) << 0x0))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + (((st64) ((st32) ((ut32) ((st32) ((st16) ((Rs >> 0x0) & 0xffff)))) * ((ut32) ((ut16) ((Rt >> 0x0) & 0xffff))))) << 0x0) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_23 = SHIFTRA(Rs, SN(32, 0)); + RzILOpPure *op_AND_25 = LOGAND(op_RSHIFT_23, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_32 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_34 = LOGAND(op_RSHIFT_32, SN(32, 0xffff)); + RzILOpPure *op_MUL_38 = MUL(CAST(32, IL_FALSE, CAST(32, MSB(CAST(16, MSB(op_AND_25), DUP(op_AND_25))), CAST(16, MSB(DUP(op_AND_25)), DUP(op_AND_25)))), CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_34))); + RzILOpPure *op_LSHIFT_42 = SHIFTL0(CAST(64, MSB(CAST(32, IL_FALSE, op_MUL_38)), CAST(32, IL_FALSE, DUP(op_MUL_38))), SN(32, 0)); + RzILOpPure *op_ADD_43 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_16), DUP(op_AND_16))), CAST(32, MSB(DUP(op_AND_16)), DUP(op_AND_16))), op_LSHIFT_42); + RzILOpPure *op_RSHIFT_52 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_54 = LOGAND(op_RSHIFT_52, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_60 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_62 = LOGAND(op_RSHIFT_60, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_68 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_70 = LOGAND(op_RSHIFT_68, SN(32, 0xffff)); + RzILOpPure *op_MUL_74 = MUL(CAST(32, IL_FALSE, CAST(32, MSB(CAST(16, MSB(op_AND_62), DUP(op_AND_62))), CAST(16, MSB(DUP(op_AND_62)), DUP(op_AND_62)))), CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_70))); + RzILOpPure *op_LSHIFT_78 = SHIFTL0(CAST(64, MSB(CAST(32, IL_FALSE, op_MUL_74)), CAST(32, IL_FALSE, DUP(op_MUL_74))), SN(32, 0)); + RzILOpPure *op_ADD_79 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_54), DUP(op_AND_54))), CAST(32, MSB(DUP(op_AND_54)), DUP(op_AND_54))), op_LSHIFT_78); + RzILOpPure *op_EQ_80 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_ADD_43), SN(32, 0), SN(32, 0x20)), op_ADD_79); + RzILOpPure *op_RSHIFT_118 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure 
*op_AND_120 = LOGAND(op_RSHIFT_118, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_126 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_128 = LOGAND(op_RSHIFT_126, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_134 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_136 = LOGAND(op_RSHIFT_134, SN(32, 0xffff)); + RzILOpPure *op_MUL_140 = MUL(CAST(32, IL_FALSE, CAST(32, MSB(CAST(16, MSB(op_AND_128), DUP(op_AND_128))), CAST(16, MSB(DUP(op_AND_128)), DUP(op_AND_128)))), CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_136))); + RzILOpPure *op_LSHIFT_144 = SHIFTL0(CAST(64, MSB(CAST(32, IL_FALSE, op_MUL_140)), CAST(32, IL_FALSE, DUP(op_MUL_140))), SN(32, 0)); + RzILOpPure *op_ADD_145 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_120), DUP(op_AND_120))), CAST(32, MSB(DUP(op_AND_120)), DUP(op_AND_120))), op_LSHIFT_144); + RzILOpPure *op_LT_148 = SLT(op_ADD_145, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_153 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_154 = NEG(op_LSHIFT_153); + RzILOpPure *op_LSHIFT_159 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_162 = SUB(op_LSHIFT_159, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_163 = ITE(op_LT_148, op_NEG_154, op_SUB_162); + RzILOpEffect *gcc_expr_164 = BRANCH(op_EQ_80, EMPTY(), set_usr_field_call_114); + + // h_tmp399 = HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + (((st64) ((st32) ((ut32) ((st32) ((st16) ((Rs >> 0x0) & 0xffff)))) * ((ut32) ((ut16) ((Rt >> 0x0) & 0xffff))))) << 0x0)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + (((st64) ((st32) ((ut32) ((st32) ((st16) ((Rs >> 0x0) & 0xffff)))) * ((ut32) ((ut16) ((Rt >> 0x0) & 0xffff))))) << 0x0))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + (((st64) ((st32) ((ut32) ((st32) ((st16) ((Rs >> 0x0) & 0xffff)))) * ((ut32) ((ut16) ((Rt >> 0x0) & 0xffff))))) << 0x0) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_166 = SETL("h_tmp399", cond_163); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx > ...; + RzILOpEffect *seq_167 = SEQN(2, gcc_expr_164, op_ASSIGN_hybrid_tmp_166); + + // Rxx = ((Rxx & (~(0xffffffff << 0x0))) | ((((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + (((st64) ((st32) ((ut32) ((st32) ((st16) ((Rs >> 0x0) & 0xffff)))) * ((ut32) ((ut16) ((Rt >> 0x0) & 0xffff))))) << 0x0)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + (((st64) ((st32) ((ut32) ((st32) ((st16) ((Rs >> 0x0) & 0xffff)))) * ((ut32) ((ut16) ((Rt >> 0x0) & 0xffff))))) << 0x0)) ? ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + (((st64) ((st32) ((ut32) ((st32) ((st16) ((Rs >> 0x0) & 0xffff)))) * ((ut32) ((ut16) ((Rt >> 0x0) & 0xffff))))) << 0x0) : h_tmp399) & 0xffffffff) << 0x0)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0)); + RzILOpPure *op_NOT_6 = LOGNOT(op_LSHIFT_5); + RzILOpPure *op_AND_7 = LOGAND(READ_REG(pkt, Rxx_op, false), op_NOT_6); + RzILOpPure *op_RSHIFT_84 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_86 = LOGAND(op_RSHIFT_84, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_92 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_94 = LOGAND(op_RSHIFT_92, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_100 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_102 = LOGAND(op_RSHIFT_100, SN(32, 0xffff)); + RzILOpPure *op_MUL_106 = MUL(CAST(32, IL_FALSE, CAST(32, MSB(CAST(16, MSB(op_AND_94), DUP(op_AND_94))), CAST(16, MSB(DUP(op_AND_94)), DUP(op_AND_94)))), CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_102))); + RzILOpPure *op_LSHIFT_110 = SHIFTL0(CAST(64, MSB(CAST(32, IL_FALSE, op_MUL_106)), CAST(32, IL_FALSE, DUP(op_MUL_106))), SN(32, 0)); + RzILOpPure *op_ADD_111 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_86), DUP(op_AND_86))), CAST(32, MSB(DUP(op_AND_86)), DUP(op_AND_86))), op_LSHIFT_110); + RzILOpPure *cond_168 
= ITE(DUP(op_EQ_80), op_ADD_111, VARL("h_tmp399")); + RzILOpPure *op_AND_170 = LOGAND(cond_168, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_174 = SHIFTL0(op_AND_170, SN(32, 0)); + RzILOpPure *op_OR_175 = LOGOR(op_AND_7, op_LSHIFT_174); + RzILOpEffect *op_ASSIGN_176 = WRITE_REG(bundle, Rxx_op, op_OR_175); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((R ...; + RzILOpEffect *seq_177 = SEQN(2, seq_167, op_ASSIGN_176); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_290 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + (((st64) ((st32) ((ut32) ((st32) ((st16) ((Rs >> 0x10) & 0xffff)))) * ((ut32) ((ut16) ((Rt >> 0x10) & 0xffff))))) << 0x0)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + (((st64) ((st32) ((ut32) ((st32) ((st16) ((Rs >> 0x10) & 0xffff)))) * ((ut32) ((ut16) ((Rt >> 0x10) & 0xffff))))) << 0x0))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + (((st64) ((st32) ((ut32) ((st32) ((st16) ((Rs >> 0x10) & 0xffff)))) * ((ut32) ((ut16) ((Rt >> 0x10) & 0xffff))))) << 0x0) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_192 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_194 = LOGAND(op_RSHIFT_192, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_200 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_202 = LOGAND(op_RSHIFT_200, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_208 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_210 = LOGAND(op_RSHIFT_208, SN(32, 0xffff)); + RzILOpPure *op_MUL_214 = MUL(CAST(32, IL_FALSE, CAST(32, MSB(CAST(16, MSB(op_AND_202), DUP(op_AND_202))), CAST(16, MSB(DUP(op_AND_202)), DUP(op_AND_202)))), CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_210))); + RzILOpPure *op_LSHIFT_218 = SHIFTL0(CAST(64, MSB(CAST(32, IL_FALSE, op_MUL_214)), CAST(32, IL_FALSE, DUP(op_MUL_214))), SN(32, 0)); + RzILOpPure *op_ADD_219 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_194), DUP(op_AND_194))), CAST(32, MSB(DUP(op_AND_194)), DUP(op_AND_194))), op_LSHIFT_218); + RzILOpPure *op_RSHIFT_228 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_230 = LOGAND(op_RSHIFT_228, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_236 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_238 = LOGAND(op_RSHIFT_236, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_244 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_246 = LOGAND(op_RSHIFT_244, SN(32, 0xffff)); + RzILOpPure *op_MUL_250 = MUL(CAST(32, IL_FALSE, CAST(32, MSB(CAST(16, MSB(op_AND_238), DUP(op_AND_238))), CAST(16, MSB(DUP(op_AND_238)), DUP(op_AND_238)))), CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_246))); + RzILOpPure *op_LSHIFT_254 = SHIFTL0(CAST(64, MSB(CAST(32, IL_FALSE, op_MUL_250)), CAST(32, IL_FALSE, DUP(op_MUL_250))), SN(32, 0)); + RzILOpPure *op_ADD_255 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_230), DUP(op_AND_230))), CAST(32, MSB(DUP(op_AND_230)), DUP(op_AND_230))), op_LSHIFT_254); + RzILOpPure *op_EQ_256 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_ADD_219), SN(32, 0), SN(32, 0x20)), op_ADD_255); + RzILOpPure 
*op_RSHIFT_294 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_296 = LOGAND(op_RSHIFT_294, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_302 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_304 = LOGAND(op_RSHIFT_302, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_310 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_312 = LOGAND(op_RSHIFT_310, SN(32, 0xffff)); + RzILOpPure *op_MUL_316 = MUL(CAST(32, IL_FALSE, CAST(32, MSB(CAST(16, MSB(op_AND_304), DUP(op_AND_304))), CAST(16, MSB(DUP(op_AND_304)), DUP(op_AND_304)))), CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_312))); + RzILOpPure *op_LSHIFT_320 = SHIFTL0(CAST(64, MSB(CAST(32, IL_FALSE, op_MUL_316)), CAST(32, IL_FALSE, DUP(op_MUL_316))), SN(32, 0)); + RzILOpPure *op_ADD_321 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_296), DUP(op_AND_296))), CAST(32, MSB(DUP(op_AND_296)), DUP(op_AND_296))), op_LSHIFT_320); + RzILOpPure *op_LT_324 = SLT(op_ADD_321, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_329 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_330 = NEG(op_LSHIFT_329); + RzILOpPure *op_LSHIFT_335 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_338 = SUB(op_LSHIFT_335, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_339 = ITE(op_LT_324, op_NEG_330, op_SUB_338); + RzILOpEffect *gcc_expr_340 = BRANCH(op_EQ_256, EMPTY(), set_usr_field_call_290); + + // h_tmp400 = HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + (((st64) ((st32) ((ut32) ((st32) ((st16) ((Rs >> 0x10) & 0xffff)))) * ((ut32) ((ut16) ((Rt >> 0x10) & 0xffff))))) << 0x0)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + (((st64) ((st32) ((ut32) ((st32) ((st16) ((Rs >> 0x10) & 0xffff)))) * ((ut32) ((ut16) ((Rt >> 0x10) & 0xffff))))) << 0x0))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + (((st64) ((st32) ((ut32) ((st32) ((st16) ((Rs >> 0x10) & 0xffff)))) * ((ut32) ((ut16) 
((Rt >> 0x10) & 0xffff))))) << 0x0) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_342 = SETL("h_tmp400", cond_339); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx > ...; + RzILOpEffect *seq_343 = SEQN(2, gcc_expr_340, op_ASSIGN_hybrid_tmp_342); + + // Rxx = ((Rxx & (~(0xffffffff << 0x20))) | ((((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + (((st64) ((st32) ((ut32) ((st32) ((st16) ((Rs >> 0x10) & 0xffff)))) * ((ut32) ((ut16) ((Rt >> 0x10) & 0xffff))))) << 0x0)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + (((st64) ((st32) ((ut32) ((st32) ((st16) ((Rs >> 0x10) & 0xffff)))) * ((ut32) ((ut16) ((Rt >> 0x10) & 0xffff))))) << 0x0)) ? ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + (((st64) ((st32) ((ut32) ((st32) ((st16) ((Rs >> 0x10) & 0xffff)))) * ((ut32) ((ut16) ((Rt >> 0x10) & 0xffff))))) << 0x0) : h_tmp400) & 0xffffffff) << 0x20)); + RzILOpPure *op_LSHIFT_183 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0x20)); + RzILOpPure *op_NOT_184 = LOGNOT(op_LSHIFT_183); + RzILOpPure *op_AND_185 = LOGAND(READ_REG(pkt, Rxx_op, false), op_NOT_184); + RzILOpPure *op_RSHIFT_260 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_262 = LOGAND(op_RSHIFT_260, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_268 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_270 = LOGAND(op_RSHIFT_268, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_276 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_278 = LOGAND(op_RSHIFT_276, SN(32, 0xffff)); + RzILOpPure *op_MUL_282 = MUL(CAST(32, IL_FALSE, CAST(32, MSB(CAST(16, MSB(op_AND_270), DUP(op_AND_270))), CAST(16, MSB(DUP(op_AND_270)), DUP(op_AND_270)))), CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_278))); + RzILOpPure *op_LSHIFT_286 = SHIFTL0(CAST(64, MSB(CAST(32, IL_FALSE, op_MUL_282)), CAST(32, IL_FALSE, DUP(op_MUL_282))), SN(32, 0)); + RzILOpPure *op_ADD_287 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_262), 
DUP(op_AND_262))), CAST(32, MSB(DUP(op_AND_262)), DUP(op_AND_262))), op_LSHIFT_286); + RzILOpPure *cond_344 = ITE(DUP(op_EQ_256), op_ADD_287, VARL("h_tmp400")); + RzILOpPure *op_AND_346 = LOGAND(cond_344, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_350 = SHIFTL0(op_AND_346, SN(32, 0x20)); + RzILOpPure *op_OR_351 = LOGOR(op_AND_185, op_LSHIFT_350); + RzILOpEffect *op_ASSIGN_352 = WRITE_REG(bundle, Rxx_op, op_OR_351); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((R ...; + RzILOpEffect *seq_353 = SEQN(2, seq_343, op_ASSIGN_352); + + RzILOpEffect *instruction_sequence = SEQN(2, seq_177, seq_353); + return instruction_sequence; +} + +// Rxx += vmpyhsu(Rs,Rt):<<1:sat +RzILOpEffect *hex_il_op_m2_vmac2su_s1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rxx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_114 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + (((st64) ((st32) ((ut32) ((st32) ((st16) ((Rs >> 0x0) & 0xffff)))) * ((ut32) ((ut16) ((Rt >> 0x0) & 0xffff))))) << 0x1)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + (((st64) ((st32) ((ut32) ((st32) ((st16) ((Rs >> 0x0) & 0xffff)))) * ((ut32) ((ut16) ((Rt >> 0x0) & 0xffff))))) << 0x1))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + (((st64) ((st32) ((ut32) ((st32) ((st16) ((Rs >> 0x0) & 0xffff)))) * ((ut32) ((ut16) ((Rt >> 0x0) & 0xffff))))) << 0x1) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_23 = SHIFTRA(Rs, SN(32, 0)); + RzILOpPure *op_AND_25 = LOGAND(op_RSHIFT_23, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_32 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_34 = LOGAND(op_RSHIFT_32, SN(32, 0xffff)); + RzILOpPure *op_MUL_38 = MUL(CAST(32, IL_FALSE, CAST(32, MSB(CAST(16, MSB(op_AND_25), DUP(op_AND_25))), CAST(16, MSB(DUP(op_AND_25)), DUP(op_AND_25)))), CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_34))); + RzILOpPure *op_LSHIFT_42 = SHIFTL0(CAST(64, MSB(CAST(32, IL_FALSE, op_MUL_38)), CAST(32, IL_FALSE, DUP(op_MUL_38))), SN(32, 1)); + RzILOpPure *op_ADD_43 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_16), DUP(op_AND_16))), CAST(32, MSB(DUP(op_AND_16)), DUP(op_AND_16))), op_LSHIFT_42); + RzILOpPure *op_RSHIFT_52 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_54 = LOGAND(op_RSHIFT_52, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_60 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_62 = LOGAND(op_RSHIFT_60, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_68 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_70 = LOGAND(op_RSHIFT_68, SN(32, 0xffff)); + RzILOpPure *op_MUL_74 = MUL(CAST(32, IL_FALSE, CAST(32, MSB(CAST(16, MSB(op_AND_62), DUP(op_AND_62))), CAST(16, MSB(DUP(op_AND_62)), DUP(op_AND_62)))), CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_70))); + RzILOpPure *op_LSHIFT_78 = SHIFTL0(CAST(64, MSB(CAST(32, IL_FALSE, op_MUL_74)), CAST(32, IL_FALSE, DUP(op_MUL_74))), SN(32, 1)); + RzILOpPure *op_ADD_79 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_54), DUP(op_AND_54))), CAST(32, MSB(DUP(op_AND_54)), DUP(op_AND_54))), op_LSHIFT_78); + RzILOpPure *op_EQ_80 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_ADD_43), SN(32, 0), SN(32, 0x20)), op_ADD_79); + RzILOpPure *op_RSHIFT_118 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure 
*op_AND_120 = LOGAND(op_RSHIFT_118, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_126 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_128 = LOGAND(op_RSHIFT_126, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_134 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_136 = LOGAND(op_RSHIFT_134, SN(32, 0xffff)); + RzILOpPure *op_MUL_140 = MUL(CAST(32, IL_FALSE, CAST(32, MSB(CAST(16, MSB(op_AND_128), DUP(op_AND_128))), CAST(16, MSB(DUP(op_AND_128)), DUP(op_AND_128)))), CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_136))); + RzILOpPure *op_LSHIFT_144 = SHIFTL0(CAST(64, MSB(CAST(32, IL_FALSE, op_MUL_140)), CAST(32, IL_FALSE, DUP(op_MUL_140))), SN(32, 1)); + RzILOpPure *op_ADD_145 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_120), DUP(op_AND_120))), CAST(32, MSB(DUP(op_AND_120)), DUP(op_AND_120))), op_LSHIFT_144); + RzILOpPure *op_LT_148 = SLT(op_ADD_145, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_153 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_154 = NEG(op_LSHIFT_153); + RzILOpPure *op_LSHIFT_159 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_162 = SUB(op_LSHIFT_159, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_163 = ITE(op_LT_148, op_NEG_154, op_SUB_162); + RzILOpEffect *gcc_expr_164 = BRANCH(op_EQ_80, EMPTY(), set_usr_field_call_114); + + // h_tmp401 = HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + (((st64) ((st32) ((ut32) ((st32) ((st16) ((Rs >> 0x0) & 0xffff)))) * ((ut32) ((ut16) ((Rt >> 0x0) & 0xffff))))) << 0x1)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + (((st64) ((st32) ((ut32) ((st32) ((st16) ((Rs >> 0x0) & 0xffff)))) * ((ut32) ((ut16) ((Rt >> 0x0) & 0xffff))))) << 0x1))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + (((st64) ((st32) ((ut32) ((st32) ((st16) ((Rs >> 0x0) & 0xffff)))) * ((ut32) ((ut16) ((Rt >> 0x0) & 0xffff))))) << 0x1) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_166 = SETL("h_tmp401", cond_163); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx > ...; + RzILOpEffect *seq_167 = SEQN(2, gcc_expr_164, op_ASSIGN_hybrid_tmp_166); + + // Rxx = ((Rxx & (~(0xffffffff << 0x0))) | ((((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + (((st64) ((st32) ((ut32) ((st32) ((st16) ((Rs >> 0x0) & 0xffff)))) * ((ut32) ((ut16) ((Rt >> 0x0) & 0xffff))))) << 0x1)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + (((st64) ((st32) ((ut32) ((st32) ((st16) ((Rs >> 0x0) & 0xffff)))) * ((ut32) ((ut16) ((Rt >> 0x0) & 0xffff))))) << 0x1)) ? ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + (((st64) ((st32) ((ut32) ((st32) ((st16) ((Rs >> 0x0) & 0xffff)))) * ((ut32) ((ut16) ((Rt >> 0x0) & 0xffff))))) << 0x1) : h_tmp401) & 0xffffffff) << 0x0)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0)); + RzILOpPure *op_NOT_6 = LOGNOT(op_LSHIFT_5); + RzILOpPure *op_AND_7 = LOGAND(READ_REG(pkt, Rxx_op, false), op_NOT_6); + RzILOpPure *op_RSHIFT_84 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_86 = LOGAND(op_RSHIFT_84, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_92 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_94 = LOGAND(op_RSHIFT_92, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_100 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_102 = LOGAND(op_RSHIFT_100, SN(32, 0xffff)); + RzILOpPure *op_MUL_106 = MUL(CAST(32, IL_FALSE, CAST(32, MSB(CAST(16, MSB(op_AND_94), DUP(op_AND_94))), CAST(16, MSB(DUP(op_AND_94)), DUP(op_AND_94)))), CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_102))); + RzILOpPure *op_LSHIFT_110 = SHIFTL0(CAST(64, MSB(CAST(32, IL_FALSE, op_MUL_106)), CAST(32, IL_FALSE, DUP(op_MUL_106))), SN(32, 1)); + RzILOpPure *op_ADD_111 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_86), DUP(op_AND_86))), CAST(32, MSB(DUP(op_AND_86)), DUP(op_AND_86))), op_LSHIFT_110); + RzILOpPure *cond_168 
= ITE(DUP(op_EQ_80), op_ADD_111, VARL("h_tmp401")); + RzILOpPure *op_AND_170 = LOGAND(cond_168, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_174 = SHIFTL0(op_AND_170, SN(32, 0)); + RzILOpPure *op_OR_175 = LOGOR(op_AND_7, op_LSHIFT_174); + RzILOpEffect *op_ASSIGN_176 = WRITE_REG(bundle, Rxx_op, op_OR_175); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((R ...; + RzILOpEffect *seq_177 = SEQN(2, seq_167, op_ASSIGN_176); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_290 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + (((st64) ((st32) ((ut32) ((st32) ((st16) ((Rs >> 0x10) & 0xffff)))) * ((ut32) ((ut16) ((Rt >> 0x10) & 0xffff))))) << 0x1)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + (((st64) ((st32) ((ut32) ((st32) ((st16) ((Rs >> 0x10) & 0xffff)))) * ((ut32) ((ut16) ((Rt >> 0x10) & 0xffff))))) << 0x1))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + (((st64) ((st32) ((ut32) ((st32) ((st16) ((Rs >> 0x10) & 0xffff)))) * ((ut32) ((ut16) ((Rt >> 0x10) & 0xffff))))) << 0x1) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_192 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_194 = LOGAND(op_RSHIFT_192, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_200 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_202 = LOGAND(op_RSHIFT_200, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_208 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_210 = LOGAND(op_RSHIFT_208, SN(32, 0xffff)); + RzILOpPure *op_MUL_214 = MUL(CAST(32, IL_FALSE, CAST(32, MSB(CAST(16, MSB(op_AND_202), DUP(op_AND_202))), CAST(16, MSB(DUP(op_AND_202)), DUP(op_AND_202)))), CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_210))); + RzILOpPure *op_LSHIFT_218 = SHIFTL0(CAST(64, MSB(CAST(32, IL_FALSE, op_MUL_214)), CAST(32, IL_FALSE, DUP(op_MUL_214))), SN(32, 1)); + RzILOpPure *op_ADD_219 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_194), DUP(op_AND_194))), CAST(32, MSB(DUP(op_AND_194)), DUP(op_AND_194))), op_LSHIFT_218); + RzILOpPure *op_RSHIFT_228 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_230 = LOGAND(op_RSHIFT_228, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_236 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_238 = LOGAND(op_RSHIFT_236, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_244 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_246 = LOGAND(op_RSHIFT_244, SN(32, 0xffff)); + RzILOpPure *op_MUL_250 = MUL(CAST(32, IL_FALSE, CAST(32, MSB(CAST(16, MSB(op_AND_238), DUP(op_AND_238))), CAST(16, MSB(DUP(op_AND_238)), DUP(op_AND_238)))), CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_246))); + RzILOpPure *op_LSHIFT_254 = SHIFTL0(CAST(64, MSB(CAST(32, IL_FALSE, op_MUL_250)), CAST(32, IL_FALSE, DUP(op_MUL_250))), SN(32, 1)); + RzILOpPure *op_ADD_255 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_230), DUP(op_AND_230))), CAST(32, MSB(DUP(op_AND_230)), DUP(op_AND_230))), op_LSHIFT_254); + RzILOpPure *op_EQ_256 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_ADD_219), SN(32, 0), SN(32, 0x20)), op_ADD_255); + RzILOpPure 
*op_RSHIFT_294 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_296 = LOGAND(op_RSHIFT_294, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_302 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_304 = LOGAND(op_RSHIFT_302, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_310 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_312 = LOGAND(op_RSHIFT_310, SN(32, 0xffff)); + RzILOpPure *op_MUL_316 = MUL(CAST(32, IL_FALSE, CAST(32, MSB(CAST(16, MSB(op_AND_304), DUP(op_AND_304))), CAST(16, MSB(DUP(op_AND_304)), DUP(op_AND_304)))), CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_312))); + RzILOpPure *op_LSHIFT_320 = SHIFTL0(CAST(64, MSB(CAST(32, IL_FALSE, op_MUL_316)), CAST(32, IL_FALSE, DUP(op_MUL_316))), SN(32, 1)); + RzILOpPure *op_ADD_321 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_296), DUP(op_AND_296))), CAST(32, MSB(DUP(op_AND_296)), DUP(op_AND_296))), op_LSHIFT_320); + RzILOpPure *op_LT_324 = SLT(op_ADD_321, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_329 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_330 = NEG(op_LSHIFT_329); + RzILOpPure *op_LSHIFT_335 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_338 = SUB(op_LSHIFT_335, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_339 = ITE(op_LT_324, op_NEG_330, op_SUB_338); + RzILOpEffect *gcc_expr_340 = BRANCH(op_EQ_256, EMPTY(), set_usr_field_call_290); + + // h_tmp402 = HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + (((st64) ((st32) ((ut32) ((st32) ((st16) ((Rs >> 0x10) & 0xffff)))) * ((ut32) ((ut16) ((Rt >> 0x10) & 0xffff))))) << 0x1)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + (((st64) ((st32) ((ut32) ((st32) ((st16) ((Rs >> 0x10) & 0xffff)))) * ((ut32) ((ut16) ((Rt >> 0x10) & 0xffff))))) << 0x1))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + (((st64) ((st32) ((ut32) ((st32) ((st16) ((Rs >> 0x10) & 0xffff)))) * ((ut32) ((ut16) 
((Rt >> 0x10) & 0xffff))))) << 0x1) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_342 = SETL("h_tmp402", cond_339); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx > ...; + RzILOpEffect *seq_343 = SEQN(2, gcc_expr_340, op_ASSIGN_hybrid_tmp_342); + + // Rxx = ((Rxx & (~(0xffffffff << 0x20))) | ((((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + (((st64) ((st32) ((ut32) ((st32) ((st16) ((Rs >> 0x10) & 0xffff)))) * ((ut32) ((ut16) ((Rt >> 0x10) & 0xffff))))) << 0x1)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + (((st64) ((st32) ((ut32) ((st32) ((st16) ((Rs >> 0x10) & 0xffff)))) * ((ut32) ((ut16) ((Rt >> 0x10) & 0xffff))))) << 0x1)) ? ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + (((st64) ((st32) ((ut32) ((st32) ((st16) ((Rs >> 0x10) & 0xffff)))) * ((ut32) ((ut16) ((Rt >> 0x10) & 0xffff))))) << 0x1) : h_tmp402) & 0xffffffff) << 0x20)); + RzILOpPure *op_LSHIFT_183 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0x20)); + RzILOpPure *op_NOT_184 = LOGNOT(op_LSHIFT_183); + RzILOpPure *op_AND_185 = LOGAND(READ_REG(pkt, Rxx_op, false), op_NOT_184); + RzILOpPure *op_RSHIFT_260 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_262 = LOGAND(op_RSHIFT_260, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_268 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_270 = LOGAND(op_RSHIFT_268, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_276 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_278 = LOGAND(op_RSHIFT_276, SN(32, 0xffff)); + RzILOpPure *op_MUL_282 = MUL(CAST(32, IL_FALSE, CAST(32, MSB(CAST(16, MSB(op_AND_270), DUP(op_AND_270))), CAST(16, MSB(DUP(op_AND_270)), DUP(op_AND_270)))), CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_278))); + RzILOpPure *op_LSHIFT_286 = SHIFTL0(CAST(64, MSB(CAST(32, IL_FALSE, op_MUL_282)), CAST(32, IL_FALSE, DUP(op_MUL_282))), SN(32, 1)); + RzILOpPure *op_ADD_287 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_262), 
DUP(op_AND_262))), CAST(32, MSB(DUP(op_AND_262)), DUP(op_AND_262))), op_LSHIFT_286); + RzILOpPure *cond_344 = ITE(DUP(op_EQ_256), op_ADD_287, VARL("h_tmp402")); + RzILOpPure *op_AND_346 = LOGAND(cond_344, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_350 = SHIFTL0(op_AND_346, SN(32, 0x20)); + RzILOpPure *op_OR_351 = LOGOR(op_AND_185, op_LSHIFT_350); + RzILOpEffect *op_ASSIGN_352 = WRITE_REG(bundle, Rxx_op, op_OR_351); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((R ...; + RzILOpEffect *seq_353 = SEQN(2, seq_343, op_ASSIGN_352); + + RzILOpEffect *instruction_sequence = SEQN(2, seq_177, seq_353); + return instruction_sequence; +} + +// Rdd = vmpyeh(Rss,Rtt):sat +RzILOpEffect *hex_il_op_m2_vmpy2es_s0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_87 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) << 0x0)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) << 0x0))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) << 0x0) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_15 = SHIFTRA(Rss, SN(32, 0)); + RzILOpPure *op_AND_18 = LOGAND(op_RSHIFT_15, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_25 = SHIFTRA(Rtt, SN(32, 0)); + RzILOpPure *op_AND_28 = LOGAND(op_RSHIFT_25, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_31 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_18), DUP(op_AND_18))), CAST(16, MSB(DUP(op_AND_18)), DUP(op_AND_18))), CAST(32, MSB(CAST(16, MSB(op_AND_28), DUP(op_AND_28))), CAST(16, MSB(DUP(op_AND_28)), DUP(op_AND_28)))); + RzILOpPure *op_LSHIFT_34 = SHIFTL0(CAST(64, MSB(op_MUL_31), DUP(op_MUL_31)), SN(32, 0)); + RzILOpPure *op_RSHIFT_43 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_46 = LOGAND(op_RSHIFT_43, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_52 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_55 = LOGAND(op_RSHIFT_52, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_58 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_46), DUP(op_AND_46))), CAST(16, MSB(DUP(op_AND_46)), DUP(op_AND_46))), CAST(32, MSB(CAST(16, MSB(op_AND_55), DUP(op_AND_55))), CAST(16, MSB(DUP(op_AND_55)), DUP(op_AND_55)))); + RzILOpPure *op_LSHIFT_61 = SHIFTL0(CAST(64, MSB(op_MUL_58), DUP(op_MUL_58)), SN(32, 0)); + RzILOpPure *op_EQ_62 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_LSHIFT_34), SN(32, 0), SN(32, 0x20)), op_LSHIFT_61); + RzILOpPure *op_RSHIFT_91 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_94 = LOGAND(op_RSHIFT_91, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_100 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_103 = LOGAND(op_RSHIFT_100, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_106 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_94), DUP(op_AND_94))), CAST(16, MSB(DUP(op_AND_94)), DUP(op_AND_94))), CAST(32, MSB(CAST(16, MSB(op_AND_103), DUP(op_AND_103))), CAST(16, MSB(DUP(op_AND_103)), DUP(op_AND_103)))); + RzILOpPure 
*op_LSHIFT_109 = SHIFTL0(CAST(64, MSB(op_MUL_106), DUP(op_MUL_106)), SN(32, 0)); + RzILOpPure *op_LT_112 = SLT(op_LSHIFT_109, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_117 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_118 = NEG(op_LSHIFT_117); + RzILOpPure *op_LSHIFT_123 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_126 = SUB(op_LSHIFT_123, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_127 = ITE(op_LT_112, op_NEG_118, op_SUB_126); + RzILOpEffect *gcc_expr_128 = BRANCH(op_EQ_62, EMPTY(), set_usr_field_call_87); + + // h_tmp403 = HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) << 0x0)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) << 0x0))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) << 0x0) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_130 = SETL("h_tmp403", cond_127); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16 ...; + RzILOpEffect *seq_131 = SEQN(2, gcc_expr_128, op_ASSIGN_hybrid_tmp_130); + + // Rdd = ((Rdd & (~(0xffffffff << 0x0))) | ((((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) << 0x0)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) << 0x0)) ? 
(((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) << 0x0) : h_tmp403) & 0xffffffff) << 0x0)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0)); + RzILOpPure *op_NOT_6 = LOGNOT(op_LSHIFT_5); + RzILOpPure *op_AND_7 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_6); + RzILOpPure *op_RSHIFT_66 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_69 = LOGAND(op_RSHIFT_66, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_75 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_78 = LOGAND(op_RSHIFT_75, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_81 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_69), DUP(op_AND_69))), CAST(16, MSB(DUP(op_AND_69)), DUP(op_AND_69))), CAST(32, MSB(CAST(16, MSB(op_AND_78), DUP(op_AND_78))), CAST(16, MSB(DUP(op_AND_78)), DUP(op_AND_78)))); + RzILOpPure *op_LSHIFT_84 = SHIFTL0(CAST(64, MSB(op_MUL_81), DUP(op_MUL_81)), SN(32, 0)); + RzILOpPure *cond_132 = ITE(DUP(op_EQ_62), op_LSHIFT_84, VARL("h_tmp403")); + RzILOpPure *op_AND_134 = LOGAND(cond_132, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_138 = SHIFTL0(op_AND_134, SN(32, 0)); + RzILOpPure *op_OR_139 = LOGOR(op_AND_7, op_LSHIFT_138); + RzILOpEffect *op_ASSIGN_140 = WRITE_REG(bundle, Rdd_op, op_OR_139); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) (( ...; + RzILOpEffect *seq_141 = SEQN(2, seq_131, op_ASSIGN_140); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_227 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) << 0x0)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) << 0x0))) {{}} else {set_usr_field(bundle, 
HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) << 0x0) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_156 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_159 = LOGAND(op_RSHIFT_156, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_165 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_168 = LOGAND(op_RSHIFT_165, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_171 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_159), DUP(op_AND_159))), CAST(16, MSB(DUP(op_AND_159)), DUP(op_AND_159))), CAST(32, MSB(CAST(16, MSB(op_AND_168), DUP(op_AND_168))), CAST(16, MSB(DUP(op_AND_168)), DUP(op_AND_168)))); + RzILOpPure *op_LSHIFT_174 = SHIFTL0(CAST(64, MSB(op_MUL_171), DUP(op_MUL_171)), SN(32, 0)); + RzILOpPure *op_RSHIFT_183 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_186 = LOGAND(op_RSHIFT_183, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_192 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_195 = LOGAND(op_RSHIFT_192, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_198 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_186), DUP(op_AND_186))), CAST(16, MSB(DUP(op_AND_186)), DUP(op_AND_186))), CAST(32, MSB(CAST(16, MSB(op_AND_195), DUP(op_AND_195))), CAST(16, MSB(DUP(op_AND_195)), DUP(op_AND_195)))); + RzILOpPure *op_LSHIFT_201 = SHIFTL0(CAST(64, MSB(op_MUL_198), DUP(op_MUL_198)), SN(32, 0)); + RzILOpPure *op_EQ_202 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_LSHIFT_174), SN(32, 0), SN(32, 0x20)), op_LSHIFT_201); + RzILOpPure *op_RSHIFT_231 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_234 = LOGAND(op_RSHIFT_231, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_240 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_243 = LOGAND(op_RSHIFT_240, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + 
RzILOpPure *op_MUL_246 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_234), DUP(op_AND_234))), CAST(16, MSB(DUP(op_AND_234)), DUP(op_AND_234))), CAST(32, MSB(CAST(16, MSB(op_AND_243), DUP(op_AND_243))), CAST(16, MSB(DUP(op_AND_243)), DUP(op_AND_243)))); + RzILOpPure *op_LSHIFT_249 = SHIFTL0(CAST(64, MSB(op_MUL_246), DUP(op_MUL_246)), SN(32, 0)); + RzILOpPure *op_LT_252 = SLT(op_LSHIFT_249, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_257 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_258 = NEG(op_LSHIFT_257); + RzILOpPure *op_LSHIFT_263 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_266 = SUB(op_LSHIFT_263, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_267 = ITE(op_LT_252, op_NEG_258, op_SUB_266); + RzILOpEffect *gcc_expr_268 = BRANCH(op_EQ_202, EMPTY(), set_usr_field_call_227); + + // h_tmp404 = HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) << 0x0)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) << 0x0))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) << 0x0) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_270 = SETL("h_tmp404", cond_267); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16 ...; + RzILOpEffect *seq_271 = SEQN(2, gcc_expr_268, op_ASSIGN_hybrid_tmp_270); + + // Rdd = ((Rdd & (~(0xffffffff << 0x20))) | ((((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) << 0x0)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) << 0x0)) ? 
(((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) << 0x0) : h_tmp404) & 0xffffffff) << 0x20)); + RzILOpPure *op_LSHIFT_147 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0x20)); + RzILOpPure *op_NOT_148 = LOGNOT(op_LSHIFT_147); + RzILOpPure *op_AND_149 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_148); + RzILOpPure *op_RSHIFT_206 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_209 = LOGAND(op_RSHIFT_206, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_215 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_218 = LOGAND(op_RSHIFT_215, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_221 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_209), DUP(op_AND_209))), CAST(16, MSB(DUP(op_AND_209)), DUP(op_AND_209))), CAST(32, MSB(CAST(16, MSB(op_AND_218), DUP(op_AND_218))), CAST(16, MSB(DUP(op_AND_218)), DUP(op_AND_218)))); + RzILOpPure *op_LSHIFT_224 = SHIFTL0(CAST(64, MSB(op_MUL_221), DUP(op_MUL_221)), SN(32, 0)); + RzILOpPure *cond_272 = ITE(DUP(op_EQ_202), op_LSHIFT_224, VARL("h_tmp404")); + RzILOpPure *op_AND_274 = LOGAND(cond_272, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_278 = SHIFTL0(op_AND_274, SN(32, 0x20)); + RzILOpPure *op_OR_279 = LOGOR(op_AND_149, op_LSHIFT_278); + RzILOpEffect *op_ASSIGN_280 = WRITE_REG(bundle, Rdd_op, op_OR_279); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) (( ...; + RzILOpEffect *seq_281 = SEQN(2, seq_271, op_ASSIGN_280); + + RzILOpEffect *instruction_sequence = SEQN(2, seq_141, seq_281); + return instruction_sequence; +} + +// Rdd = vmpyeh(Rss,Rtt):<<1:sat +RzILOpEffect *hex_il_op_m2_vmpy2es_s1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = 
READ_REG(pkt, Rtt_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_87 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) << 0x1)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) << 0x1))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) << 0x1) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_15 = SHIFTRA(Rss, SN(32, 0)); + RzILOpPure *op_AND_18 = LOGAND(op_RSHIFT_15, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_25 = SHIFTRA(Rtt, SN(32, 0)); + RzILOpPure *op_AND_28 = LOGAND(op_RSHIFT_25, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_31 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_18), DUP(op_AND_18))), CAST(16, MSB(DUP(op_AND_18)), DUP(op_AND_18))), CAST(32, MSB(CAST(16, MSB(op_AND_28), DUP(op_AND_28))), CAST(16, MSB(DUP(op_AND_28)), DUP(op_AND_28)))); + RzILOpPure *op_LSHIFT_34 = SHIFTL0(CAST(64, MSB(op_MUL_31), DUP(op_MUL_31)), SN(32, 1)); + RzILOpPure *op_RSHIFT_43 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_46 = LOGAND(op_RSHIFT_43, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_52 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_55 = LOGAND(op_RSHIFT_52, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_58 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_46), DUP(op_AND_46))), CAST(16, MSB(DUP(op_AND_46)), DUP(op_AND_46))), CAST(32, MSB(CAST(16, MSB(op_AND_55), DUP(op_AND_55))), CAST(16, MSB(DUP(op_AND_55)), DUP(op_AND_55)))); + RzILOpPure *op_LSHIFT_61 = 
SHIFTL0(CAST(64, MSB(op_MUL_58), DUP(op_MUL_58)), SN(32, 1)); + RzILOpPure *op_EQ_62 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_LSHIFT_34), SN(32, 0), SN(32, 0x20)), op_LSHIFT_61); + RzILOpPure *op_RSHIFT_91 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_94 = LOGAND(op_RSHIFT_91, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_100 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_103 = LOGAND(op_RSHIFT_100, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_106 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_94), DUP(op_AND_94))), CAST(16, MSB(DUP(op_AND_94)), DUP(op_AND_94))), CAST(32, MSB(CAST(16, MSB(op_AND_103), DUP(op_AND_103))), CAST(16, MSB(DUP(op_AND_103)), DUP(op_AND_103)))); + RzILOpPure *op_LSHIFT_109 = SHIFTL0(CAST(64, MSB(op_MUL_106), DUP(op_MUL_106)), SN(32, 1)); + RzILOpPure *op_LT_112 = SLT(op_LSHIFT_109, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_117 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_118 = NEG(op_LSHIFT_117); + RzILOpPure *op_LSHIFT_123 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_126 = SUB(op_LSHIFT_123, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_127 = ITE(op_LT_112, op_NEG_118, op_SUB_126); + RzILOpEffect *gcc_expr_128 = BRANCH(op_EQ_62, EMPTY(), set_usr_field_call_87); + + // h_tmp405 = HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) << 0x1)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) << 0x1))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) << 0x1) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_130 = SETL("h_tmp405", cond_127); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16 ...; + RzILOpEffect *seq_131 = SEQN(2, gcc_expr_128, op_ASSIGN_hybrid_tmp_130); + + // Rdd = ((Rdd & (~(0xffffffff << 0x0))) | ((((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) << 0x1)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) << 0x1)) ? (((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) << 0x1) : h_tmp405) & 0xffffffff) << 0x0)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0)); + RzILOpPure *op_NOT_6 = LOGNOT(op_LSHIFT_5); + RzILOpPure *op_AND_7 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_6); + RzILOpPure *op_RSHIFT_66 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_69 = LOGAND(op_RSHIFT_66, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_75 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_78 = LOGAND(op_RSHIFT_75, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_81 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_69), DUP(op_AND_69))), CAST(16, MSB(DUP(op_AND_69)), DUP(op_AND_69))), CAST(32, MSB(CAST(16, MSB(op_AND_78), DUP(op_AND_78))), CAST(16, MSB(DUP(op_AND_78)), DUP(op_AND_78)))); + RzILOpPure *op_LSHIFT_84 = SHIFTL0(CAST(64, MSB(op_MUL_81), DUP(op_MUL_81)), SN(32, 1)); + RzILOpPure *cond_132 = ITE(DUP(op_EQ_62), op_LSHIFT_84, VARL("h_tmp405")); + RzILOpPure *op_AND_134 = LOGAND(cond_132, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_138 = SHIFTL0(op_AND_134, SN(32, 0)); + RzILOpPure *op_OR_139 = LOGOR(op_AND_7, op_LSHIFT_138); + RzILOpEffect *op_ASSIGN_140 = WRITE_REG(bundle, Rdd_op, op_OR_139); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) (( ...; + 
RzILOpEffect *seq_141 = SEQN(2, seq_131, op_ASSIGN_140); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_227 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) << 0x1)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) << 0x1))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) << 0x1) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_156 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_159 = LOGAND(op_RSHIFT_156, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_165 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_168 = LOGAND(op_RSHIFT_165, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_171 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_159), DUP(op_AND_159))), CAST(16, MSB(DUP(op_AND_159)), DUP(op_AND_159))), CAST(32, MSB(CAST(16, MSB(op_AND_168), DUP(op_AND_168))), CAST(16, MSB(DUP(op_AND_168)), DUP(op_AND_168)))); + RzILOpPure *op_LSHIFT_174 = SHIFTL0(CAST(64, MSB(op_MUL_171), DUP(op_MUL_171)), SN(32, 1)); + RzILOpPure *op_RSHIFT_183 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_186 = LOGAND(op_RSHIFT_183, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_192 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_195 = LOGAND(op_RSHIFT_192, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_198 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_186), DUP(op_AND_186))), CAST(16, MSB(DUP(op_AND_186)), DUP(op_AND_186))), CAST(32, MSB(CAST(16, MSB(op_AND_195), 
DUP(op_AND_195))), CAST(16, MSB(DUP(op_AND_195)), DUP(op_AND_195)))); + RzILOpPure *op_LSHIFT_201 = SHIFTL0(CAST(64, MSB(op_MUL_198), DUP(op_MUL_198)), SN(32, 1)); + RzILOpPure *op_EQ_202 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_LSHIFT_174), SN(32, 0), SN(32, 0x20)), op_LSHIFT_201); + RzILOpPure *op_RSHIFT_231 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_234 = LOGAND(op_RSHIFT_231, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_240 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_243 = LOGAND(op_RSHIFT_240, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_246 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_234), DUP(op_AND_234))), CAST(16, MSB(DUP(op_AND_234)), DUP(op_AND_234))), CAST(32, MSB(CAST(16, MSB(op_AND_243), DUP(op_AND_243))), CAST(16, MSB(DUP(op_AND_243)), DUP(op_AND_243)))); + RzILOpPure *op_LSHIFT_249 = SHIFTL0(CAST(64, MSB(op_MUL_246), DUP(op_MUL_246)), SN(32, 1)); + RzILOpPure *op_LT_252 = SLT(op_LSHIFT_249, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_257 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_258 = NEG(op_LSHIFT_257); + RzILOpPure *op_LSHIFT_263 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_266 = SUB(op_LSHIFT_263, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_267 = ITE(op_LT_252, op_NEG_258, op_SUB_266); + RzILOpEffect *gcc_expr_268 = BRANCH(op_EQ_202, EMPTY(), set_usr_field_call_227); + + // h_tmp406 = HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) << 0x1)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) << 0x1))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) << 0x1) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_270 = SETL("h_tmp406", cond_267); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16 ...; + RzILOpEffect *seq_271 = SEQN(2, gcc_expr_268, op_ASSIGN_hybrid_tmp_270); + + // Rdd = ((Rdd & (~(0xffffffff << 0x20))) | ((((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) << 0x1)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) << 0x1)) ? (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) << 0x1) : h_tmp406) & 0xffffffff) << 0x20)); + RzILOpPure *op_LSHIFT_147 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0x20)); + RzILOpPure *op_NOT_148 = LOGNOT(op_LSHIFT_147); + RzILOpPure *op_AND_149 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_148); + RzILOpPure *op_RSHIFT_206 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_209 = LOGAND(op_RSHIFT_206, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_215 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_218 = LOGAND(op_RSHIFT_215, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_221 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_209), DUP(op_AND_209))), CAST(16, MSB(DUP(op_AND_209)), DUP(op_AND_209))), CAST(32, MSB(CAST(16, MSB(op_AND_218), DUP(op_AND_218))), CAST(16, MSB(DUP(op_AND_218)), DUP(op_AND_218)))); + RzILOpPure *op_LSHIFT_224 = SHIFTL0(CAST(64, MSB(op_MUL_221), DUP(op_MUL_221)), SN(32, 1)); + RzILOpPure *cond_272 = ITE(DUP(op_EQ_202), op_LSHIFT_224, VARL("h_tmp406")); + RzILOpPure *op_AND_274 = LOGAND(cond_272, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_278 = SHIFTL0(op_AND_274, SN(32, 0x20)); + RzILOpPure *op_OR_279 = LOGOR(op_AND_149, op_LSHIFT_278); + RzILOpEffect *op_ASSIGN_280 = WRITE_REG(bundle, Rdd_op, op_OR_279); + + // 
seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) (( ...; + RzILOpEffect *seq_281 = SEQN(2, seq_271, op_ASSIGN_280); + + RzILOpEffect *instruction_sequence = SEQN(2, seq_141, seq_281); + return instruction_sequence; +} + +// Rdd = vmpyh(Rs,Rt):sat +RzILOpEffect *hex_il_op_m2_vmpy2s_s0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_81 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_15 = SHIFTRA(Rs, SN(32, 0)); + RzILOpPure *op_AND_17 = LOGAND(op_RSHIFT_15, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_24 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_26 = LOGAND(op_RSHIFT_24, SN(32, 0xffff)); + RzILOpPure *op_MUL_29 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_17), DUP(op_AND_17))), CAST(16, MSB(DUP(op_AND_17)), DUP(op_AND_17))), CAST(32, MSB(CAST(16, MSB(op_AND_26), DUP(op_AND_26))), CAST(16, MSB(DUP(op_AND_26)), DUP(op_AND_26)))); + RzILOpPure *op_LSHIFT_32 = SHIFTL0(CAST(64, MSB(op_MUL_29), DUP(op_MUL_29)), SN(32, 0)); + RzILOpPure *op_RSHIFT_41 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_43 = LOGAND(op_RSHIFT_41, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_49 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_51 = LOGAND(op_RSHIFT_49, SN(32, 0xffff)); + RzILOpPure *op_MUL_54 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_43), DUP(op_AND_43))), CAST(16, MSB(DUP(op_AND_43)), DUP(op_AND_43))), CAST(32, MSB(CAST(16, MSB(op_AND_51), DUP(op_AND_51))), CAST(16, MSB(DUP(op_AND_51)), DUP(op_AND_51)))); + RzILOpPure *op_LSHIFT_57 = SHIFTL0(CAST(64, MSB(op_MUL_54), DUP(op_MUL_54)), SN(32, 0)); + RzILOpPure *op_EQ_58 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_LSHIFT_32), SN(32, 0), SN(32, 0x20)), op_LSHIFT_57); + RzILOpPure *op_RSHIFT_85 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_87 = LOGAND(op_RSHIFT_85, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_93 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_95 = LOGAND(op_RSHIFT_93, SN(32, 0xffff)); + RzILOpPure *op_MUL_98 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_87), DUP(op_AND_87))), CAST(16, MSB(DUP(op_AND_87)), DUP(op_AND_87))), CAST(32, MSB(CAST(16, MSB(op_AND_95), DUP(op_AND_95))), CAST(16, MSB(DUP(op_AND_95)), DUP(op_AND_95)))); + RzILOpPure *op_LSHIFT_101 = SHIFTL0(CAST(64, MSB(op_MUL_98), DUP(op_MUL_98)), SN(32, 0)); + RzILOpPure *op_LT_104 = SLT(op_LSHIFT_101, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_109 = 
SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_110 = NEG(op_LSHIFT_109); + RzILOpPure *op_LSHIFT_115 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_118 = SUB(op_LSHIFT_115, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_119 = ITE(op_LT_104, op_NEG_110, op_SUB_118); + RzILOpEffect *gcc_expr_120 = BRANCH(op_EQ_58, EMPTY(), set_usr_field_call_81); + + // h_tmp407 = HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_122 = SETL("h_tmp407", cond_119); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16 ...; + RzILOpEffect *seq_123 = SEQN(2, gcc_expr_120, op_ASSIGN_hybrid_tmp_122); + + // Rdd = ((Rdd & (~(0xffffffff << 0x0))) | ((((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0)) ? 
(((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) : h_tmp407) & 0xffffffff) << 0x0)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0)); + RzILOpPure *op_NOT_6 = LOGNOT(op_LSHIFT_5); + RzILOpPure *op_AND_7 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_6); + RzILOpPure *op_RSHIFT_62 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_64 = LOGAND(op_RSHIFT_62, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_70 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_72 = LOGAND(op_RSHIFT_70, SN(32, 0xffff)); + RzILOpPure *op_MUL_75 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_64), DUP(op_AND_64))), CAST(16, MSB(DUP(op_AND_64)), DUP(op_AND_64))), CAST(32, MSB(CAST(16, MSB(op_AND_72), DUP(op_AND_72))), CAST(16, MSB(DUP(op_AND_72)), DUP(op_AND_72)))); + RzILOpPure *op_LSHIFT_78 = SHIFTL0(CAST(64, MSB(op_MUL_75), DUP(op_MUL_75)), SN(32, 0)); + RzILOpPure *cond_124 = ITE(DUP(op_EQ_58), op_LSHIFT_78, VARL("h_tmp407")); + RzILOpPure *op_AND_126 = LOGAND(cond_124, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_130 = SHIFTL0(op_AND_126, SN(32, 0)); + RzILOpPure *op_OR_131 = LOGOR(op_AND_7, op_LSHIFT_130); + RzILOpEffect *op_ASSIGN_132 = WRITE_REG(bundle, Rdd_op, op_OR_131); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) (( ...; + RzILOpEffect *seq_133 = SEQN(2, seq_123, op_ASSIGN_132); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_213 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 
0x0) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_148 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_150 = LOGAND(op_RSHIFT_148, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_156 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_158 = LOGAND(op_RSHIFT_156, SN(32, 0xffff)); + RzILOpPure *op_MUL_161 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_150), DUP(op_AND_150))), CAST(16, MSB(DUP(op_AND_150)), DUP(op_AND_150))), CAST(32, MSB(CAST(16, MSB(op_AND_158), DUP(op_AND_158))), CAST(16, MSB(DUP(op_AND_158)), DUP(op_AND_158)))); + RzILOpPure *op_LSHIFT_164 = SHIFTL0(CAST(64, MSB(op_MUL_161), DUP(op_MUL_161)), SN(32, 0)); + RzILOpPure *op_RSHIFT_173 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_175 = LOGAND(op_RSHIFT_173, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_181 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_183 = LOGAND(op_RSHIFT_181, SN(32, 0xffff)); + RzILOpPure *op_MUL_186 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_175), DUP(op_AND_175))), CAST(16, MSB(DUP(op_AND_175)), DUP(op_AND_175))), CAST(32, MSB(CAST(16, MSB(op_AND_183), DUP(op_AND_183))), CAST(16, MSB(DUP(op_AND_183)), DUP(op_AND_183)))); + RzILOpPure *op_LSHIFT_189 = SHIFTL0(CAST(64, MSB(op_MUL_186), DUP(op_MUL_186)), SN(32, 0)); + RzILOpPure *op_EQ_190 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_LSHIFT_164), SN(32, 0), SN(32, 0x20)), op_LSHIFT_189); + RzILOpPure *op_RSHIFT_217 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_219 = LOGAND(op_RSHIFT_217, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_225 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_227 = LOGAND(op_RSHIFT_225, SN(32, 0xffff)); + RzILOpPure *op_MUL_230 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_219), DUP(op_AND_219))), CAST(16, MSB(DUP(op_AND_219)), DUP(op_AND_219))), CAST(32, MSB(CAST(16, MSB(op_AND_227), DUP(op_AND_227))), CAST(16, MSB(DUP(op_AND_227)), DUP(op_AND_227)))); + RzILOpPure *op_LSHIFT_233 = SHIFTL0(CAST(64, MSB(op_MUL_230), DUP(op_MUL_230)), SN(32, 0)); + RzILOpPure *op_LT_236 = 
SLT(op_LSHIFT_233, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_241 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_242 = NEG(op_LSHIFT_241); + RzILOpPure *op_LSHIFT_247 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_250 = SUB(op_LSHIFT_247, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_251 = ITE(op_LT_236, op_NEG_242, op_SUB_250); + RzILOpEffect *gcc_expr_252 = BRANCH(op_EQ_190, EMPTY(), set_usr_field_call_213); + + // h_tmp408 = HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_254 = SETL("h_tmp408", cond_251); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16 ...; + RzILOpEffect *seq_255 = SEQN(2, gcc_expr_252, op_ASSIGN_hybrid_tmp_254); + + // Rdd = ((Rdd & (~(0xffffffff << 0x20))) | ((((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0)) ? 
(((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0) : h_tmp408) & 0xffffffff) << 0x20)); + RzILOpPure *op_LSHIFT_139 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0x20)); + RzILOpPure *op_NOT_140 = LOGNOT(op_LSHIFT_139); + RzILOpPure *op_AND_141 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_140); + RzILOpPure *op_RSHIFT_194 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_196 = LOGAND(op_RSHIFT_194, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_202 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_204 = LOGAND(op_RSHIFT_202, SN(32, 0xffff)); + RzILOpPure *op_MUL_207 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_196), DUP(op_AND_196))), CAST(16, MSB(DUP(op_AND_196)), DUP(op_AND_196))), CAST(32, MSB(CAST(16, MSB(op_AND_204), DUP(op_AND_204))), CAST(16, MSB(DUP(op_AND_204)), DUP(op_AND_204)))); + RzILOpPure *op_LSHIFT_210 = SHIFTL0(CAST(64, MSB(op_MUL_207), DUP(op_MUL_207)), SN(32, 0)); + RzILOpPure *cond_256 = ITE(DUP(op_EQ_190), op_LSHIFT_210, VARL("h_tmp408")); + RzILOpPure *op_AND_258 = LOGAND(cond_256, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_262 = SHIFTL0(op_AND_258, SN(32, 0x20)); + RzILOpPure *op_OR_263 = LOGOR(op_AND_141, op_LSHIFT_262); + RzILOpEffect *op_ASSIGN_264 = WRITE_REG(bundle, Rdd_op, op_OR_263); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) (( ...; + RzILOpEffect *seq_265 = SEQN(2, seq_255, op_ASSIGN_264); + + RzILOpEffect *instruction_sequence = SEQN(2, seq_133, seq_265); + return instruction_sequence; +} + +// Rd = vmpyh(Rs,Rt):rnd:sat +RzILOpEffect *hex_il_op_m2_vmpy2s_s0pack(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + 
RzILOpEffect *set_usr_field_call_91 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0) + ((st64) 0x8000)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0) + ((st64) 0x8000))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0) + ((st64) 0x8000) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_16 = SHIFTRA(Rs, SN(32, 16)); + RzILOpPure *op_AND_18 = LOGAND(op_RSHIFT_16, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_25 = SHIFTRA(Rt, SN(32, 16)); + RzILOpPure *op_AND_27 = LOGAND(op_RSHIFT_25, SN(32, 0xffff)); + RzILOpPure *op_MUL_30 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_18), DUP(op_AND_18))), CAST(16, MSB(DUP(op_AND_18)), DUP(op_AND_18))), CAST(32, MSB(CAST(16, MSB(op_AND_27), DUP(op_AND_27))), CAST(16, MSB(DUP(op_AND_27)), DUP(op_AND_27)))); + RzILOpPure *op_LSHIFT_33 = SHIFTL0(CAST(64, MSB(op_MUL_30), DUP(op_MUL_30)), SN(32, 0)); + RzILOpPure *op_ADD_36 = ADD(op_LSHIFT_33, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_45 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_47 = LOGAND(op_RSHIFT_45, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_53 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_55 = LOGAND(op_RSHIFT_53, SN(32, 0xffff)); + RzILOpPure *op_MUL_58 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_47), DUP(op_AND_47))), CAST(16, MSB(DUP(op_AND_47)), DUP(op_AND_47))), CAST(32, MSB(CAST(16, MSB(op_AND_55), DUP(op_AND_55))), CAST(16, MSB(DUP(op_AND_55)), DUP(op_AND_55)))); + RzILOpPure *op_LSHIFT_61 = SHIFTL0(CAST(64, MSB(op_MUL_58), DUP(op_MUL_58)), SN(32, 0)); + RzILOpPure *op_ADD_64 = ADD(op_LSHIFT_61, CAST(64, MSB(SN(32, 0x8000)), 
SN(32, 0x8000))); + RzILOpPure *op_EQ_65 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_ADD_36), SN(32, 0), SN(32, 0x20)), op_ADD_64); + RzILOpPure *op_RSHIFT_95 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_97 = LOGAND(op_RSHIFT_95, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_103 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_105 = LOGAND(op_RSHIFT_103, SN(32, 0xffff)); + RzILOpPure *op_MUL_108 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_97), DUP(op_AND_97))), CAST(16, MSB(DUP(op_AND_97)), DUP(op_AND_97))), CAST(32, MSB(CAST(16, MSB(op_AND_105), DUP(op_AND_105))), CAST(16, MSB(DUP(op_AND_105)), DUP(op_AND_105)))); + RzILOpPure *op_LSHIFT_111 = SHIFTL0(CAST(64, MSB(op_MUL_108), DUP(op_MUL_108)), SN(32, 0)); + RzILOpPure *op_ADD_114 = ADD(op_LSHIFT_111, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_LT_117 = SLT(op_ADD_114, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_122 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_123 = NEG(op_LSHIFT_122); + RzILOpPure *op_LSHIFT_128 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_131 = SUB(op_LSHIFT_128, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_132 = ITE(op_LT_117, op_NEG_123, op_SUB_131); + RzILOpEffect *gcc_expr_133 = BRANCH(op_EQ_65, EMPTY(), set_usr_field_call_91); + + // h_tmp409 = HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0) + ((st64) 0x8000)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0) + ((st64) 0x8000))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0) + ((st64) 0x8000) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_135 = SETL("h_tmp409", cond_132); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16 ...; + RzILOpEffect *seq_136 = SEQN(2, gcc_expr_133, op_ASSIGN_hybrid_tmp_135); + + // Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xffff << 0x10)))) | (((ut64) (((st32) ((st16) ((((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0) + ((st64) 0x8000)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0) + ((st64) 0x8000)) ? (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x0) + ((st64) 0x8000) : h_tmp409) >> 0x10) & ((st64) 0xffff)))) & 0xffff)) << 0x10))); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(SN(64, 0xffff), SN(32, 16)); + RzILOpPure *op_NOT_6 = LOGNOT(op_LSHIFT_5); + RzILOpPure *op_AND_8 = LOGAND(CAST(64, MSB(READ_REG(pkt, Rd_op, true)), READ_REG(pkt, Rd_op, true)), op_NOT_6); + RzILOpPure *op_RSHIFT_69 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_71 = LOGAND(op_RSHIFT_69, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_77 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_79 = LOGAND(op_RSHIFT_77, SN(32, 0xffff)); + RzILOpPure *op_MUL_82 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_71), DUP(op_AND_71))), CAST(16, MSB(DUP(op_AND_71)), DUP(op_AND_71))), CAST(32, MSB(CAST(16, MSB(op_AND_79), DUP(op_AND_79))), CAST(16, MSB(DUP(op_AND_79)), DUP(op_AND_79)))); + RzILOpPure *op_LSHIFT_85 = SHIFTL0(CAST(64, MSB(op_MUL_82), DUP(op_MUL_82)), SN(32, 0)); + RzILOpPure *op_ADD_88 = ADD(op_LSHIFT_85, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *cond_137 = ITE(DUP(op_EQ_65), op_ADD_88, VARL("h_tmp409")); + RzILOpPure *op_RSHIFT_141 = SHIFTRA(cond_137, SN(32, 16)); + RzILOpPure *op_AND_144 = LOGAND(op_RSHIFT_141, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_AND_148 = 
LOGAND(CAST(32, MSB(CAST(16, MSB(op_AND_144), DUP(op_AND_144))), CAST(16, MSB(DUP(op_AND_144)), DUP(op_AND_144))), SN(32, 0xffff)); + RzILOpPure *op_LSHIFT_153 = SHIFTL0(CAST(64, IL_FALSE, op_AND_148), SN(32, 16)); + RzILOpPure *op_OR_155 = LOGOR(CAST(64, IL_FALSE, op_AND_8), op_LSHIFT_153); + RzILOpEffect *op_ASSIGN_157 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, op_OR_155)); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) (( ...; + RzILOpEffect *seq_158 = SEQN(2, seq_136, op_ASSIGN_157); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_248 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) + ((st64) 0x8000)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) + ((st64) 0x8000))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) + ((st64) 0x8000) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_174 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_176 = LOGAND(op_RSHIFT_174, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_182 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_184 = LOGAND(op_RSHIFT_182, SN(32, 0xffff)); + RzILOpPure *op_MUL_187 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_176), DUP(op_AND_176))), CAST(16, MSB(DUP(op_AND_176)), DUP(op_AND_176))), CAST(32, MSB(CAST(16, MSB(op_AND_184), DUP(op_AND_184))), CAST(16, MSB(DUP(op_AND_184)), DUP(op_AND_184)))); + RzILOpPure *op_LSHIFT_190 = SHIFTL0(CAST(64, MSB(op_MUL_187), DUP(op_MUL_187)), SN(32, 0)); + RzILOpPure *op_ADD_193 = ADD(op_LSHIFT_190, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_202 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_204 = LOGAND(op_RSHIFT_202, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_210 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_212 = LOGAND(op_RSHIFT_210, SN(32, 0xffff)); + RzILOpPure *op_MUL_215 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_204), DUP(op_AND_204))), CAST(16, MSB(DUP(op_AND_204)), DUP(op_AND_204))), CAST(32, MSB(CAST(16, MSB(op_AND_212), DUP(op_AND_212))), CAST(16, MSB(DUP(op_AND_212)), DUP(op_AND_212)))); + RzILOpPure *op_LSHIFT_218 = SHIFTL0(CAST(64, MSB(op_MUL_215), DUP(op_MUL_215)), SN(32, 0)); + RzILOpPure *op_ADD_221 = ADD(op_LSHIFT_218, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_EQ_222 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_ADD_193), SN(32, 0), SN(32, 0x20)), op_ADD_221); + RzILOpPure *op_RSHIFT_252 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_254 = LOGAND(op_RSHIFT_252, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_260 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_262 = LOGAND(op_RSHIFT_260, SN(32, 0xffff)); + RzILOpPure *op_MUL_265 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_254), DUP(op_AND_254))), CAST(16, MSB(DUP(op_AND_254)), DUP(op_AND_254))), CAST(32, MSB(CAST(16, MSB(op_AND_262), DUP(op_AND_262))), CAST(16, 
MSB(DUP(op_AND_262)), DUP(op_AND_262)))); + RzILOpPure *op_LSHIFT_268 = SHIFTL0(CAST(64, MSB(op_MUL_265), DUP(op_MUL_265)), SN(32, 0)); + RzILOpPure *op_ADD_271 = ADD(op_LSHIFT_268, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_LT_274 = SLT(op_ADD_271, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_279 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_280 = NEG(op_LSHIFT_279); + RzILOpPure *op_LSHIFT_285 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_288 = SUB(op_LSHIFT_285, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_289 = ITE(op_LT_274, op_NEG_280, op_SUB_288); + RzILOpEffect *gcc_expr_290 = BRANCH(op_EQ_222, EMPTY(), set_usr_field_call_248); + + // h_tmp410 = HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) + ((st64) 0x8000)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) + ((st64) 0x8000))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) + ((st64) 0x8000) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_292 = SETL("h_tmp410", cond_289); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16 ...; + RzILOpEffect *seq_293 = SEQN(2, gcc_expr_290, op_ASSIGN_hybrid_tmp_292); + + // Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xffff << 0x0)))) | (((ut64) (((st32) ((st16) ((((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) + ((st64) 0x8000)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) + ((st64) 0x8000)) ? 
(((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x0) + ((st64) 0x8000) : h_tmp410) >> 0x10) & ((st64) 0xffff)))) & 0xffff)) << 0x0))); + RzILOpPure *op_LSHIFT_164 = SHIFTL0(SN(64, 0xffff), SN(32, 0)); + RzILOpPure *op_NOT_165 = LOGNOT(op_LSHIFT_164); + RzILOpPure *op_AND_167 = LOGAND(CAST(64, MSB(READ_REG(pkt, Rd_op, true)), READ_REG(pkt, Rd_op, true)), op_NOT_165); + RzILOpPure *op_RSHIFT_226 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_228 = LOGAND(op_RSHIFT_226, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_234 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_236 = LOGAND(op_RSHIFT_234, SN(32, 0xffff)); + RzILOpPure *op_MUL_239 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_228), DUP(op_AND_228))), CAST(16, MSB(DUP(op_AND_228)), DUP(op_AND_228))), CAST(32, MSB(CAST(16, MSB(op_AND_236), DUP(op_AND_236))), CAST(16, MSB(DUP(op_AND_236)), DUP(op_AND_236)))); + RzILOpPure *op_LSHIFT_242 = SHIFTL0(CAST(64, MSB(op_MUL_239), DUP(op_MUL_239)), SN(32, 0)); + RzILOpPure *op_ADD_245 = ADD(op_LSHIFT_242, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *cond_294 = ITE(DUP(op_EQ_222), op_ADD_245, VARL("h_tmp410")); + RzILOpPure *op_RSHIFT_298 = SHIFTRA(cond_294, SN(32, 16)); + RzILOpPure *op_AND_301 = LOGAND(op_RSHIFT_298, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_AND_305 = LOGAND(CAST(32, MSB(CAST(16, MSB(op_AND_301), DUP(op_AND_301))), CAST(16, MSB(DUP(op_AND_301)), DUP(op_AND_301))), SN(32, 0xffff)); + RzILOpPure *op_LSHIFT_310 = SHIFTL0(CAST(64, IL_FALSE, op_AND_305), SN(32, 0)); + RzILOpPure *op_OR_312 = LOGOR(CAST(64, IL_FALSE, op_AND_167), op_LSHIFT_310); + RzILOpEffect *op_ASSIGN_314 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, op_OR_312)); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) (( ...; + RzILOpEffect *seq_315 = SEQN(2, seq_293, op_ASSIGN_314); + + RzILOpEffect *instruction_sequence = SEQN(2, seq_158, seq_315); + return instruction_sequence; +} + +// Rdd = 
vmpyh(Rs,Rt):<<1:sat +RzILOpEffect *hex_il_op_m2_vmpy2s_s1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_81 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_15 = SHIFTRA(Rs, SN(32, 0)); + RzILOpPure *op_AND_17 = LOGAND(op_RSHIFT_15, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_24 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_26 = LOGAND(op_RSHIFT_24, SN(32, 0xffff)); + RzILOpPure *op_MUL_29 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_17), DUP(op_AND_17))), CAST(16, MSB(DUP(op_AND_17)), DUP(op_AND_17))), CAST(32, MSB(CAST(16, MSB(op_AND_26), DUP(op_AND_26))), CAST(16, MSB(DUP(op_AND_26)), DUP(op_AND_26)))); + RzILOpPure *op_LSHIFT_32 = SHIFTL0(CAST(64, MSB(op_MUL_29), DUP(op_MUL_29)), SN(32, 1)); + RzILOpPure *op_RSHIFT_41 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_43 = LOGAND(op_RSHIFT_41, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_49 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_51 = LOGAND(op_RSHIFT_49, SN(32, 0xffff)); + RzILOpPure *op_MUL_54 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_43), DUP(op_AND_43))), CAST(16, MSB(DUP(op_AND_43)), DUP(op_AND_43))), CAST(32, MSB(CAST(16, MSB(op_AND_51), DUP(op_AND_51))), CAST(16, MSB(DUP(op_AND_51)), DUP(op_AND_51)))); + RzILOpPure *op_LSHIFT_57 = SHIFTL0(CAST(64, MSB(op_MUL_54), DUP(op_MUL_54)), SN(32, 1)); + RzILOpPure *op_EQ_58 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_LSHIFT_32), SN(32, 0), SN(32, 0x20)), op_LSHIFT_57); + RzILOpPure *op_RSHIFT_85 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_87 = LOGAND(op_RSHIFT_85, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_93 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_95 = LOGAND(op_RSHIFT_93, SN(32, 0xffff)); + RzILOpPure *op_MUL_98 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_87), DUP(op_AND_87))), CAST(16, MSB(DUP(op_AND_87)), DUP(op_AND_87))), CAST(32, MSB(CAST(16, MSB(op_AND_95), DUP(op_AND_95))), CAST(16, MSB(DUP(op_AND_95)), DUP(op_AND_95)))); + RzILOpPure *op_LSHIFT_101 = SHIFTL0(CAST(64, MSB(op_MUL_98), DUP(op_MUL_98)), SN(32, 1)); + RzILOpPure *op_LT_104 = SLT(op_LSHIFT_101, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_109 = 
SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_110 = NEG(op_LSHIFT_109); + RzILOpPure *op_LSHIFT_115 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_118 = SUB(op_LSHIFT_115, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_119 = ITE(op_LT_104, op_NEG_110, op_SUB_118); + RzILOpEffect *gcc_expr_120 = BRANCH(op_EQ_58, EMPTY(), set_usr_field_call_81); + + // h_tmp411 = HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_122 = SETL("h_tmp411", cond_119); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16 ...; + RzILOpEffect *seq_123 = SEQN(2, gcc_expr_120, op_ASSIGN_hybrid_tmp_122); + + // Rdd = ((Rdd & (~(0xffffffff << 0x0))) | ((((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1)) ? 
(((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) : h_tmp411) & 0xffffffff) << 0x0)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0)); + RzILOpPure *op_NOT_6 = LOGNOT(op_LSHIFT_5); + RzILOpPure *op_AND_7 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_6); + RzILOpPure *op_RSHIFT_62 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_64 = LOGAND(op_RSHIFT_62, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_70 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_72 = LOGAND(op_RSHIFT_70, SN(32, 0xffff)); + RzILOpPure *op_MUL_75 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_64), DUP(op_AND_64))), CAST(16, MSB(DUP(op_AND_64)), DUP(op_AND_64))), CAST(32, MSB(CAST(16, MSB(op_AND_72), DUP(op_AND_72))), CAST(16, MSB(DUP(op_AND_72)), DUP(op_AND_72)))); + RzILOpPure *op_LSHIFT_78 = SHIFTL0(CAST(64, MSB(op_MUL_75), DUP(op_MUL_75)), SN(32, 1)); + RzILOpPure *cond_124 = ITE(DUP(op_EQ_58), op_LSHIFT_78, VARL("h_tmp411")); + RzILOpPure *op_AND_126 = LOGAND(cond_124, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_130 = SHIFTL0(op_AND_126, SN(32, 0)); + RzILOpPure *op_OR_131 = LOGOR(op_AND_7, op_LSHIFT_130); + RzILOpEffect *op_ASSIGN_132 = WRITE_REG(bundle, Rdd_op, op_OR_131); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) (( ...; + RzILOpEffect *seq_133 = SEQN(2, seq_123, op_ASSIGN_132); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_213 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 
0x1) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_148 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_150 = LOGAND(op_RSHIFT_148, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_156 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_158 = LOGAND(op_RSHIFT_156, SN(32, 0xffff)); + RzILOpPure *op_MUL_161 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_150), DUP(op_AND_150))), CAST(16, MSB(DUP(op_AND_150)), DUP(op_AND_150))), CAST(32, MSB(CAST(16, MSB(op_AND_158), DUP(op_AND_158))), CAST(16, MSB(DUP(op_AND_158)), DUP(op_AND_158)))); + RzILOpPure *op_LSHIFT_164 = SHIFTL0(CAST(64, MSB(op_MUL_161), DUP(op_MUL_161)), SN(32, 1)); + RzILOpPure *op_RSHIFT_173 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_175 = LOGAND(op_RSHIFT_173, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_181 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_183 = LOGAND(op_RSHIFT_181, SN(32, 0xffff)); + RzILOpPure *op_MUL_186 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_175), DUP(op_AND_175))), CAST(16, MSB(DUP(op_AND_175)), DUP(op_AND_175))), CAST(32, MSB(CAST(16, MSB(op_AND_183), DUP(op_AND_183))), CAST(16, MSB(DUP(op_AND_183)), DUP(op_AND_183)))); + RzILOpPure *op_LSHIFT_189 = SHIFTL0(CAST(64, MSB(op_MUL_186), DUP(op_MUL_186)), SN(32, 1)); + RzILOpPure *op_EQ_190 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_LSHIFT_164), SN(32, 0), SN(32, 0x20)), op_LSHIFT_189); + RzILOpPure *op_RSHIFT_217 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_219 = LOGAND(op_RSHIFT_217, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_225 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_227 = LOGAND(op_RSHIFT_225, SN(32, 0xffff)); + RzILOpPure *op_MUL_230 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_219), DUP(op_AND_219))), CAST(16, MSB(DUP(op_AND_219)), DUP(op_AND_219))), CAST(32, MSB(CAST(16, MSB(op_AND_227), DUP(op_AND_227))), CAST(16, MSB(DUP(op_AND_227)), DUP(op_AND_227)))); + RzILOpPure *op_LSHIFT_233 = SHIFTL0(CAST(64, MSB(op_MUL_230), DUP(op_MUL_230)), SN(32, 1)); + RzILOpPure *op_LT_236 = 
SLT(op_LSHIFT_233, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_241 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_242 = NEG(op_LSHIFT_241); + RzILOpPure *op_LSHIFT_247 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_250 = SUB(op_LSHIFT_247, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_251 = ITE(op_LT_236, op_NEG_242, op_SUB_250); + RzILOpEffect *gcc_expr_252 = BRANCH(op_EQ_190, EMPTY(), set_usr_field_call_213); + + // h_tmp412 = HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_254 = SETL("h_tmp412", cond_251); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16 ...; + RzILOpEffect *seq_255 = SEQN(2, gcc_expr_252, op_ASSIGN_hybrid_tmp_254); + + // Rdd = ((Rdd & (~(0xffffffff << 0x20))) | ((((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1)) ? 
(((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1) : h_tmp412) & 0xffffffff) << 0x20)); + RzILOpPure *op_LSHIFT_139 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0x20)); + RzILOpPure *op_NOT_140 = LOGNOT(op_LSHIFT_139); + RzILOpPure *op_AND_141 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_140); + RzILOpPure *op_RSHIFT_194 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_196 = LOGAND(op_RSHIFT_194, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_202 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_204 = LOGAND(op_RSHIFT_202, SN(32, 0xffff)); + RzILOpPure *op_MUL_207 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_196), DUP(op_AND_196))), CAST(16, MSB(DUP(op_AND_196)), DUP(op_AND_196))), CAST(32, MSB(CAST(16, MSB(op_AND_204), DUP(op_AND_204))), CAST(16, MSB(DUP(op_AND_204)), DUP(op_AND_204)))); + RzILOpPure *op_LSHIFT_210 = SHIFTL0(CAST(64, MSB(op_MUL_207), DUP(op_MUL_207)), SN(32, 1)); + RzILOpPure *cond_256 = ITE(DUP(op_EQ_190), op_LSHIFT_210, VARL("h_tmp412")); + RzILOpPure *op_AND_258 = LOGAND(cond_256, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_262 = SHIFTL0(op_AND_258, SN(32, 0x20)); + RzILOpPure *op_OR_263 = LOGOR(op_AND_141, op_LSHIFT_262); + RzILOpEffect *op_ASSIGN_264 = WRITE_REG(bundle, Rdd_op, op_OR_263); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) (( ...; + RzILOpEffect *seq_265 = SEQN(2, seq_255, op_ASSIGN_264); + + RzILOpEffect *instruction_sequence = SEQN(2, seq_133, seq_265); + return instruction_sequence; +} + +// Rd = vmpyh(Rs,Rt):<<1:rnd:sat +RzILOpEffect *hex_il_op_m2_vmpy2s_s1pack(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); 
// NOTE(review): generated RzIL lifter for "Rd = vmpyh(Rs,Rt):<<1:rnd:sat".
// For each 16-bit lane (shift amounts 0x10 and 0x0 below), the signed halfword of
// Rs is multiplied by the matching signed halfword of Rt, the 64-bit product is
// shifted left by 1 and rounded by adding 0x8000; if the result does not fit in
// signed 32 bits it saturates to -(1<<31) / (1<<31)-1 and USR.OVF is raised via
// hex_set_usr_field. Bits [31:16] of each rounded value are packed into the
// corresponding halfword of Rd. Generated code (see file header) -- fix the
// generator, not this function.
+ RzILOpEffect *set_usr_field_call_91 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1) + ((st64) 0x8000)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1) + ((st64) 0x8000))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1) + ((st64) 0x8000) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_16 = SHIFTRA(Rs, SN(32, 16)); + RzILOpPure *op_AND_18 = LOGAND(op_RSHIFT_16, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_25 = SHIFTRA(Rt, SN(32, 16)); + RzILOpPure *op_AND_27 = LOGAND(op_RSHIFT_25, SN(32, 0xffff)); + RzILOpPure *op_MUL_30 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_18), DUP(op_AND_18))), CAST(16, MSB(DUP(op_AND_18)), DUP(op_AND_18))), CAST(32, MSB(CAST(16, MSB(op_AND_27), DUP(op_AND_27))), CAST(16, MSB(DUP(op_AND_27)), DUP(op_AND_27)))); + RzILOpPure *op_LSHIFT_33 = SHIFTL0(CAST(64, MSB(op_MUL_30), DUP(op_MUL_30)), SN(32, 1)); + RzILOpPure *op_ADD_36 = ADD(op_LSHIFT_33, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_45 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_47 = LOGAND(op_RSHIFT_45, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_53 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_55 = LOGAND(op_RSHIFT_53, SN(32, 0xffff)); + RzILOpPure *op_MUL_58 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_47), DUP(op_AND_47))), CAST(16, MSB(DUP(op_AND_47)), DUP(op_AND_47))), CAST(32, MSB(CAST(16, MSB(op_AND_55), DUP(op_AND_55))), CAST(16, MSB(DUP(op_AND_55)), DUP(op_AND_55)))); + RzILOpPure *op_LSHIFT_61 = SHIFTL0(CAST(64, MSB(op_MUL_58), DUP(op_MUL_58)), SN(32, 1)); + RzILOpPure *op_ADD_64 = ADD(op_LSHIFT_61, CAST(64, MSB(SN(32, 0x8000)), 
SN(32, 0x8000))); + RzILOpPure *op_EQ_65 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_ADD_36), SN(32, 0), SN(32, 0x20)), op_ADD_64); + RzILOpPure *op_RSHIFT_95 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_97 = LOGAND(op_RSHIFT_95, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_103 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_105 = LOGAND(op_RSHIFT_103, SN(32, 0xffff)); + RzILOpPure *op_MUL_108 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_97), DUP(op_AND_97))), CAST(16, MSB(DUP(op_AND_97)), DUP(op_AND_97))), CAST(32, MSB(CAST(16, MSB(op_AND_105), DUP(op_AND_105))), CAST(16, MSB(DUP(op_AND_105)), DUP(op_AND_105)))); + RzILOpPure *op_LSHIFT_111 = SHIFTL0(CAST(64, MSB(op_MUL_108), DUP(op_MUL_108)), SN(32, 1)); + RzILOpPure *op_ADD_114 = ADD(op_LSHIFT_111, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_LT_117 = SLT(op_ADD_114, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_122 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_123 = NEG(op_LSHIFT_122); + RzILOpPure *op_LSHIFT_128 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_131 = SUB(op_LSHIFT_128, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_132 = ITE(op_LT_117, op_NEG_123, op_SUB_131); + RzILOpEffect *gcc_expr_133 = BRANCH(op_EQ_65, EMPTY(), set_usr_field_call_91); + + // h_tmp413 = HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1) + ((st64) 0x8000)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1) + ((st64) 0x8000))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1) + ((st64) 0x8000) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_135 = SETL("h_tmp413", cond_132); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16 ...; + RzILOpEffect *seq_136 = SEQN(2, gcc_expr_133, op_ASSIGN_hybrid_tmp_135); + + // Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xffff << 0x10)))) | (((ut64) (((st32) ((st16) ((((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1) + ((st64) 0x8000)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1) + ((st64) 0x8000)) ? (((st64) ((st32) ((st16) ((Rs >> 0x10) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x10) & 0xffff)))) << 0x1) + ((st64) 0x8000) : h_tmp413) >> 0x10) & ((st64) 0xffff)))) & 0xffff)) << 0x10))); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(SN(64, 0xffff), SN(32, 16)); + RzILOpPure *op_NOT_6 = LOGNOT(op_LSHIFT_5); + RzILOpPure *op_AND_8 = LOGAND(CAST(64, MSB(READ_REG(pkt, Rd_op, true)), READ_REG(pkt, Rd_op, true)), op_NOT_6); + RzILOpPure *op_RSHIFT_69 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_71 = LOGAND(op_RSHIFT_69, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_77 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_79 = LOGAND(op_RSHIFT_77, SN(32, 0xffff)); + RzILOpPure *op_MUL_82 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_71), DUP(op_AND_71))), CAST(16, MSB(DUP(op_AND_71)), DUP(op_AND_71))), CAST(32, MSB(CAST(16, MSB(op_AND_79), DUP(op_AND_79))), CAST(16, MSB(DUP(op_AND_79)), DUP(op_AND_79)))); + RzILOpPure *op_LSHIFT_85 = SHIFTL0(CAST(64, MSB(op_MUL_82), DUP(op_MUL_82)), SN(32, 1)); + RzILOpPure *op_ADD_88 = ADD(op_LSHIFT_85, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *cond_137 = ITE(DUP(op_EQ_65), op_ADD_88, VARL("h_tmp413")); + RzILOpPure *op_RSHIFT_141 = SHIFTRA(cond_137, SN(32, 16)); + RzILOpPure *op_AND_144 = LOGAND(op_RSHIFT_141, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_AND_148 = 
LOGAND(CAST(32, MSB(CAST(16, MSB(op_AND_144), DUP(op_AND_144))), CAST(16, MSB(DUP(op_AND_144)), DUP(op_AND_144))), SN(32, 0xffff)); + RzILOpPure *op_LSHIFT_153 = SHIFTL0(CAST(64, IL_FALSE, op_AND_148), SN(32, 16)); + RzILOpPure *op_OR_155 = LOGOR(CAST(64, IL_FALSE, op_AND_8), op_LSHIFT_153); + RzILOpEffect *op_ASSIGN_157 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, op_OR_155)); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) (( ...; + RzILOpEffect *seq_158 = SEQN(2, seq_136, op_ASSIGN_157); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_248 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) + ((st64) 0x8000)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) + ((st64) 0x8000))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) + ((st64) 0x8000) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_174 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_176 = LOGAND(op_RSHIFT_174, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_182 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_184 = LOGAND(op_RSHIFT_182, SN(32, 0xffff)); + RzILOpPure *op_MUL_187 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_176), DUP(op_AND_176))), CAST(16, MSB(DUP(op_AND_176)), DUP(op_AND_176))), CAST(32, MSB(CAST(16, MSB(op_AND_184), DUP(op_AND_184))), CAST(16, MSB(DUP(op_AND_184)), DUP(op_AND_184)))); + RzILOpPure *op_LSHIFT_190 = SHIFTL0(CAST(64, MSB(op_MUL_187), DUP(op_MUL_187)), SN(32, 1)); + RzILOpPure *op_ADD_193 = ADD(op_LSHIFT_190, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_202 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_204 = LOGAND(op_RSHIFT_202, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_210 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_212 = LOGAND(op_RSHIFT_210, SN(32, 0xffff)); + RzILOpPure *op_MUL_215 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_204), DUP(op_AND_204))), CAST(16, MSB(DUP(op_AND_204)), DUP(op_AND_204))), CAST(32, MSB(CAST(16, MSB(op_AND_212), DUP(op_AND_212))), CAST(16, MSB(DUP(op_AND_212)), DUP(op_AND_212)))); + RzILOpPure *op_LSHIFT_218 = SHIFTL0(CAST(64, MSB(op_MUL_215), DUP(op_MUL_215)), SN(32, 1)); + RzILOpPure *op_ADD_221 = ADD(op_LSHIFT_218, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_EQ_222 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_ADD_193), SN(32, 0), SN(32, 0x20)), op_ADD_221); + RzILOpPure *op_RSHIFT_252 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_254 = LOGAND(op_RSHIFT_252, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_260 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_262 = LOGAND(op_RSHIFT_260, SN(32, 0xffff)); + RzILOpPure *op_MUL_265 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_254), DUP(op_AND_254))), CAST(16, MSB(DUP(op_AND_254)), DUP(op_AND_254))), CAST(32, MSB(CAST(16, MSB(op_AND_262), DUP(op_AND_262))), CAST(16, 
MSB(DUP(op_AND_262)), DUP(op_AND_262)))); + RzILOpPure *op_LSHIFT_268 = SHIFTL0(CAST(64, MSB(op_MUL_265), DUP(op_MUL_265)), SN(32, 1)); + RzILOpPure *op_ADD_271 = ADD(op_LSHIFT_268, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_LT_274 = SLT(op_ADD_271, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_279 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_280 = NEG(op_LSHIFT_279); + RzILOpPure *op_LSHIFT_285 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_288 = SUB(op_LSHIFT_285, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_289 = ITE(op_LT_274, op_NEG_280, op_SUB_288); + RzILOpEffect *gcc_expr_290 = BRANCH(op_EQ_222, EMPTY(), set_usr_field_call_248); + + // h_tmp414 = HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) + ((st64) 0x8000)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) + ((st64) 0x8000))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) + ((st64) 0x8000) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_292 = SETL("h_tmp414", cond_289); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16 ...; + RzILOpEffect *seq_293 = SEQN(2, gcc_expr_290, op_ASSIGN_hybrid_tmp_292); + + // Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xffff << 0x0)))) | (((ut64) (((st32) ((st16) ((((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) + ((st64) 0x8000)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) + ((st64) 0x8000)) ? 
(((st64) ((st32) ((st16) ((Rs >> 0x0) & 0xffff))) * ((st32) ((st16) ((Rt >> 0x0) & 0xffff)))) << 0x1) + ((st64) 0x8000) : h_tmp414) >> 0x10) & ((st64) 0xffff)))) & 0xffff)) << 0x0))); + RzILOpPure *op_LSHIFT_164 = SHIFTL0(SN(64, 0xffff), SN(32, 0)); + RzILOpPure *op_NOT_165 = LOGNOT(op_LSHIFT_164); + RzILOpPure *op_AND_167 = LOGAND(CAST(64, MSB(READ_REG(pkt, Rd_op, true)), READ_REG(pkt, Rd_op, true)), op_NOT_165); + RzILOpPure *op_RSHIFT_226 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_228 = LOGAND(op_RSHIFT_226, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_234 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_236 = LOGAND(op_RSHIFT_234, SN(32, 0xffff)); + RzILOpPure *op_MUL_239 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_228), DUP(op_AND_228))), CAST(16, MSB(DUP(op_AND_228)), DUP(op_AND_228))), CAST(32, MSB(CAST(16, MSB(op_AND_236), DUP(op_AND_236))), CAST(16, MSB(DUP(op_AND_236)), DUP(op_AND_236)))); + RzILOpPure *op_LSHIFT_242 = SHIFTL0(CAST(64, MSB(op_MUL_239), DUP(op_MUL_239)), SN(32, 1)); + RzILOpPure *op_ADD_245 = ADD(op_LSHIFT_242, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *cond_294 = ITE(DUP(op_EQ_222), op_ADD_245, VARL("h_tmp414")); + RzILOpPure *op_RSHIFT_298 = SHIFTRA(cond_294, SN(32, 16)); + RzILOpPure *op_AND_301 = LOGAND(op_RSHIFT_298, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_AND_305 = LOGAND(CAST(32, MSB(CAST(16, MSB(op_AND_301), DUP(op_AND_301))), CAST(16, MSB(DUP(op_AND_301)), DUP(op_AND_301))), SN(32, 0xffff)); + RzILOpPure *op_LSHIFT_310 = SHIFTL0(CAST(64, IL_FALSE, op_AND_305), SN(32, 0)); + RzILOpPure *op_OR_312 = LOGOR(CAST(64, IL_FALSE, op_AND_167), op_LSHIFT_310); + RzILOpEffect *op_ASSIGN_314 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, op_OR_312)); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) (( ...; + RzILOpEffect *seq_315 = SEQN(2, seq_293, op_ASSIGN_314); + + RzILOpEffect *instruction_sequence = SEQN(2, seq_158, seq_315); + return instruction_sequence; +} + +// Rdd = 
// NOTE(review): generated RzIL lifter for "Rdd = vmpyhsu(Rs,Rt):sat".
// Each signed 16-bit half of Rs is multiplied by the matching *unsigned* half of
// Rt (note the CAST(16, IL_FALSE, ...) on the Rt operand), with no shift
// (<<0x0). Each 32-bit lane of the 64-bit destination Rdd is saturated to the
// signed 32-bit range, raising USR.OVF via hex_set_usr_field on overflow.
// Generated code (see file header) -- fix the generator, not this function.
vmpyhsu(Rs,Rt):sat +RzILOpEffect *hex_il_op_m2_vmpy2su_s0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_87 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((ut32) ((st32) ((st16) ((Rs >> 0x0) & 0xffff)))) * ((ut32) ((ut16) ((Rt >> 0x0) & 0xffff))))) << 0x0)), 0x0, 0x20) == (((st64) ((st32) ((ut32) ((st32) ((st16) ((Rs >> 0x0) & 0xffff)))) * ((ut32) ((ut16) ((Rt >> 0x0) & 0xffff))))) << 0x0))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) ((st32) ((ut32) ((st32) ((st16) ((Rs >> 0x0) & 0xffff)))) * ((ut32) ((ut16) ((Rt >> 0x0) & 0xffff))))) << 0x0) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_15 = SHIFTRA(Rs, SN(32, 0)); + RzILOpPure *op_AND_17 = LOGAND(op_RSHIFT_15, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_24 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_26 = LOGAND(op_RSHIFT_24, SN(32, 0xffff)); + RzILOpPure *op_MUL_30 = MUL(CAST(32, IL_FALSE, CAST(32, MSB(CAST(16, MSB(op_AND_17), DUP(op_AND_17))), CAST(16, MSB(DUP(op_AND_17)), DUP(op_AND_17)))), CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_26))); + RzILOpPure *op_LSHIFT_34 = SHIFTL0(CAST(64, MSB(CAST(32, IL_FALSE, op_MUL_30)), CAST(32, IL_FALSE, DUP(op_MUL_30))), SN(32, 0)); + RzILOpPure *op_RSHIFT_43 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_45 = LOGAND(op_RSHIFT_43, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_51 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_53 = LOGAND(op_RSHIFT_51, SN(32, 0xffff)); + RzILOpPure *op_MUL_57 = MUL(CAST(32, IL_FALSE, CAST(32, MSB(CAST(16, MSB(op_AND_45), DUP(op_AND_45))), CAST(16, MSB(DUP(op_AND_45)), DUP(op_AND_45)))), CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_53))); + RzILOpPure *op_LSHIFT_61 = SHIFTL0(CAST(64, MSB(CAST(32, IL_FALSE, op_MUL_57)), CAST(32, IL_FALSE, DUP(op_MUL_57))), SN(32, 0)); + RzILOpPure *op_EQ_62 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_LSHIFT_34), SN(32, 0), SN(32, 0x20)), op_LSHIFT_61); + RzILOpPure *op_RSHIFT_91 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_93 = LOGAND(op_RSHIFT_91, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_99 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_101 = LOGAND(op_RSHIFT_99, SN(32, 0xffff)); + RzILOpPure *op_MUL_105 = MUL(CAST(32, IL_FALSE, CAST(32, MSB(CAST(16, MSB(op_AND_93), DUP(op_AND_93))), CAST(16, MSB(DUP(op_AND_93)), DUP(op_AND_93)))), CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_101))); + RzILOpPure *op_LSHIFT_109 = SHIFTL0(CAST(64, MSB(CAST(32, IL_FALSE, op_MUL_105)), CAST(32, IL_FALSE, DUP(op_MUL_105))), SN(32, 0)); + RzILOpPure *op_LT_112 = SLT(op_LSHIFT_109, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure 
*op_LSHIFT_117 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_118 = NEG(op_LSHIFT_117); + RzILOpPure *op_LSHIFT_123 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_126 = SUB(op_LSHIFT_123, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_127 = ITE(op_LT_112, op_NEG_118, op_SUB_126); + RzILOpEffect *gcc_expr_128 = BRANCH(op_EQ_62, EMPTY(), set_usr_field_call_87); + + // h_tmp415 = HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((ut32) ((st32) ((st16) ((Rs >> 0x0) & 0xffff)))) * ((ut32) ((ut16) ((Rt >> 0x0) & 0xffff))))) << 0x0)), 0x0, 0x20) == (((st64) ((st32) ((ut32) ((st32) ((st16) ((Rs >> 0x0) & 0xffff)))) * ((ut32) ((ut16) ((Rt >> 0x0) & 0xffff))))) << 0x0))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) ((st32) ((ut32) ((st32) ((st16) ((Rs >> 0x0) & 0xffff)))) * ((ut32) ((ut16) ((Rt >> 0x0) & 0xffff))))) << 0x0) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_130 = SETL("h_tmp415", cond_127); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((ut32 ...; + RzILOpEffect *seq_131 = SEQN(2, gcc_expr_128, op_ASSIGN_hybrid_tmp_130); + + // Rdd = ((Rdd & (~(0xffffffff << 0x0))) | ((((sextract64(((ut64) (((st64) ((st32) ((ut32) ((st32) ((st16) ((Rs >> 0x0) & 0xffff)))) * ((ut32) ((ut16) ((Rt >> 0x0) & 0xffff))))) << 0x0)), 0x0, 0x20) == (((st64) ((st32) ((ut32) ((st32) ((st16) ((Rs >> 0x0) & 0xffff)))) * ((ut32) ((ut16) ((Rt >> 0x0) & 0xffff))))) << 0x0)) ? 
(((st64) ((st32) ((ut32) ((st32) ((st16) ((Rs >> 0x0) & 0xffff)))) * ((ut32) ((ut16) ((Rt >> 0x0) & 0xffff))))) << 0x0) : h_tmp415) & 0xffffffff) << 0x0)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0)); + RzILOpPure *op_NOT_6 = LOGNOT(op_LSHIFT_5); + RzILOpPure *op_AND_7 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_6); + RzILOpPure *op_RSHIFT_66 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_68 = LOGAND(op_RSHIFT_66, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_74 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_76 = LOGAND(op_RSHIFT_74, SN(32, 0xffff)); + RzILOpPure *op_MUL_80 = MUL(CAST(32, IL_FALSE, CAST(32, MSB(CAST(16, MSB(op_AND_68), DUP(op_AND_68))), CAST(16, MSB(DUP(op_AND_68)), DUP(op_AND_68)))), CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_76))); + RzILOpPure *op_LSHIFT_84 = SHIFTL0(CAST(64, MSB(CAST(32, IL_FALSE, op_MUL_80)), CAST(32, IL_FALSE, DUP(op_MUL_80))), SN(32, 0)); + RzILOpPure *cond_132 = ITE(DUP(op_EQ_62), op_LSHIFT_84, VARL("h_tmp415")); + RzILOpPure *op_AND_134 = LOGAND(cond_132, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_138 = SHIFTL0(op_AND_134, SN(32, 0)); + RzILOpPure *op_OR_139 = LOGOR(op_AND_7, op_LSHIFT_138); + RzILOpEffect *op_ASSIGN_140 = WRITE_REG(bundle, Rdd_op, op_OR_139); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) (( ...; + RzILOpEffect *seq_141 = SEQN(2, seq_131, op_ASSIGN_140); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_227 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((ut32) ((st32) ((st16) ((Rs >> 0x10) & 0xffff)))) * ((ut32) ((ut16) ((Rt >> 0x10) & 0xffff))))) << 0x0)), 0x0, 0x20) == (((st64) ((st32) ((ut32) ((st32) ((st16) ((Rs >> 0x10) & 0xffff)))) * ((ut32) ((ut16) ((Rt >> 0x10) & 0xffff))))) << 0x0))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) ((st32) ((ut32) ((st32) 
((st16) ((Rs >> 0x10) & 0xffff)))) * ((ut32) ((ut16) ((Rt >> 0x10) & 0xffff))))) << 0x0) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_156 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_158 = LOGAND(op_RSHIFT_156, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_164 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_166 = LOGAND(op_RSHIFT_164, SN(32, 0xffff)); + RzILOpPure *op_MUL_170 = MUL(CAST(32, IL_FALSE, CAST(32, MSB(CAST(16, MSB(op_AND_158), DUP(op_AND_158))), CAST(16, MSB(DUP(op_AND_158)), DUP(op_AND_158)))), CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_166))); + RzILOpPure *op_LSHIFT_174 = SHIFTL0(CAST(64, MSB(CAST(32, IL_FALSE, op_MUL_170)), CAST(32, IL_FALSE, DUP(op_MUL_170))), SN(32, 0)); + RzILOpPure *op_RSHIFT_183 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_185 = LOGAND(op_RSHIFT_183, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_191 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_193 = LOGAND(op_RSHIFT_191, SN(32, 0xffff)); + RzILOpPure *op_MUL_197 = MUL(CAST(32, IL_FALSE, CAST(32, MSB(CAST(16, MSB(op_AND_185), DUP(op_AND_185))), CAST(16, MSB(DUP(op_AND_185)), DUP(op_AND_185)))), CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_193))); + RzILOpPure *op_LSHIFT_201 = SHIFTL0(CAST(64, MSB(CAST(32, IL_FALSE, op_MUL_197)), CAST(32, IL_FALSE, DUP(op_MUL_197))), SN(32, 0)); + RzILOpPure *op_EQ_202 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_LSHIFT_174), SN(32, 0), SN(32, 0x20)), op_LSHIFT_201); + RzILOpPure *op_RSHIFT_231 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_233 = LOGAND(op_RSHIFT_231, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_239 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_241 = LOGAND(op_RSHIFT_239, SN(32, 0xffff)); + RzILOpPure *op_MUL_245 = MUL(CAST(32, IL_FALSE, CAST(32, MSB(CAST(16, MSB(op_AND_233), DUP(op_AND_233))), CAST(16, MSB(DUP(op_AND_233)), DUP(op_AND_233)))), CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_241))); + RzILOpPure *op_LSHIFT_249 = SHIFTL0(CAST(64, MSB(CAST(32, 
IL_FALSE, op_MUL_245)), CAST(32, IL_FALSE, DUP(op_MUL_245))), SN(32, 0)); + RzILOpPure *op_LT_252 = SLT(op_LSHIFT_249, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_257 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_258 = NEG(op_LSHIFT_257); + RzILOpPure *op_LSHIFT_263 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_266 = SUB(op_LSHIFT_263, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_267 = ITE(op_LT_252, op_NEG_258, op_SUB_266); + RzILOpEffect *gcc_expr_268 = BRANCH(op_EQ_202, EMPTY(), set_usr_field_call_227); + + // h_tmp416 = HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((ut32) ((st32) ((st16) ((Rs >> 0x10) & 0xffff)))) * ((ut32) ((ut16) ((Rt >> 0x10) & 0xffff))))) << 0x0)), 0x0, 0x20) == (((st64) ((st32) ((ut32) ((st32) ((st16) ((Rs >> 0x10) & 0xffff)))) * ((ut32) ((ut16) ((Rt >> 0x10) & 0xffff))))) << 0x0))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) ((st32) ((ut32) ((st32) ((st16) ((Rs >> 0x10) & 0xffff)))) * ((ut32) ((ut16) ((Rt >> 0x10) & 0xffff))))) << 0x0) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_270 = SETL("h_tmp416", cond_267); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((ut32 ...; + RzILOpEffect *seq_271 = SEQN(2, gcc_expr_268, op_ASSIGN_hybrid_tmp_270); + + // Rdd = ((Rdd & (~(0xffffffff << 0x20))) | ((((sextract64(((ut64) (((st64) ((st32) ((ut32) ((st32) ((st16) ((Rs >> 0x10) & 0xffff)))) * ((ut32) ((ut16) ((Rt >> 0x10) & 0xffff))))) << 0x0)), 0x0, 0x20) == (((st64) ((st32) ((ut32) ((st32) ((st16) ((Rs >> 0x10) & 0xffff)))) * ((ut32) ((ut16) ((Rt >> 0x10) & 0xffff))))) << 0x0)) ? 
(((st64) ((st32) ((ut32) ((st32) ((st16) ((Rs >> 0x10) & 0xffff)))) * ((ut32) ((ut16) ((Rt >> 0x10) & 0xffff))))) << 0x0) : h_tmp416) & 0xffffffff) << 0x20)); + RzILOpPure *op_LSHIFT_147 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0x20)); + RzILOpPure *op_NOT_148 = LOGNOT(op_LSHIFT_147); + RzILOpPure *op_AND_149 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_148); + RzILOpPure *op_RSHIFT_206 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_208 = LOGAND(op_RSHIFT_206, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_214 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_216 = LOGAND(op_RSHIFT_214, SN(32, 0xffff)); + RzILOpPure *op_MUL_220 = MUL(CAST(32, IL_FALSE, CAST(32, MSB(CAST(16, MSB(op_AND_208), DUP(op_AND_208))), CAST(16, MSB(DUP(op_AND_208)), DUP(op_AND_208)))), CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_216))); + RzILOpPure *op_LSHIFT_224 = SHIFTL0(CAST(64, MSB(CAST(32, IL_FALSE, op_MUL_220)), CAST(32, IL_FALSE, DUP(op_MUL_220))), SN(32, 0)); + RzILOpPure *cond_272 = ITE(DUP(op_EQ_202), op_LSHIFT_224, VARL("h_tmp416")); + RzILOpPure *op_AND_274 = LOGAND(cond_272, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_278 = SHIFTL0(op_AND_274, SN(32, 0x20)); + RzILOpPure *op_OR_279 = LOGOR(op_AND_149, op_LSHIFT_278); + RzILOpEffect *op_ASSIGN_280 = WRITE_REG(bundle, Rdd_op, op_OR_279); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) (( ...; + RzILOpEffect *seq_281 = SEQN(2, seq_271, op_ASSIGN_280); + + RzILOpEffect *instruction_sequence = SEQN(2, seq_141, seq_281); + return instruction_sequence; +} + +// Rdd = vmpyhsu(Rs,Rt):<<1:sat +RzILOpEffect *hex_il_op_m2_vmpy2su_s1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // set_usr_field(bundle, 
HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_87 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((ut32) ((st32) ((st16) ((Rs >> 0x0) & 0xffff)))) * ((ut32) ((ut16) ((Rt >> 0x0) & 0xffff))))) << 0x1)), 0x0, 0x20) == (((st64) ((st32) ((ut32) ((st32) ((st16) ((Rs >> 0x0) & 0xffff)))) * ((ut32) ((ut16) ((Rt >> 0x0) & 0xffff))))) << 0x1))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) ((st32) ((ut32) ((st32) ((st16) ((Rs >> 0x0) & 0xffff)))) * ((ut32) ((ut16) ((Rt >> 0x0) & 0xffff))))) << 0x1) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_15 = SHIFTRA(Rs, SN(32, 0)); + RzILOpPure *op_AND_17 = LOGAND(op_RSHIFT_15, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_24 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_26 = LOGAND(op_RSHIFT_24, SN(32, 0xffff)); + RzILOpPure *op_MUL_30 = MUL(CAST(32, IL_FALSE, CAST(32, MSB(CAST(16, MSB(op_AND_17), DUP(op_AND_17))), CAST(16, MSB(DUP(op_AND_17)), DUP(op_AND_17)))), CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_26))); + RzILOpPure *op_LSHIFT_34 = SHIFTL0(CAST(64, MSB(CAST(32, IL_FALSE, op_MUL_30)), CAST(32, IL_FALSE, DUP(op_MUL_30))), SN(32, 1)); + RzILOpPure *op_RSHIFT_43 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_45 = LOGAND(op_RSHIFT_43, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_51 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_53 = LOGAND(op_RSHIFT_51, SN(32, 0xffff)); + RzILOpPure *op_MUL_57 = MUL(CAST(32, IL_FALSE, CAST(32, MSB(CAST(16, MSB(op_AND_45), DUP(op_AND_45))), CAST(16, MSB(DUP(op_AND_45)), DUP(op_AND_45)))), CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_53))); + RzILOpPure *op_LSHIFT_61 = SHIFTL0(CAST(64, MSB(CAST(32, IL_FALSE, op_MUL_57)), CAST(32, IL_FALSE, DUP(op_MUL_57))), SN(32, 1)); + RzILOpPure *op_EQ_62 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_LSHIFT_34), SN(32, 0), SN(32, 0x20)), op_LSHIFT_61); + 
RzILOpPure *op_RSHIFT_91 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_93 = LOGAND(op_RSHIFT_91, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_99 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_101 = LOGAND(op_RSHIFT_99, SN(32, 0xffff)); + RzILOpPure *op_MUL_105 = MUL(CAST(32, IL_FALSE, CAST(32, MSB(CAST(16, MSB(op_AND_93), DUP(op_AND_93))), CAST(16, MSB(DUP(op_AND_93)), DUP(op_AND_93)))), CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_101))); + RzILOpPure *op_LSHIFT_109 = SHIFTL0(CAST(64, MSB(CAST(32, IL_FALSE, op_MUL_105)), CAST(32, IL_FALSE, DUP(op_MUL_105))), SN(32, 1)); + RzILOpPure *op_LT_112 = SLT(op_LSHIFT_109, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_117 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_118 = NEG(op_LSHIFT_117); + RzILOpPure *op_LSHIFT_123 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_126 = SUB(op_LSHIFT_123, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_127 = ITE(op_LT_112, op_NEG_118, op_SUB_126); + RzILOpEffect *gcc_expr_128 = BRANCH(op_EQ_62, EMPTY(), set_usr_field_call_87); + + // h_tmp417 = HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((ut32) ((st32) ((st16) ((Rs >> 0x0) & 0xffff)))) * ((ut32) ((ut16) ((Rt >> 0x0) & 0xffff))))) << 0x1)), 0x0, 0x20) == (((st64) ((st32) ((ut32) ((st32) ((st16) ((Rs >> 0x0) & 0xffff)))) * ((ut32) ((ut16) ((Rt >> 0x0) & 0xffff))))) << 0x1))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) ((st32) ((ut32) ((st32) ((st16) ((Rs >> 0x0) & 0xffff)))) * ((ut32) ((ut16) ((Rt >> 0x0) & 0xffff))))) << 0x1) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_130 = SETL("h_tmp417", cond_127); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((ut32 ...; + RzILOpEffect *seq_131 = SEQN(2, gcc_expr_128, op_ASSIGN_hybrid_tmp_130); + + // Rdd = ((Rdd & (~(0xffffffff << 0x0))) | ((((sextract64(((ut64) (((st64) ((st32) ((ut32) ((st32) ((st16) ((Rs >> 0x0) & 0xffff)))) * ((ut32) ((ut16) ((Rt >> 0x0) & 0xffff))))) << 0x1)), 0x0, 0x20) == (((st64) ((st32) ((ut32) ((st32) ((st16) ((Rs >> 0x0) & 0xffff)))) * ((ut32) ((ut16) ((Rt >> 0x0) & 0xffff))))) << 0x1)) ? (((st64) ((st32) ((ut32) ((st32) ((st16) ((Rs >> 0x0) & 0xffff)))) * ((ut32) ((ut16) ((Rt >> 0x0) & 0xffff))))) << 0x1) : h_tmp417) & 0xffffffff) << 0x0)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0)); + RzILOpPure *op_NOT_6 = LOGNOT(op_LSHIFT_5); + RzILOpPure *op_AND_7 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_6); + RzILOpPure *op_RSHIFT_66 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_68 = LOGAND(op_RSHIFT_66, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_74 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_76 = LOGAND(op_RSHIFT_74, SN(32, 0xffff)); + RzILOpPure *op_MUL_80 = MUL(CAST(32, IL_FALSE, CAST(32, MSB(CAST(16, MSB(op_AND_68), DUP(op_AND_68))), CAST(16, MSB(DUP(op_AND_68)), DUP(op_AND_68)))), CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_76))); + RzILOpPure *op_LSHIFT_84 = SHIFTL0(CAST(64, MSB(CAST(32, IL_FALSE, op_MUL_80)), CAST(32, IL_FALSE, DUP(op_MUL_80))), SN(32, 1)); + RzILOpPure *cond_132 = ITE(DUP(op_EQ_62), op_LSHIFT_84, VARL("h_tmp417")); + RzILOpPure *op_AND_134 = LOGAND(cond_132, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_138 = SHIFTL0(op_AND_134, SN(32, 0)); + RzILOpPure *op_OR_139 = LOGOR(op_AND_7, op_LSHIFT_138); + RzILOpEffect *op_ASSIGN_140 = WRITE_REG(bundle, Rdd_op, op_OR_139); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) (( ...; + RzILOpEffect *seq_141 = SEQN(2, seq_131, op_ASSIGN_140); + + 
// set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_227 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((ut32) ((st32) ((st16) ((Rs >> 0x10) & 0xffff)))) * ((ut32) ((ut16) ((Rt >> 0x10) & 0xffff))))) << 0x1)), 0x0, 0x20) == (((st64) ((st32) ((ut32) ((st32) ((st16) ((Rs >> 0x10) & 0xffff)))) * ((ut32) ((ut16) ((Rt >> 0x10) & 0xffff))))) << 0x1))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) ((st32) ((ut32) ((st32) ((st16) ((Rs >> 0x10) & 0xffff)))) * ((ut32) ((ut16) ((Rt >> 0x10) & 0xffff))))) << 0x1) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_156 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_158 = LOGAND(op_RSHIFT_156, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_164 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_166 = LOGAND(op_RSHIFT_164, SN(32, 0xffff)); + RzILOpPure *op_MUL_170 = MUL(CAST(32, IL_FALSE, CAST(32, MSB(CAST(16, MSB(op_AND_158), DUP(op_AND_158))), CAST(16, MSB(DUP(op_AND_158)), DUP(op_AND_158)))), CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_166))); + RzILOpPure *op_LSHIFT_174 = SHIFTL0(CAST(64, MSB(CAST(32, IL_FALSE, op_MUL_170)), CAST(32, IL_FALSE, DUP(op_MUL_170))), SN(32, 1)); + RzILOpPure *op_RSHIFT_183 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_185 = LOGAND(op_RSHIFT_183, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_191 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_193 = LOGAND(op_RSHIFT_191, SN(32, 0xffff)); + RzILOpPure *op_MUL_197 = MUL(CAST(32, IL_FALSE, CAST(32, MSB(CAST(16, MSB(op_AND_185), DUP(op_AND_185))), CAST(16, MSB(DUP(op_AND_185)), DUP(op_AND_185)))), CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_193))); + RzILOpPure *op_LSHIFT_201 = SHIFTL0(CAST(64, MSB(CAST(32, IL_FALSE, op_MUL_197)), CAST(32, IL_FALSE, DUP(op_MUL_197))), SN(32, 1)); + RzILOpPure *op_EQ_202 = 
EQ(SEXTRACT64(CAST(64, IL_FALSE, op_LSHIFT_174), SN(32, 0), SN(32, 0x20)), op_LSHIFT_201); + RzILOpPure *op_RSHIFT_231 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_233 = LOGAND(op_RSHIFT_231, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_239 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_241 = LOGAND(op_RSHIFT_239, SN(32, 0xffff)); + RzILOpPure *op_MUL_245 = MUL(CAST(32, IL_FALSE, CAST(32, MSB(CAST(16, MSB(op_AND_233), DUP(op_AND_233))), CAST(16, MSB(DUP(op_AND_233)), DUP(op_AND_233)))), CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_241))); + RzILOpPure *op_LSHIFT_249 = SHIFTL0(CAST(64, MSB(CAST(32, IL_FALSE, op_MUL_245)), CAST(32, IL_FALSE, DUP(op_MUL_245))), SN(32, 1)); + RzILOpPure *op_LT_252 = SLT(op_LSHIFT_249, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_257 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_258 = NEG(op_LSHIFT_257); + RzILOpPure *op_LSHIFT_263 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_266 = SUB(op_LSHIFT_263, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_267 = ITE(op_LT_252, op_NEG_258, op_SUB_266); + RzILOpEffect *gcc_expr_268 = BRANCH(op_EQ_202, EMPTY(), set_usr_field_call_227); + + // h_tmp418 = HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((ut32) ((st32) ((st16) ((Rs >> 0x10) & 0xffff)))) * ((ut32) ((ut16) ((Rt >> 0x10) & 0xffff))))) << 0x1)), 0x0, 0x20) == (((st64) ((st32) ((ut32) ((st32) ((st16) ((Rs >> 0x10) & 0xffff)))) * ((ut32) ((ut16) ((Rt >> 0x10) & 0xffff))))) << 0x1))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) ((st32) ((ut32) ((st32) ((st16) ((Rs >> 0x10) & 0xffff)))) * ((ut32) ((ut16) ((Rt >> 0x10) & 0xffff))))) << 0x1) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_270 = SETL("h_tmp418", cond_267); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((ut32 ...; + RzILOpEffect *seq_271 = SEQN(2, gcc_expr_268, op_ASSIGN_hybrid_tmp_270); + + // Rdd = ((Rdd & (~(0xffffffff << 0x20))) | ((((sextract64(((ut64) (((st64) ((st32) ((ut32) ((st32) ((st16) ((Rs >> 0x10) & 0xffff)))) * ((ut32) ((ut16) ((Rt >> 0x10) & 0xffff))))) << 0x1)), 0x0, 0x20) == (((st64) ((st32) ((ut32) ((st32) ((st16) ((Rs >> 0x10) & 0xffff)))) * ((ut32) ((ut16) ((Rt >> 0x10) & 0xffff))))) << 0x1)) ? (((st64) ((st32) ((ut32) ((st32) ((st16) ((Rs >> 0x10) & 0xffff)))) * ((ut32) ((ut16) ((Rt >> 0x10) & 0xffff))))) << 0x1) : h_tmp418) & 0xffffffff) << 0x20)); + RzILOpPure *op_LSHIFT_147 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0x20)); + RzILOpPure *op_NOT_148 = LOGNOT(op_LSHIFT_147); + RzILOpPure *op_AND_149 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_148); + RzILOpPure *op_RSHIFT_206 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_208 = LOGAND(op_RSHIFT_206, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_214 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_216 = LOGAND(op_RSHIFT_214, SN(32, 0xffff)); + RzILOpPure *op_MUL_220 = MUL(CAST(32, IL_FALSE, CAST(32, MSB(CAST(16, MSB(op_AND_208), DUP(op_AND_208))), CAST(16, MSB(DUP(op_AND_208)), DUP(op_AND_208)))), CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_216))); + RzILOpPure *op_LSHIFT_224 = SHIFTL0(CAST(64, MSB(CAST(32, IL_FALSE, op_MUL_220)), CAST(32, IL_FALSE, DUP(op_MUL_220))), SN(32, 1)); + RzILOpPure *cond_272 = ITE(DUP(op_EQ_202), op_LSHIFT_224, VARL("h_tmp418")); + RzILOpPure *op_AND_274 = LOGAND(cond_272, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_278 = SHIFTL0(op_AND_274, SN(32, 0x20)); + RzILOpPure *op_OR_279 = LOGOR(op_AND_149, op_LSHIFT_278); + RzILOpEffect *op_ASSIGN_280 = WRITE_REG(bundle, Rdd_op, op_OR_279); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) (( ...; + RzILOpEffect 
*seq_281 = SEQN(2, seq_271, op_ASSIGN_280); + + RzILOpEffect *instruction_sequence = SEQN(2, seq_141, seq_281); + return instruction_sequence; +} + +// Rd = vraddh(Rss,Rtt) +RzILOpEffect *hex_il_op_m2_vraddh(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: st32 i; + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // Rd = 0x0; + RzILOpEffect *op_ASSIGN_3 = WRITE_REG(bundle, Rd_op, SN(32, 0)); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_5 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_8 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp419 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_10 = SETL("h_tmp419", VARL("i")); + + // seq(h_tmp419 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_11 = SEQN(2, op_ASSIGN_hybrid_tmp_10, op_INC_8); + + // Rd = Rd + ((st32) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff)))) + ((st32) ((st16) ((Rtt >> i * 0x10) & ((st64) 0xffff)))); + RzILOpPure *op_MUL_14 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_15 = SHIFTRA(Rss, op_MUL_14); + RzILOpPure *op_AND_18 = LOGAND(op_RSHIFT_15, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_22 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_23 = SHIFTRA(Rtt, op_MUL_22); + RzILOpPure *op_AND_26 = LOGAND(op_RSHIFT_23, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_ADD_30 = ADD(CAST(32, MSB(CAST(16, MSB(op_AND_18), DUP(op_AND_18))), CAST(16, MSB(DUP(op_AND_18)), DUP(op_AND_18))), CAST(32, MSB(CAST(16, MSB(op_AND_26), DUP(op_AND_26))), CAST(16, MSB(DUP(op_AND_26)), DUP(op_AND_26)))); + RzILOpPure *op_ADD_31 = ADD(READ_REG(pkt, Rd_op, true), op_ADD_30); + RzILOpEffect *op_ASSIGN_ADD_32 = WRITE_REG(bundle, Rd_op, op_ADD_31); + + // seq(h_tmp419; Rd = Rd + ((st32) ((st16) ((Rss >> i * 
0x10) & ((s ...; + RzILOpEffect *seq_33 = op_ASSIGN_ADD_32; + + // seq(seq(h_tmp419; Rd = Rd + ((st32) ((st16) ((Rss >> i * 0x10) & ...; + RzILOpEffect *seq_34 = SEQN(2, seq_33, seq_11); + + // while ((i < 0x4)) { seq(seq(h_tmp419; Rd = Rd + ((st32) ((st16) ((Rss >> i * 0x10) & ... }; + RzILOpPure *op_LT_7 = SLT(VARL("i"), SN(32, 4)); + RzILOpEffect *for_35 = REPEAT(op_LT_7, seq_34); + + // seq(i = 0x0; while ((i < 0x4)) { seq(seq(h_tmp419; Rd = Rd + ((s ...; + RzILOpEffect *seq_36 = SEQN(2, op_ASSIGN_5, for_35); + + RzILOpEffect *instruction_sequence = SEQN(2, op_ASSIGN_3, seq_36); + return instruction_sequence; +} + +// Rd = vradduh(Rss,Rtt) +RzILOpEffect *hex_il_op_m2_vradduh(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: st32 i; + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // Rd = 0x0; + RzILOpEffect *op_ASSIGN_3 = WRITE_REG(bundle, Rd_op, SN(32, 0)); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_5 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_8 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp420 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_10 = SETL("h_tmp420", VARL("i")); + + // seq(h_tmp420 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_11 = SEQN(2, op_ASSIGN_hybrid_tmp_10, op_INC_8); + + // Rd = Rd + ((st32) ((ut16) ((Rss >> i * 0x10) & ((st64) 0xffff)))) + ((st32) ((ut16) ((Rtt >> i * 0x10) & ((st64) 0xffff)))); + RzILOpPure *op_MUL_14 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_15 = SHIFTRA(Rss, op_MUL_14); + RzILOpPure *op_AND_18 = LOGAND(op_RSHIFT_15, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_22 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_23 = SHIFTRA(Rtt, op_MUL_22); + RzILOpPure *op_AND_26 = LOGAND(op_RSHIFT_23, CAST(64, 
MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_ADD_30 = ADD(CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_18)), CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_26))); + RzILOpPure *op_ADD_31 = ADD(READ_REG(pkt, Rd_op, true), op_ADD_30); + RzILOpEffect *op_ASSIGN_ADD_32 = WRITE_REG(bundle, Rd_op, op_ADD_31); + + // seq(h_tmp420; Rd = Rd + ((st32) ((ut16) ((Rss >> i * 0x10) & ((s ...; + RzILOpEffect *seq_33 = op_ASSIGN_ADD_32; + + // seq(seq(h_tmp420; Rd = Rd + ((st32) ((ut16) ((Rss >> i * 0x10) & ...; + RzILOpEffect *seq_34 = SEQN(2, seq_33, seq_11); + + // while ((i < 0x4)) { seq(seq(h_tmp420; Rd = Rd + ((st32) ((ut16) ((Rss >> i * 0x10) & ... }; + RzILOpPure *op_LT_7 = SLT(VARL("i"), SN(32, 4)); + RzILOpEffect *for_35 = REPEAT(op_LT_7, seq_34); + + // seq(i = 0x0; while ((i < 0x4)) { seq(seq(h_tmp420; Rd = Rd + ((s ...; + RzILOpEffect *seq_36 = SEQN(2, op_ASSIGN_5, for_35); + + RzILOpEffect *instruction_sequence = SEQN(2, op_ASSIGN_3, seq_36); + return instruction_sequence; +} + +// Rxx += vrcmpyi(Rss,Rtt) +RzILOpEffect *hex_il_op_m2_vrcmaci_s0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rxx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // Rxx = Rxx + ((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) + ((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff))))) + ((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) + ((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff))))); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rss, SN(32, 16)); + RzILOpPure *op_AND_8 = 
LOGAND(op_RSHIFT_5, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_15 = SHIFTRA(Rtt, SN(32, 0)); + RzILOpPure *op_AND_18 = LOGAND(op_RSHIFT_15, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_21 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_8), DUP(op_AND_8))), CAST(16, MSB(DUP(op_AND_8)), DUP(op_AND_8))), CAST(32, MSB(CAST(16, MSB(op_AND_18), DUP(op_AND_18))), CAST(16, MSB(DUP(op_AND_18)), DUP(op_AND_18)))); + RzILOpPure *op_ADD_23 = ADD(READ_REG(pkt, Rxx_op, false), CAST(64, MSB(op_MUL_21), DUP(op_MUL_21))); + RzILOpPure *op_RSHIFT_27 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_30 = LOGAND(op_RSHIFT_27, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_36 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_39 = LOGAND(op_RSHIFT_36, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_42 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_30), DUP(op_AND_30))), CAST(16, MSB(DUP(op_AND_30)), DUP(op_AND_30))), CAST(32, MSB(CAST(16, MSB(op_AND_39), DUP(op_AND_39))), CAST(16, MSB(DUP(op_AND_39)), DUP(op_AND_39)))); + RzILOpPure *op_ADD_44 = ADD(op_ADD_23, CAST(64, MSB(op_MUL_42), DUP(op_MUL_42))); + RzILOpPure *op_RSHIFT_48 = SHIFTRA(DUP(Rss), SN(32, 0x30)); + RzILOpPure *op_AND_51 = LOGAND(op_RSHIFT_48, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_57 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_60 = LOGAND(op_RSHIFT_57, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_63 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_51), DUP(op_AND_51))), CAST(16, MSB(DUP(op_AND_51)), DUP(op_AND_51))), CAST(32, MSB(CAST(16, MSB(op_AND_60), DUP(op_AND_60))), CAST(16, MSB(DUP(op_AND_60)), DUP(op_AND_60)))); + RzILOpPure *op_ADD_65 = ADD(op_ADD_44, CAST(64, MSB(op_MUL_63), DUP(op_MUL_63))); + RzILOpPure *op_RSHIFT_69 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_72 = LOGAND(op_RSHIFT_69, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure 
*op_RSHIFT_78 = SHIFTRA(DUP(Rtt), SN(32, 0x30)); + RzILOpPure *op_AND_81 = LOGAND(op_RSHIFT_78, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_84 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_72), DUP(op_AND_72))), CAST(16, MSB(DUP(op_AND_72)), DUP(op_AND_72))), CAST(32, MSB(CAST(16, MSB(op_AND_81), DUP(op_AND_81))), CAST(16, MSB(DUP(op_AND_81)), DUP(op_AND_81)))); + RzILOpPure *op_ADD_86 = ADD(op_ADD_65, CAST(64, MSB(op_MUL_84), DUP(op_MUL_84))); + RzILOpEffect *op_ASSIGN_87 = WRITE_REG(bundle, Rxx_op, op_ADD_86); + + RzILOpEffect *instruction_sequence = op_ASSIGN_87; + return instruction_sequence; +} + +// Rxx += vrcmpyi(Rss,Rtt*) +RzILOpEffect *hex_il_op_m2_vrcmaci_s0c(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rxx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // Rxx = Rxx + ((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) - ((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff))))) + ((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) - ((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff))))); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rss, SN(32, 16)); + RzILOpPure *op_AND_8 = LOGAND(op_RSHIFT_5, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_15 = SHIFTRA(Rtt, SN(32, 0)); + RzILOpPure *op_AND_18 = LOGAND(op_RSHIFT_15, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_21 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_8), DUP(op_AND_8))), CAST(16, MSB(DUP(op_AND_8)), DUP(op_AND_8))), CAST(32, MSB(CAST(16, MSB(op_AND_18), 
DUP(op_AND_18))), CAST(16, MSB(DUP(op_AND_18)), DUP(op_AND_18)))); + RzILOpPure *op_ADD_23 = ADD(READ_REG(pkt, Rxx_op, false), CAST(64, MSB(op_MUL_21), DUP(op_MUL_21))); + RzILOpPure *op_RSHIFT_27 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_30 = LOGAND(op_RSHIFT_27, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_36 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_39 = LOGAND(op_RSHIFT_36, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_42 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_30), DUP(op_AND_30))), CAST(16, MSB(DUP(op_AND_30)), DUP(op_AND_30))), CAST(32, MSB(CAST(16, MSB(op_AND_39), DUP(op_AND_39))), CAST(16, MSB(DUP(op_AND_39)), DUP(op_AND_39)))); + RzILOpPure *op_SUB_44 = SUB(op_ADD_23, CAST(64, MSB(op_MUL_42), DUP(op_MUL_42))); + RzILOpPure *op_RSHIFT_48 = SHIFTRA(DUP(Rss), SN(32, 0x30)); + RzILOpPure *op_AND_51 = LOGAND(op_RSHIFT_48, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_57 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_60 = LOGAND(op_RSHIFT_57, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_63 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_51), DUP(op_AND_51))), CAST(16, MSB(DUP(op_AND_51)), DUP(op_AND_51))), CAST(32, MSB(CAST(16, MSB(op_AND_60), DUP(op_AND_60))), CAST(16, MSB(DUP(op_AND_60)), DUP(op_AND_60)))); + RzILOpPure *op_ADD_65 = ADD(op_SUB_44, CAST(64, MSB(op_MUL_63), DUP(op_MUL_63))); + RzILOpPure *op_RSHIFT_69 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_72 = LOGAND(op_RSHIFT_69, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_78 = SHIFTRA(DUP(Rtt), SN(32, 0x30)); + RzILOpPure *op_AND_81 = LOGAND(op_RSHIFT_78, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_84 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_72), DUP(op_AND_72))), CAST(16, MSB(DUP(op_AND_72)), DUP(op_AND_72))), CAST(32, MSB(CAST(16, MSB(op_AND_81), DUP(op_AND_81))), CAST(16, MSB(DUP(op_AND_81)), DUP(op_AND_81)))); + 
RzILOpPure *op_SUB_86 = SUB(op_ADD_65, CAST(64, MSB(op_MUL_84), DUP(op_MUL_84))); + RzILOpEffect *op_ASSIGN_87 = WRITE_REG(bundle, Rxx_op, op_SUB_86); + + RzILOpEffect *instruction_sequence = op_ASSIGN_87; + return instruction_sequence; +} + +// Rxx += vrcmpyr(Rss,Rtt) +RzILOpEffect *hex_il_op_m2_vrcmacr_s0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rxx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // Rxx = Rxx + ((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) - ((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff))))) + ((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) - ((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff))))); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rss, SN(32, 0)); + RzILOpPure *op_AND_8 = LOGAND(op_RSHIFT_5, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_15 = SHIFTRA(Rtt, SN(32, 0)); + RzILOpPure *op_AND_18 = LOGAND(op_RSHIFT_15, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_21 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_8), DUP(op_AND_8))), CAST(16, MSB(DUP(op_AND_8)), DUP(op_AND_8))), CAST(32, MSB(CAST(16, MSB(op_AND_18), DUP(op_AND_18))), CAST(16, MSB(DUP(op_AND_18)), DUP(op_AND_18)))); + RzILOpPure *op_ADD_23 = ADD(READ_REG(pkt, Rxx_op, false), CAST(64, MSB(op_MUL_21), DUP(op_MUL_21))); + RzILOpPure *op_RSHIFT_27 = SHIFTRA(DUP(Rss), SN(32, 16)); + RzILOpPure *op_AND_30 = LOGAND(op_RSHIFT_27, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_36 = SHIFTRA(DUP(Rtt), SN(32, 16)); + 
RzILOpPure *op_AND_39 = LOGAND(op_RSHIFT_36, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_42 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_30), DUP(op_AND_30))), CAST(16, MSB(DUP(op_AND_30)), DUP(op_AND_30))), CAST(32, MSB(CAST(16, MSB(op_AND_39), DUP(op_AND_39))), CAST(16, MSB(DUP(op_AND_39)), DUP(op_AND_39)))); + RzILOpPure *op_SUB_44 = SUB(op_ADD_23, CAST(64, MSB(op_MUL_42), DUP(op_MUL_42))); + RzILOpPure *op_RSHIFT_48 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_51 = LOGAND(op_RSHIFT_48, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_57 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_60 = LOGAND(op_RSHIFT_57, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_63 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_51), DUP(op_AND_51))), CAST(16, MSB(DUP(op_AND_51)), DUP(op_AND_51))), CAST(32, MSB(CAST(16, MSB(op_AND_60), DUP(op_AND_60))), CAST(16, MSB(DUP(op_AND_60)), DUP(op_AND_60)))); + RzILOpPure *op_ADD_65 = ADD(op_SUB_44, CAST(64, MSB(op_MUL_63), DUP(op_MUL_63))); + RzILOpPure *op_RSHIFT_69 = SHIFTRA(DUP(Rss), SN(32, 0x30)); + RzILOpPure *op_AND_72 = LOGAND(op_RSHIFT_69, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_78 = SHIFTRA(DUP(Rtt), SN(32, 0x30)); + RzILOpPure *op_AND_81 = LOGAND(op_RSHIFT_78, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_84 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_72), DUP(op_AND_72))), CAST(16, MSB(DUP(op_AND_72)), DUP(op_AND_72))), CAST(32, MSB(CAST(16, MSB(op_AND_81), DUP(op_AND_81))), CAST(16, MSB(DUP(op_AND_81)), DUP(op_AND_81)))); + RzILOpPure *op_SUB_86 = SUB(op_ADD_65, CAST(64, MSB(op_MUL_84), DUP(op_MUL_84))); + RzILOpEffect *op_ASSIGN_87 = WRITE_REG(bundle, Rxx_op, op_SUB_86); + + RzILOpEffect *instruction_sequence = op_ASSIGN_87; + return instruction_sequence; +} + +// Rxx += vrcmpyr(Rss,Rtt*) +RzILOpEffect *hex_il_op_m2_vrcmacr_s0c(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = 
bundle->pkt; + // READ + const HexOp *Rxx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // Rxx = Rxx + ((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) + ((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff))))) + ((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) + ((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff))))); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rss, SN(32, 0)); + RzILOpPure *op_AND_8 = LOGAND(op_RSHIFT_5, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_15 = SHIFTRA(Rtt, SN(32, 0)); + RzILOpPure *op_AND_18 = LOGAND(op_RSHIFT_15, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_21 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_8), DUP(op_AND_8))), CAST(16, MSB(DUP(op_AND_8)), DUP(op_AND_8))), CAST(32, MSB(CAST(16, MSB(op_AND_18), DUP(op_AND_18))), CAST(16, MSB(DUP(op_AND_18)), DUP(op_AND_18)))); + RzILOpPure *op_ADD_23 = ADD(READ_REG(pkt, Rxx_op, false), CAST(64, MSB(op_MUL_21), DUP(op_MUL_21))); + RzILOpPure *op_RSHIFT_27 = SHIFTRA(DUP(Rss), SN(32, 16)); + RzILOpPure *op_AND_30 = LOGAND(op_RSHIFT_27, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_36 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_39 = LOGAND(op_RSHIFT_36, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_42 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_30), DUP(op_AND_30))), CAST(16, MSB(DUP(op_AND_30)), DUP(op_AND_30))), CAST(32, MSB(CAST(16, MSB(op_AND_39), DUP(op_AND_39))), CAST(16, MSB(DUP(op_AND_39)), DUP(op_AND_39)))); + RzILOpPure *op_ADD_44 = ADD(op_ADD_23, CAST(64, 
MSB(op_MUL_42), DUP(op_MUL_42))); + RzILOpPure *op_RSHIFT_48 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_51 = LOGAND(op_RSHIFT_48, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_57 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_60 = LOGAND(op_RSHIFT_57, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_63 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_51), DUP(op_AND_51))), CAST(16, MSB(DUP(op_AND_51)), DUP(op_AND_51))), CAST(32, MSB(CAST(16, MSB(op_AND_60), DUP(op_AND_60))), CAST(16, MSB(DUP(op_AND_60)), DUP(op_AND_60)))); + RzILOpPure *op_ADD_65 = ADD(op_ADD_44, CAST(64, MSB(op_MUL_63), DUP(op_MUL_63))); + RzILOpPure *op_RSHIFT_69 = SHIFTRA(DUP(Rss), SN(32, 0x30)); + RzILOpPure *op_AND_72 = LOGAND(op_RSHIFT_69, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_78 = SHIFTRA(DUP(Rtt), SN(32, 0x30)); + RzILOpPure *op_AND_81 = LOGAND(op_RSHIFT_78, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_84 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_72), DUP(op_AND_72))), CAST(16, MSB(DUP(op_AND_72)), DUP(op_AND_72))), CAST(32, MSB(CAST(16, MSB(op_AND_81), DUP(op_AND_81))), CAST(16, MSB(DUP(op_AND_81)), DUP(op_AND_81)))); + RzILOpPure *op_ADD_86 = ADD(op_ADD_65, CAST(64, MSB(op_MUL_84), DUP(op_MUL_84))); + RzILOpEffect *op_ASSIGN_87 = WRITE_REG(bundle, Rxx_op, op_ADD_86); + + RzILOpEffect *instruction_sequence = op_ASSIGN_87; + return instruction_sequence; +} + +// Rdd = vrcmpyi(Rss,Rtt) +RzILOpEffect *hex_il_op_m2_vrcmpyi_s0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // Rdd = ((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 
0xffff))))) + ((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff))))) + ((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) + ((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff))))); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rss, SN(32, 16)); + RzILOpPure *op_AND_8 = LOGAND(op_RSHIFT_5, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_15 = SHIFTRA(Rtt, SN(32, 0)); + RzILOpPure *op_AND_18 = LOGAND(op_RSHIFT_15, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_21 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_8), DUP(op_AND_8))), CAST(16, MSB(DUP(op_AND_8)), DUP(op_AND_8))), CAST(32, MSB(CAST(16, MSB(op_AND_18), DUP(op_AND_18))), CAST(16, MSB(DUP(op_AND_18)), DUP(op_AND_18)))); + RzILOpPure *op_RSHIFT_26 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_29 = LOGAND(op_RSHIFT_26, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_35 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_38 = LOGAND(op_RSHIFT_35, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_41 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_29), DUP(op_AND_29))), CAST(16, MSB(DUP(op_AND_29)), DUP(op_AND_29))), CAST(32, MSB(CAST(16, MSB(op_AND_38), DUP(op_AND_38))), CAST(16, MSB(DUP(op_AND_38)), DUP(op_AND_38)))); + RzILOpPure *op_ADD_43 = ADD(CAST(64, MSB(op_MUL_21), DUP(op_MUL_21)), CAST(64, MSB(op_MUL_41), DUP(op_MUL_41))); + RzILOpPure *op_RSHIFT_47 = SHIFTRA(DUP(Rss), SN(32, 0x30)); + RzILOpPure *op_AND_50 = LOGAND(op_RSHIFT_47, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_56 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_59 = LOGAND(op_RSHIFT_56, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_62 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_50), DUP(op_AND_50))), CAST(16, MSB(DUP(op_AND_50)), 
DUP(op_AND_50))), CAST(32, MSB(CAST(16, MSB(op_AND_59), DUP(op_AND_59))), CAST(16, MSB(DUP(op_AND_59)), DUP(op_AND_59)))); + RzILOpPure *op_ADD_64 = ADD(op_ADD_43, CAST(64, MSB(op_MUL_62), DUP(op_MUL_62))); + RzILOpPure *op_RSHIFT_68 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_71 = LOGAND(op_RSHIFT_68, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_77 = SHIFTRA(DUP(Rtt), SN(32, 0x30)); + RzILOpPure *op_AND_80 = LOGAND(op_RSHIFT_77, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_83 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_71), DUP(op_AND_71))), CAST(16, MSB(DUP(op_AND_71)), DUP(op_AND_71))), CAST(32, MSB(CAST(16, MSB(op_AND_80), DUP(op_AND_80))), CAST(16, MSB(DUP(op_AND_80)), DUP(op_AND_80)))); + RzILOpPure *op_ADD_85 = ADD(op_ADD_64, CAST(64, MSB(op_MUL_83), DUP(op_MUL_83))); + RzILOpEffect *op_ASSIGN_86 = WRITE_REG(bundle, Rdd_op, op_ADD_85); + + RzILOpEffect *instruction_sequence = op_ASSIGN_86; + return instruction_sequence; +} + +// Rdd = vrcmpyi(Rss,Rtt*) +RzILOpEffect *hex_il_op_m2_vrcmpyi_s0c(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // Rdd = ((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) - ((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff))))) + ((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) - ((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff))))); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rss, SN(32, 16)); + RzILOpPure *op_AND_8 = LOGAND(op_RSHIFT_5, 
CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_15 = SHIFTRA(Rtt, SN(32, 0)); + RzILOpPure *op_AND_18 = LOGAND(op_RSHIFT_15, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_21 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_8), DUP(op_AND_8))), CAST(16, MSB(DUP(op_AND_8)), DUP(op_AND_8))), CAST(32, MSB(CAST(16, MSB(op_AND_18), DUP(op_AND_18))), CAST(16, MSB(DUP(op_AND_18)), DUP(op_AND_18)))); + RzILOpPure *op_RSHIFT_26 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_29 = LOGAND(op_RSHIFT_26, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_35 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_38 = LOGAND(op_RSHIFT_35, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_41 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_29), DUP(op_AND_29))), CAST(16, MSB(DUP(op_AND_29)), DUP(op_AND_29))), CAST(32, MSB(CAST(16, MSB(op_AND_38), DUP(op_AND_38))), CAST(16, MSB(DUP(op_AND_38)), DUP(op_AND_38)))); + RzILOpPure *op_SUB_43 = SUB(CAST(64, MSB(op_MUL_21), DUP(op_MUL_21)), CAST(64, MSB(op_MUL_41), DUP(op_MUL_41))); + RzILOpPure *op_RSHIFT_47 = SHIFTRA(DUP(Rss), SN(32, 0x30)); + RzILOpPure *op_AND_50 = LOGAND(op_RSHIFT_47, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_56 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_59 = LOGAND(op_RSHIFT_56, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_62 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_50), DUP(op_AND_50))), CAST(16, MSB(DUP(op_AND_50)), DUP(op_AND_50))), CAST(32, MSB(CAST(16, MSB(op_AND_59), DUP(op_AND_59))), CAST(16, MSB(DUP(op_AND_59)), DUP(op_AND_59)))); + RzILOpPure *op_ADD_64 = ADD(op_SUB_43, CAST(64, MSB(op_MUL_62), DUP(op_MUL_62))); + RzILOpPure *op_RSHIFT_68 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_71 = LOGAND(op_RSHIFT_68, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_77 = SHIFTRA(DUP(Rtt), SN(32, 0x30)); + RzILOpPure *op_AND_80 = LOGAND(op_RSHIFT_77, 
CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_83 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_71), DUP(op_AND_71))), CAST(16, MSB(DUP(op_AND_71)), DUP(op_AND_71))), CAST(32, MSB(CAST(16, MSB(op_AND_80), DUP(op_AND_80))), CAST(16, MSB(DUP(op_AND_80)), DUP(op_AND_80)))); + RzILOpPure *op_SUB_85 = SUB(op_ADD_64, CAST(64, MSB(op_MUL_83), DUP(op_MUL_83))); + RzILOpEffect *op_ASSIGN_86 = WRITE_REG(bundle, Rdd_op, op_SUB_85); + + RzILOpEffect *instruction_sequence = op_ASSIGN_86; + return instruction_sequence; +} + +// Rdd = vrcmpyr(Rss,Rtt) +RzILOpEffect *hex_il_op_m2_vrcmpyr_s0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // Rdd = ((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) - ((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff))))) + ((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) - ((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff))))); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rss, SN(32, 0)); + RzILOpPure *op_AND_8 = LOGAND(op_RSHIFT_5, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_15 = SHIFTRA(Rtt, SN(32, 0)); + RzILOpPure *op_AND_18 = LOGAND(op_RSHIFT_15, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_21 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_8), DUP(op_AND_8))), CAST(16, MSB(DUP(op_AND_8)), DUP(op_AND_8))), CAST(32, MSB(CAST(16, MSB(op_AND_18), DUP(op_AND_18))), CAST(16, MSB(DUP(op_AND_18)), DUP(op_AND_18)))); + RzILOpPure *op_RSHIFT_26 = 
SHIFTRA(DUP(Rss), SN(32, 16)); + RzILOpPure *op_AND_29 = LOGAND(op_RSHIFT_26, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_35 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_38 = LOGAND(op_RSHIFT_35, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_41 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_29), DUP(op_AND_29))), CAST(16, MSB(DUP(op_AND_29)), DUP(op_AND_29))), CAST(32, MSB(CAST(16, MSB(op_AND_38), DUP(op_AND_38))), CAST(16, MSB(DUP(op_AND_38)), DUP(op_AND_38)))); + RzILOpPure *op_SUB_43 = SUB(CAST(64, MSB(op_MUL_21), DUP(op_MUL_21)), CAST(64, MSB(op_MUL_41), DUP(op_MUL_41))); + RzILOpPure *op_RSHIFT_47 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_50 = LOGAND(op_RSHIFT_47, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_56 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_59 = LOGAND(op_RSHIFT_56, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_62 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_50), DUP(op_AND_50))), CAST(16, MSB(DUP(op_AND_50)), DUP(op_AND_50))), CAST(32, MSB(CAST(16, MSB(op_AND_59), DUP(op_AND_59))), CAST(16, MSB(DUP(op_AND_59)), DUP(op_AND_59)))); + RzILOpPure *op_ADD_64 = ADD(op_SUB_43, CAST(64, MSB(op_MUL_62), DUP(op_MUL_62))); + RzILOpPure *op_RSHIFT_68 = SHIFTRA(DUP(Rss), SN(32, 0x30)); + RzILOpPure *op_AND_71 = LOGAND(op_RSHIFT_68, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_77 = SHIFTRA(DUP(Rtt), SN(32, 0x30)); + RzILOpPure *op_AND_80 = LOGAND(op_RSHIFT_77, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_83 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_71), DUP(op_AND_71))), CAST(16, MSB(DUP(op_AND_71)), DUP(op_AND_71))), CAST(32, MSB(CAST(16, MSB(op_AND_80), DUP(op_AND_80))), CAST(16, MSB(DUP(op_AND_80)), DUP(op_AND_80)))); + RzILOpPure *op_SUB_85 = SUB(op_ADD_64, CAST(64, MSB(op_MUL_83), DUP(op_MUL_83))); + RzILOpEffect *op_ASSIGN_86 = WRITE_REG(bundle, Rdd_op, op_SUB_85); + + RzILOpEffect 
*instruction_sequence = op_ASSIGN_86; + return instruction_sequence; +} + +// Rdd = vrcmpyr(Rss,Rtt*) +RzILOpEffect *hex_il_op_m2_vrcmpyr_s0c(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + // NOTE(review): conjugate variant (Rtt*): all four 16-bit lane products are added (op_ADD_43/_64/_85, no SUBs), unlike hex_il_op_m2_vrcmpyr_s0. + + // Rdd = ((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) + ((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff))))) + ((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) + ((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff))))); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rss, SN(32, 0)); + RzILOpPure *op_AND_8 = LOGAND(op_RSHIFT_5, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_15 = SHIFTRA(Rtt, SN(32, 0)); + RzILOpPure *op_AND_18 = LOGAND(op_RSHIFT_15, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_21 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_8), DUP(op_AND_8))), CAST(16, MSB(DUP(op_AND_8)), DUP(op_AND_8))), CAST(32, MSB(CAST(16, MSB(op_AND_18), DUP(op_AND_18))), CAST(16, MSB(DUP(op_AND_18)), DUP(op_AND_18)))); + RzILOpPure *op_RSHIFT_26 = SHIFTRA(DUP(Rss), SN(32, 16)); + RzILOpPure *op_AND_29 = LOGAND(op_RSHIFT_26, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_35 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_38 = LOGAND(op_RSHIFT_35, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_41 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_29), DUP(op_AND_29))), CAST(16, MSB(DUP(op_AND_29)), DUP(op_AND_29))), CAST(32, MSB(CAST(16, MSB(op_AND_38), DUP(op_AND_38))), 
CAST(16, MSB(DUP(op_AND_38)), DUP(op_AND_38)))); + RzILOpPure *op_ADD_43 = ADD(CAST(64, MSB(op_MUL_21), DUP(op_MUL_21)), CAST(64, MSB(op_MUL_41), DUP(op_MUL_41))); + RzILOpPure *op_RSHIFT_47 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_50 = LOGAND(op_RSHIFT_47, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_56 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_59 = LOGAND(op_RSHIFT_56, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_62 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_50), DUP(op_AND_50))), CAST(16, MSB(DUP(op_AND_50)), DUP(op_AND_50))), CAST(32, MSB(CAST(16, MSB(op_AND_59), DUP(op_AND_59))), CAST(16, MSB(DUP(op_AND_59)), DUP(op_AND_59)))); + RzILOpPure *op_ADD_64 = ADD(op_ADD_43, CAST(64, MSB(op_MUL_62), DUP(op_MUL_62))); + RzILOpPure *op_RSHIFT_68 = SHIFTRA(DUP(Rss), SN(32, 0x30)); + RzILOpPure *op_AND_71 = LOGAND(op_RSHIFT_68, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_77 = SHIFTRA(DUP(Rtt), SN(32, 0x30)); + RzILOpPure *op_AND_80 = LOGAND(op_RSHIFT_77, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_83 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_71), DUP(op_AND_71))), CAST(16, MSB(DUP(op_AND_71)), DUP(op_AND_71))), CAST(32, MSB(CAST(16, MSB(op_AND_80), DUP(op_AND_80))), CAST(16, MSB(DUP(op_AND_80)), DUP(op_AND_80)))); + RzILOpPure *op_ADD_85 = ADD(op_ADD_64, CAST(64, MSB(op_MUL_83), DUP(op_MUL_83))); + RzILOpEffect *op_ASSIGN_86 = WRITE_REG(bundle, Rdd_op, op_ADD_85); + + RzILOpEffect *instruction_sequence = op_ASSIGN_86; + return instruction_sequence; +} + +// Rxx += vrcmpys(Rss,Rtt):<<1:sat:raw:hi +RzILOpEffect *hex_il_op_m2_vrcmpys_acc_s1_h(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rxx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + 
RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_231 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff))) >> 0x0) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff))) >> 0x10) & ((st64) 0xffff))))) << 0x1)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff))) >> 0x0) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff))) >> 0x10) & ((st64) 0xffff))))) << 0x1))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff))) >> 0x0) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff))) >> 0x10) & ((st64) 0xffff))))) << 0x1) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_23 = SHIFTRA(Rss, SN(32, 16)); + RzILOpPure *op_AND_26 = LOGAND(op_RSHIFT_23, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_33 = SHIFTRA(Rtt, SN(32, 0x20)); + RzILOpPure *op_AND_35 = LOGAND(op_RSHIFT_33, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_41 = SHIFTRA(CAST(64, MSB(CAST(32, MSB(op_AND_35), DUP(op_AND_35))), CAST(32, MSB(DUP(op_AND_35)), DUP(op_AND_35))), SN(32, 0)); + RzILOpPure *op_AND_44 = LOGAND(op_RSHIFT_41, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_47 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_26), DUP(op_AND_26))), CAST(16, MSB(DUP(op_AND_26)), DUP(op_AND_26))), CAST(32, MSB(CAST(16, MSB(op_AND_44), DUP(op_AND_44))), CAST(16, MSB(DUP(op_AND_44)), DUP(op_AND_44)))); + RzILOpPure *op_LSHIFT_50 = SHIFTL0(CAST(64, MSB(op_MUL_47), DUP(op_MUL_47)), SN(32, 1)); + RzILOpPure *op_ADD_51 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_16), DUP(op_AND_16))), CAST(32, MSB(DUP(op_AND_16)), DUP(op_AND_16))), op_LSHIFT_50); + RzILOpPure *op_RSHIFT_55 = SHIFTRA(DUP(Rss), SN(32, 0x30)); + RzILOpPure *op_AND_58 = LOGAND(op_RSHIFT_55, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_64 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_66 = LOGAND(op_RSHIFT_64, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_72 = SHIFTRA(CAST(64, MSB(CAST(32, MSB(op_AND_66), DUP(op_AND_66))), CAST(32, MSB(DUP(op_AND_66)), DUP(op_AND_66))), SN(32, 16)); + RzILOpPure *op_AND_75 = LOGAND(op_RSHIFT_72, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_78 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_58), DUP(op_AND_58))), CAST(16, MSB(DUP(op_AND_58)), DUP(op_AND_58))), CAST(32, MSB(CAST(16, MSB(op_AND_75), DUP(op_AND_75))), CAST(16, MSB(DUP(op_AND_75)), DUP(op_AND_75)))); + RzILOpPure 
*op_LSHIFT_81 = SHIFTL0(CAST(64, MSB(op_MUL_78), DUP(op_MUL_78)), SN(32, 1)); + RzILOpPure *op_ADD_82 = ADD(op_ADD_51, op_LSHIFT_81); + RzILOpPure *op_RSHIFT_91 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_93 = LOGAND(op_RSHIFT_91, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_99 = SHIFTRA(DUP(Rss), SN(32, 16)); + RzILOpPure *op_AND_102 = LOGAND(op_RSHIFT_99, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_108 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_110 = LOGAND(op_RSHIFT_108, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_116 = SHIFTRA(CAST(64, MSB(CAST(32, MSB(op_AND_110), DUP(op_AND_110))), CAST(32, MSB(DUP(op_AND_110)), DUP(op_AND_110))), SN(32, 0)); + RzILOpPure *op_AND_119 = LOGAND(op_RSHIFT_116, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_122 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_102), DUP(op_AND_102))), CAST(16, MSB(DUP(op_AND_102)), DUP(op_AND_102))), CAST(32, MSB(CAST(16, MSB(op_AND_119), DUP(op_AND_119))), CAST(16, MSB(DUP(op_AND_119)), DUP(op_AND_119)))); + RzILOpPure *op_LSHIFT_125 = SHIFTL0(CAST(64, MSB(op_MUL_122), DUP(op_MUL_122)), SN(32, 1)); + RzILOpPure *op_ADD_126 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_93), DUP(op_AND_93))), CAST(32, MSB(DUP(op_AND_93)), DUP(op_AND_93))), op_LSHIFT_125); + RzILOpPure *op_RSHIFT_130 = SHIFTRA(DUP(Rss), SN(32, 0x30)); + RzILOpPure *op_AND_133 = LOGAND(op_RSHIFT_130, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_139 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_141 = LOGAND(op_RSHIFT_139, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_147 = SHIFTRA(CAST(64, MSB(CAST(32, MSB(op_AND_141), DUP(op_AND_141))), CAST(32, MSB(DUP(op_AND_141)), DUP(op_AND_141))), SN(32, 16)); + RzILOpPure *op_AND_150 = LOGAND(op_RSHIFT_147, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_153 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_133), DUP(op_AND_133))), CAST(16, MSB(DUP(op_AND_133)), 
DUP(op_AND_133))), CAST(32, MSB(CAST(16, MSB(op_AND_150), DUP(op_AND_150))), CAST(16, MSB(DUP(op_AND_150)), DUP(op_AND_150)))); + RzILOpPure *op_LSHIFT_156 = SHIFTL0(CAST(64, MSB(op_MUL_153), DUP(op_MUL_153)), SN(32, 1)); + RzILOpPure *op_ADD_157 = ADD(op_ADD_126, op_LSHIFT_156); + RzILOpPure *op_EQ_158 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_ADD_82), SN(32, 0), SN(32, 0x20)), op_ADD_157); + RzILOpPure *op_RSHIFT_235 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_237 = LOGAND(op_RSHIFT_235, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_243 = SHIFTRA(DUP(Rss), SN(32, 16)); + RzILOpPure *op_AND_246 = LOGAND(op_RSHIFT_243, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_252 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_254 = LOGAND(op_RSHIFT_252, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_260 = SHIFTRA(CAST(64, MSB(CAST(32, MSB(op_AND_254), DUP(op_AND_254))), CAST(32, MSB(DUP(op_AND_254)), DUP(op_AND_254))), SN(32, 0)); + RzILOpPure *op_AND_263 = LOGAND(op_RSHIFT_260, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_266 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_246), DUP(op_AND_246))), CAST(16, MSB(DUP(op_AND_246)), DUP(op_AND_246))), CAST(32, MSB(CAST(16, MSB(op_AND_263), DUP(op_AND_263))), CAST(16, MSB(DUP(op_AND_263)), DUP(op_AND_263)))); + RzILOpPure *op_LSHIFT_269 = SHIFTL0(CAST(64, MSB(op_MUL_266), DUP(op_MUL_266)), SN(32, 1)); + RzILOpPure *op_ADD_270 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_237), DUP(op_AND_237))), CAST(32, MSB(DUP(op_AND_237)), DUP(op_AND_237))), op_LSHIFT_269); + RzILOpPure *op_RSHIFT_274 = SHIFTRA(DUP(Rss), SN(32, 0x30)); + RzILOpPure *op_AND_277 = LOGAND(op_RSHIFT_274, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_283 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_285 = LOGAND(op_RSHIFT_283, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_291 = SHIFTRA(CAST(64, MSB(CAST(32, MSB(op_AND_285), DUP(op_AND_285))), CAST(32, 
MSB(DUP(op_AND_285)), DUP(op_AND_285))), SN(32, 16)); + RzILOpPure *op_AND_294 = LOGAND(op_RSHIFT_291, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_297 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_277), DUP(op_AND_277))), CAST(16, MSB(DUP(op_AND_277)), DUP(op_AND_277))), CAST(32, MSB(CAST(16, MSB(op_AND_294), DUP(op_AND_294))), CAST(16, MSB(DUP(op_AND_294)), DUP(op_AND_294)))); + RzILOpPure *op_LSHIFT_300 = SHIFTL0(CAST(64, MSB(op_MUL_297), DUP(op_MUL_297)), SN(32, 1)); + RzILOpPure *op_ADD_301 = ADD(op_ADD_270, op_LSHIFT_300); + RzILOpPure *op_LT_304 = SLT(op_ADD_301, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_309 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_310 = NEG(op_LSHIFT_309); + RzILOpPure *op_LSHIFT_315 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_318 = SUB(op_LSHIFT_315, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_319 = ITE(op_LT_304, op_NEG_310, op_SUB_318); + RzILOpEffect *gcc_expr_320 = BRANCH(op_EQ_158, EMPTY(), set_usr_field_call_231); + + // h_tmp421 = HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff))) >> 0x0) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff))) >> 0x10) & ((st64) 0xffff))))) << 0x1)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff))) >> 0x0) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff))) >> 0x10) & ((st64) 0xffff))))) << 0x1))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + 
(((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff))) >> 0x0) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff))) >> 0x10) & ((st64) 0xffff))))) << 0x1) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_322 = SETL("h_tmp421", cond_319); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx > ...; + RzILOpEffect *seq_323 = SEQN(2, gcc_expr_320, op_ASSIGN_hybrid_tmp_322); + + // Rxx = ((Rxx & (~(0xffffffff << 0x20))) | ((((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff))) >> 0x0) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff))) >> 0x10) & ((st64) 0xffff))))) << 0x1)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff))) >> 0x0) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff))) >> 0x10) & ((st64) 0xffff))))) << 0x1)) ? 
((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff))) >> 0x0) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff))) >> 0x10) & ((st64) 0xffff))))) << 0x1) : h_tmp421) & 0xffffffff) << 0x20)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0x20)); + RzILOpPure *op_NOT_6 = LOGNOT(op_LSHIFT_5); + RzILOpPure *op_AND_7 = LOGAND(READ_REG(pkt, Rxx_op, false), op_NOT_6); + RzILOpPure *op_RSHIFT_162 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_164 = LOGAND(op_RSHIFT_162, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_170 = SHIFTRA(DUP(Rss), SN(32, 16)); + RzILOpPure *op_AND_173 = LOGAND(op_RSHIFT_170, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_179 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_181 = LOGAND(op_RSHIFT_179, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_187 = SHIFTRA(CAST(64, MSB(CAST(32, MSB(op_AND_181), DUP(op_AND_181))), CAST(32, MSB(DUP(op_AND_181)), DUP(op_AND_181))), SN(32, 0)); + RzILOpPure *op_AND_190 = LOGAND(op_RSHIFT_187, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_193 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_173), DUP(op_AND_173))), CAST(16, MSB(DUP(op_AND_173)), DUP(op_AND_173))), CAST(32, MSB(CAST(16, MSB(op_AND_190), DUP(op_AND_190))), CAST(16, MSB(DUP(op_AND_190)), DUP(op_AND_190)))); + RzILOpPure *op_LSHIFT_196 = SHIFTL0(CAST(64, MSB(op_MUL_193), DUP(op_MUL_193)), SN(32, 1)); + RzILOpPure *op_ADD_197 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_164), DUP(op_AND_164))), CAST(32, MSB(DUP(op_AND_164)), DUP(op_AND_164))), op_LSHIFT_196); + RzILOpPure *op_RSHIFT_201 = SHIFTRA(DUP(Rss), SN(32, 0x30)); + RzILOpPure *op_AND_204 = LOGAND(op_RSHIFT_201, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_210 = 
SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_212 = LOGAND(op_RSHIFT_210, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_218 = SHIFTRA(CAST(64, MSB(CAST(32, MSB(op_AND_212), DUP(op_AND_212))), CAST(32, MSB(DUP(op_AND_212)), DUP(op_AND_212))), SN(32, 16)); + RzILOpPure *op_AND_221 = LOGAND(op_RSHIFT_218, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_224 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_204), DUP(op_AND_204))), CAST(16, MSB(DUP(op_AND_204)), DUP(op_AND_204))), CAST(32, MSB(CAST(16, MSB(op_AND_221), DUP(op_AND_221))), CAST(16, MSB(DUP(op_AND_221)), DUP(op_AND_221)))); + RzILOpPure *op_LSHIFT_227 = SHIFTL0(CAST(64, MSB(op_MUL_224), DUP(op_MUL_224)), SN(32, 1)); + RzILOpPure *op_ADD_228 = ADD(op_ADD_197, op_LSHIFT_227); + RzILOpPure *cond_324 = ITE(DUP(op_EQ_158), op_ADD_228, VARL("h_tmp421")); + RzILOpPure *op_AND_326 = LOGAND(cond_324, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_330 = SHIFTL0(op_AND_326, SN(32, 0x20)); + RzILOpPure *op_OR_331 = LOGOR(op_AND_7, op_LSHIFT_330); + RzILOpEffect *op_ASSIGN_332 = WRITE_REG(bundle, Rxx_op, op_OR_331); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((R ...; + RzILOpEffect *seq_333 = SEQN(2, seq_323, op_ASSIGN_332); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_563 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff))) >> 0x0) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff))) >> 0x10) & ((st64) 0xffff))))) << 0x1)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) 
((st32) ((Rtt >> 0x20) & 0xffffffff))) >> 0x0) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff))) >> 0x10) & ((st64) 0xffff))))) << 0x1))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff))) >> 0x0) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff))) >> 0x10) & ((st64) 0xffff))))) << 0x1) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_348 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_350 = LOGAND(op_RSHIFT_348, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_356 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_359 = LOGAND(op_RSHIFT_356, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_365 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_367 = LOGAND(op_RSHIFT_365, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_373 = SHIFTRA(CAST(64, MSB(CAST(32, MSB(op_AND_367), DUP(op_AND_367))), CAST(32, MSB(DUP(op_AND_367)), DUP(op_AND_367))), SN(32, 0)); + RzILOpPure *op_AND_376 = LOGAND(op_RSHIFT_373, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_379 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_359), DUP(op_AND_359))), CAST(16, MSB(DUP(op_AND_359)), DUP(op_AND_359))), CAST(32, MSB(CAST(16, MSB(op_AND_376), DUP(op_AND_376))), CAST(16, MSB(DUP(op_AND_376)), DUP(op_AND_376)))); + RzILOpPure *op_LSHIFT_382 = SHIFTL0(CAST(64, MSB(op_MUL_379), DUP(op_MUL_379)), SN(32, 1)); + RzILOpPure *op_ADD_383 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_350), DUP(op_AND_350))), CAST(32, MSB(DUP(op_AND_350)), DUP(op_AND_350))), op_LSHIFT_382); + RzILOpPure *op_RSHIFT_387 = 
SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_390 = LOGAND(op_RSHIFT_387, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_396 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_398 = LOGAND(op_RSHIFT_396, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_404 = SHIFTRA(CAST(64, MSB(CAST(32, MSB(op_AND_398), DUP(op_AND_398))), CAST(32, MSB(DUP(op_AND_398)), DUP(op_AND_398))), SN(32, 16)); + RzILOpPure *op_AND_407 = LOGAND(op_RSHIFT_404, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_410 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_390), DUP(op_AND_390))), CAST(16, MSB(DUP(op_AND_390)), DUP(op_AND_390))), CAST(32, MSB(CAST(16, MSB(op_AND_407), DUP(op_AND_407))), CAST(16, MSB(DUP(op_AND_407)), DUP(op_AND_407)))); + RzILOpPure *op_LSHIFT_413 = SHIFTL0(CAST(64, MSB(op_MUL_410), DUP(op_MUL_410)), SN(32, 1)); + RzILOpPure *op_ADD_414 = ADD(op_ADD_383, op_LSHIFT_413); + RzILOpPure *op_RSHIFT_423 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_425 = LOGAND(op_RSHIFT_423, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_431 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_434 = LOGAND(op_RSHIFT_431, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_440 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_442 = LOGAND(op_RSHIFT_440, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_448 = SHIFTRA(CAST(64, MSB(CAST(32, MSB(op_AND_442), DUP(op_AND_442))), CAST(32, MSB(DUP(op_AND_442)), DUP(op_AND_442))), SN(32, 0)); + RzILOpPure *op_AND_451 = LOGAND(op_RSHIFT_448, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_454 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_434), DUP(op_AND_434))), CAST(16, MSB(DUP(op_AND_434)), DUP(op_AND_434))), CAST(32, MSB(CAST(16, MSB(op_AND_451), DUP(op_AND_451))), CAST(16, MSB(DUP(op_AND_451)), DUP(op_AND_451)))); + RzILOpPure *op_LSHIFT_457 = SHIFTL0(CAST(64, MSB(op_MUL_454), DUP(op_MUL_454)), SN(32, 1)); + RzILOpPure *op_ADD_458 = ADD(CAST(64, 
MSB(CAST(32, MSB(op_AND_425), DUP(op_AND_425))), CAST(32, MSB(DUP(op_AND_425)), DUP(op_AND_425))), op_LSHIFT_457); + RzILOpPure *op_RSHIFT_462 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_465 = LOGAND(op_RSHIFT_462, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_471 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_473 = LOGAND(op_RSHIFT_471, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_479 = SHIFTRA(CAST(64, MSB(CAST(32, MSB(op_AND_473), DUP(op_AND_473))), CAST(32, MSB(DUP(op_AND_473)), DUP(op_AND_473))), SN(32, 16)); + RzILOpPure *op_AND_482 = LOGAND(op_RSHIFT_479, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_485 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_465), DUP(op_AND_465))), CAST(16, MSB(DUP(op_AND_465)), DUP(op_AND_465))), CAST(32, MSB(CAST(16, MSB(op_AND_482), DUP(op_AND_482))), CAST(16, MSB(DUP(op_AND_482)), DUP(op_AND_482)))); + RzILOpPure *op_LSHIFT_488 = SHIFTL0(CAST(64, MSB(op_MUL_485), DUP(op_MUL_485)), SN(32, 1)); + RzILOpPure *op_ADD_489 = ADD(op_ADD_458, op_LSHIFT_488); + RzILOpPure *op_EQ_490 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_ADD_414), SN(32, 0), SN(32, 0x20)), op_ADD_489); + RzILOpPure *op_RSHIFT_567 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_569 = LOGAND(op_RSHIFT_567, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_575 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_578 = LOGAND(op_RSHIFT_575, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_584 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_586 = LOGAND(op_RSHIFT_584, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_592 = SHIFTRA(CAST(64, MSB(CAST(32, MSB(op_AND_586), DUP(op_AND_586))), CAST(32, MSB(DUP(op_AND_586)), DUP(op_AND_586))), SN(32, 0)); + RzILOpPure *op_AND_595 = LOGAND(op_RSHIFT_592, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_598 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_578), DUP(op_AND_578))), CAST(16, MSB(DUP(op_AND_578)), 
DUP(op_AND_578))), CAST(32, MSB(CAST(16, MSB(op_AND_595), DUP(op_AND_595))), CAST(16, MSB(DUP(op_AND_595)), DUP(op_AND_595)))); + RzILOpPure *op_LSHIFT_601 = SHIFTL0(CAST(64, MSB(op_MUL_598), DUP(op_MUL_598)), SN(32, 1)); + RzILOpPure *op_ADD_602 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_569), DUP(op_AND_569))), CAST(32, MSB(DUP(op_AND_569)), DUP(op_AND_569))), op_LSHIFT_601); + RzILOpPure *op_RSHIFT_606 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_609 = LOGAND(op_RSHIFT_606, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_615 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_617 = LOGAND(op_RSHIFT_615, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_623 = SHIFTRA(CAST(64, MSB(CAST(32, MSB(op_AND_617), DUP(op_AND_617))), CAST(32, MSB(DUP(op_AND_617)), DUP(op_AND_617))), SN(32, 16)); + RzILOpPure *op_AND_626 = LOGAND(op_RSHIFT_623, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_629 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_609), DUP(op_AND_609))), CAST(16, MSB(DUP(op_AND_609)), DUP(op_AND_609))), CAST(32, MSB(CAST(16, MSB(op_AND_626), DUP(op_AND_626))), CAST(16, MSB(DUP(op_AND_626)), DUP(op_AND_626)))); + RzILOpPure *op_LSHIFT_632 = SHIFTL0(CAST(64, MSB(op_MUL_629), DUP(op_MUL_629)), SN(32, 1)); + RzILOpPure *op_ADD_633 = ADD(op_ADD_602, op_LSHIFT_632); + RzILOpPure *op_LT_636 = SLT(op_ADD_633, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_641 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_642 = NEG(op_LSHIFT_641); + RzILOpPure *op_LSHIFT_647 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_650 = SUB(op_LSHIFT_647, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_651 = ITE(op_LT_636, op_NEG_642, op_SUB_650); + RzILOpEffect *gcc_expr_652 = BRANCH(op_EQ_490, EMPTY(), set_usr_field_call_563); + + // h_tmp422 = HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) 
((((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff))) >> 0x0) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff))) >> 0x10) & ((st64) 0xffff))))) << 0x1)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff))) >> 0x0) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff))) >> 0x10) & ((st64) 0xffff))))) << 0x1))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff))) >> 0x0) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff))) >> 0x10) & ((st64) 0xffff))))) << 0x1) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_654 = SETL("h_tmp422", cond_651); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx > ...; + RzILOpEffect *seq_655 = SEQN(2, gcc_expr_652, op_ASSIGN_hybrid_tmp_654); + + // Rxx = ((Rxx & (~(0xffffffff << 0x0))) | ((((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff))) >> 0x0) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff))) >> 0x10) & ((st64) 0xffff))))) << 0x1)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff))) >> 0x0) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff))) >> 0x10) & ((st64) 0xffff))))) << 0x1)) ? 
((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff))) >> 0x0) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff))) >> 0x10) & ((st64) 0xffff))))) << 0x1) : h_tmp422) & 0xffffffff) << 0x0)); + RzILOpPure *op_LSHIFT_339 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0)); + RzILOpPure *op_NOT_340 = LOGNOT(op_LSHIFT_339); + RzILOpPure *op_AND_341 = LOGAND(READ_REG(pkt, Rxx_op, false), op_NOT_340); + RzILOpPure *op_RSHIFT_494 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_496 = LOGAND(op_RSHIFT_494, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_502 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_505 = LOGAND(op_RSHIFT_502, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_511 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_513 = LOGAND(op_RSHIFT_511, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_519 = SHIFTRA(CAST(64, MSB(CAST(32, MSB(op_AND_513), DUP(op_AND_513))), CAST(32, MSB(DUP(op_AND_513)), DUP(op_AND_513))), SN(32, 0)); + RzILOpPure *op_AND_522 = LOGAND(op_RSHIFT_519, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_525 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_505), DUP(op_AND_505))), CAST(16, MSB(DUP(op_AND_505)), DUP(op_AND_505))), CAST(32, MSB(CAST(16, MSB(op_AND_522), DUP(op_AND_522))), CAST(16, MSB(DUP(op_AND_522)), DUP(op_AND_522)))); + RzILOpPure *op_LSHIFT_528 = SHIFTL0(CAST(64, MSB(op_MUL_525), DUP(op_MUL_525)), SN(32, 1)); + RzILOpPure *op_ADD_529 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_496), DUP(op_AND_496))), CAST(32, MSB(DUP(op_AND_496)), DUP(op_AND_496))), op_LSHIFT_528); + RzILOpPure *op_RSHIFT_533 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_536 = LOGAND(op_RSHIFT_533, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_542 = 
SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_544 = LOGAND(op_RSHIFT_542, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_550 = SHIFTRA(CAST(64, MSB(CAST(32, MSB(op_AND_544), DUP(op_AND_544))), CAST(32, MSB(DUP(op_AND_544)), DUP(op_AND_544))), SN(32, 16)); + RzILOpPure *op_AND_553 = LOGAND(op_RSHIFT_550, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_556 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_536), DUP(op_AND_536))), CAST(16, MSB(DUP(op_AND_536)), DUP(op_AND_536))), CAST(32, MSB(CAST(16, MSB(op_AND_553), DUP(op_AND_553))), CAST(16, MSB(DUP(op_AND_553)), DUP(op_AND_553)))); + RzILOpPure *op_LSHIFT_559 = SHIFTL0(CAST(64, MSB(op_MUL_556), DUP(op_MUL_556)), SN(32, 1)); + RzILOpPure *op_ADD_560 = ADD(op_ADD_529, op_LSHIFT_559); + RzILOpPure *cond_656 = ITE(DUP(op_EQ_490), op_ADD_560, VARL("h_tmp422")); + RzILOpPure *op_AND_658 = LOGAND(cond_656, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_662 = SHIFTL0(op_AND_658, SN(32, 0)); + RzILOpPure *op_OR_663 = LOGOR(op_AND_341, op_LSHIFT_662); + RzILOpEffect *op_ASSIGN_664 = WRITE_REG(bundle, Rxx_op, op_OR_663); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((R ...; + RzILOpEffect *seq_665 = SEQN(2, seq_655, op_ASSIGN_664); + + RzILOpEffect *instruction_sequence = SEQN(2, seq_333, seq_665); + return instruction_sequence; +} + +// Rxx += vrcmpys(Rss,Rtt):<<1:sat:raw:lo +RzILOpEffect *hex_il_op_m2_vrcmpys_acc_s1_l(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rxx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_231 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if 
((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff))) >> 0x0) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff))) >> 0x10) & ((st64) 0xffff))))) << 0x1)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff))) >> 0x0) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff))) >> 0x10) & ((st64) 0xffff))))) << 0x1))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff))) >> 0x0) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff))) >> 0x10) & ((st64) 0xffff))))) << 0x1) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_23 = SHIFTRA(Rss, SN(32, 16)); + RzILOpPure *op_AND_26 = LOGAND(op_RSHIFT_23, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_33 = SHIFTRA(Rtt, SN(32, 0)); + RzILOpPure *op_AND_35 = LOGAND(op_RSHIFT_33, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_41 = SHIFTRA(CAST(64, MSB(CAST(32, MSB(op_AND_35), DUP(op_AND_35))), CAST(32, MSB(DUP(op_AND_35)), DUP(op_AND_35))), SN(32, 0)); + RzILOpPure *op_AND_44 = LOGAND(op_RSHIFT_41, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_47 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_26), DUP(op_AND_26))), CAST(16, MSB(DUP(op_AND_26)), DUP(op_AND_26))), CAST(32, MSB(CAST(16, MSB(op_AND_44), DUP(op_AND_44))), CAST(16, MSB(DUP(op_AND_44)), DUP(op_AND_44)))); + RzILOpPure *op_LSHIFT_50 = SHIFTL0(CAST(64, MSB(op_MUL_47), DUP(op_MUL_47)), SN(32, 1)); + RzILOpPure *op_ADD_51 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_16), DUP(op_AND_16))), CAST(32, MSB(DUP(op_AND_16)), DUP(op_AND_16))), op_LSHIFT_50); + RzILOpPure *op_RSHIFT_55 = SHIFTRA(DUP(Rss), SN(32, 0x30)); + RzILOpPure *op_AND_58 = LOGAND(op_RSHIFT_55, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_64 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_66 = LOGAND(op_RSHIFT_64, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_72 = SHIFTRA(CAST(64, MSB(CAST(32, MSB(op_AND_66), DUP(op_AND_66))), CAST(32, MSB(DUP(op_AND_66)), DUP(op_AND_66))), SN(32, 16)); + RzILOpPure *op_AND_75 = LOGAND(op_RSHIFT_72, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_78 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_58), DUP(op_AND_58))), CAST(16, MSB(DUP(op_AND_58)), DUP(op_AND_58))), CAST(32, MSB(CAST(16, MSB(op_AND_75), DUP(op_AND_75))), CAST(16, MSB(DUP(op_AND_75)), DUP(op_AND_75)))); + RzILOpPure *op_LSHIFT_81 
= SHIFTL0(CAST(64, MSB(op_MUL_78), DUP(op_MUL_78)), SN(32, 1)); + RzILOpPure *op_ADD_82 = ADD(op_ADD_51, op_LSHIFT_81); + RzILOpPure *op_RSHIFT_91 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_93 = LOGAND(op_RSHIFT_91, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_99 = SHIFTRA(DUP(Rss), SN(32, 16)); + RzILOpPure *op_AND_102 = LOGAND(op_RSHIFT_99, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_108 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_110 = LOGAND(op_RSHIFT_108, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_116 = SHIFTRA(CAST(64, MSB(CAST(32, MSB(op_AND_110), DUP(op_AND_110))), CAST(32, MSB(DUP(op_AND_110)), DUP(op_AND_110))), SN(32, 0)); + RzILOpPure *op_AND_119 = LOGAND(op_RSHIFT_116, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_122 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_102), DUP(op_AND_102))), CAST(16, MSB(DUP(op_AND_102)), DUP(op_AND_102))), CAST(32, MSB(CAST(16, MSB(op_AND_119), DUP(op_AND_119))), CAST(16, MSB(DUP(op_AND_119)), DUP(op_AND_119)))); + RzILOpPure *op_LSHIFT_125 = SHIFTL0(CAST(64, MSB(op_MUL_122), DUP(op_MUL_122)), SN(32, 1)); + RzILOpPure *op_ADD_126 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_93), DUP(op_AND_93))), CAST(32, MSB(DUP(op_AND_93)), DUP(op_AND_93))), op_LSHIFT_125); + RzILOpPure *op_RSHIFT_130 = SHIFTRA(DUP(Rss), SN(32, 0x30)); + RzILOpPure *op_AND_133 = LOGAND(op_RSHIFT_130, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_139 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_141 = LOGAND(op_RSHIFT_139, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_147 = SHIFTRA(CAST(64, MSB(CAST(32, MSB(op_AND_141), DUP(op_AND_141))), CAST(32, MSB(DUP(op_AND_141)), DUP(op_AND_141))), SN(32, 16)); + RzILOpPure *op_AND_150 = LOGAND(op_RSHIFT_147, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_153 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_133), DUP(op_AND_133))), CAST(16, MSB(DUP(op_AND_133)), DUP(op_AND_133))), CAST(32, 
MSB(CAST(16, MSB(op_AND_150), DUP(op_AND_150))), CAST(16, MSB(DUP(op_AND_150)), DUP(op_AND_150)))); + RzILOpPure *op_LSHIFT_156 = SHIFTL0(CAST(64, MSB(op_MUL_153), DUP(op_MUL_153)), SN(32, 1)); + RzILOpPure *op_ADD_157 = ADD(op_ADD_126, op_LSHIFT_156); + RzILOpPure *op_EQ_158 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_ADD_82), SN(32, 0), SN(32, 0x20)), op_ADD_157); + RzILOpPure *op_RSHIFT_235 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_237 = LOGAND(op_RSHIFT_235, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_243 = SHIFTRA(DUP(Rss), SN(32, 16)); + RzILOpPure *op_AND_246 = LOGAND(op_RSHIFT_243, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_252 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_254 = LOGAND(op_RSHIFT_252, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_260 = SHIFTRA(CAST(64, MSB(CAST(32, MSB(op_AND_254), DUP(op_AND_254))), CAST(32, MSB(DUP(op_AND_254)), DUP(op_AND_254))), SN(32, 0)); + RzILOpPure *op_AND_263 = LOGAND(op_RSHIFT_260, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_266 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_246), DUP(op_AND_246))), CAST(16, MSB(DUP(op_AND_246)), DUP(op_AND_246))), CAST(32, MSB(CAST(16, MSB(op_AND_263), DUP(op_AND_263))), CAST(16, MSB(DUP(op_AND_263)), DUP(op_AND_263)))); + RzILOpPure *op_LSHIFT_269 = SHIFTL0(CAST(64, MSB(op_MUL_266), DUP(op_MUL_266)), SN(32, 1)); + RzILOpPure *op_ADD_270 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_237), DUP(op_AND_237))), CAST(32, MSB(DUP(op_AND_237)), DUP(op_AND_237))), op_LSHIFT_269); + RzILOpPure *op_RSHIFT_274 = SHIFTRA(DUP(Rss), SN(32, 0x30)); + RzILOpPure *op_AND_277 = LOGAND(op_RSHIFT_274, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_283 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_285 = LOGAND(op_RSHIFT_283, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_291 = SHIFTRA(CAST(64, MSB(CAST(32, MSB(op_AND_285), DUP(op_AND_285))), CAST(32, MSB(DUP(op_AND_285)), DUP(op_AND_285))), SN(32, 
16)); + RzILOpPure *op_AND_294 = LOGAND(op_RSHIFT_291, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_297 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_277), DUP(op_AND_277))), CAST(16, MSB(DUP(op_AND_277)), DUP(op_AND_277))), CAST(32, MSB(CAST(16, MSB(op_AND_294), DUP(op_AND_294))), CAST(16, MSB(DUP(op_AND_294)), DUP(op_AND_294)))); + RzILOpPure *op_LSHIFT_300 = SHIFTL0(CAST(64, MSB(op_MUL_297), DUP(op_MUL_297)), SN(32, 1)); + RzILOpPure *op_ADD_301 = ADD(op_ADD_270, op_LSHIFT_300); + RzILOpPure *op_LT_304 = SLT(op_ADD_301, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_309 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_310 = NEG(op_LSHIFT_309); + RzILOpPure *op_LSHIFT_315 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_318 = SUB(op_LSHIFT_315, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_319 = ITE(op_LT_304, op_NEG_310, op_SUB_318); + RzILOpEffect *gcc_expr_320 = BRANCH(op_EQ_158, EMPTY(), set_usr_field_call_231); + + // h_tmp423 = HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff))) >> 0x0) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff))) >> 0x10) & ((st64) 0xffff))))) << 0x1)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff))) >> 0x0) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff))) >> 0x10) & ((st64) 0xffff))))) << 0x1))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 
0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff))) >> 0x0) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff))) >> 0x10) & ((st64) 0xffff))))) << 0x1) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_322 = SETL("h_tmp423", cond_319); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx > ...; + RzILOpEffect *seq_323 = SEQN(2, gcc_expr_320, op_ASSIGN_hybrid_tmp_322); + + // Rxx = ((Rxx & (~(0xffffffff << 0x20))) | ((((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff))) >> 0x0) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff))) >> 0x10) & ((st64) 0xffff))))) << 0x1)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff))) >> 0x0) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff))) >> 0x10) & ((st64) 0xffff))))) << 0x1)) ? 
((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff))) >> 0x0) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff))) >> 0x10) & ((st64) 0xffff))))) << 0x1) : h_tmp423) & 0xffffffff) << 0x20)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0x20)); + RzILOpPure *op_NOT_6 = LOGNOT(op_LSHIFT_5); + RzILOpPure *op_AND_7 = LOGAND(READ_REG(pkt, Rxx_op, false), op_NOT_6); + RzILOpPure *op_RSHIFT_162 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_164 = LOGAND(op_RSHIFT_162, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_170 = SHIFTRA(DUP(Rss), SN(32, 16)); + RzILOpPure *op_AND_173 = LOGAND(op_RSHIFT_170, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_179 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_181 = LOGAND(op_RSHIFT_179, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_187 = SHIFTRA(CAST(64, MSB(CAST(32, MSB(op_AND_181), DUP(op_AND_181))), CAST(32, MSB(DUP(op_AND_181)), DUP(op_AND_181))), SN(32, 0)); + RzILOpPure *op_AND_190 = LOGAND(op_RSHIFT_187, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_193 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_173), DUP(op_AND_173))), CAST(16, MSB(DUP(op_AND_173)), DUP(op_AND_173))), CAST(32, MSB(CAST(16, MSB(op_AND_190), DUP(op_AND_190))), CAST(16, MSB(DUP(op_AND_190)), DUP(op_AND_190)))); + RzILOpPure *op_LSHIFT_196 = SHIFTL0(CAST(64, MSB(op_MUL_193), DUP(op_MUL_193)), SN(32, 1)); + RzILOpPure *op_ADD_197 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_164), DUP(op_AND_164))), CAST(32, MSB(DUP(op_AND_164)), DUP(op_AND_164))), op_LSHIFT_196); + RzILOpPure *op_RSHIFT_201 = SHIFTRA(DUP(Rss), SN(32, 0x30)); + RzILOpPure *op_AND_204 = LOGAND(op_RSHIFT_201, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_210 = 
SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_212 = LOGAND(op_RSHIFT_210, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_218 = SHIFTRA(CAST(64, MSB(CAST(32, MSB(op_AND_212), DUP(op_AND_212))), CAST(32, MSB(DUP(op_AND_212)), DUP(op_AND_212))), SN(32, 16)); + RzILOpPure *op_AND_221 = LOGAND(op_RSHIFT_218, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_224 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_204), DUP(op_AND_204))), CAST(16, MSB(DUP(op_AND_204)), DUP(op_AND_204))), CAST(32, MSB(CAST(16, MSB(op_AND_221), DUP(op_AND_221))), CAST(16, MSB(DUP(op_AND_221)), DUP(op_AND_221)))); + RzILOpPure *op_LSHIFT_227 = SHIFTL0(CAST(64, MSB(op_MUL_224), DUP(op_MUL_224)), SN(32, 1)); + RzILOpPure *op_ADD_228 = ADD(op_ADD_197, op_LSHIFT_227); + RzILOpPure *cond_324 = ITE(DUP(op_EQ_158), op_ADD_228, VARL("h_tmp423")); + RzILOpPure *op_AND_326 = LOGAND(cond_324, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_330 = SHIFTL0(op_AND_326, SN(32, 0x20)); + RzILOpPure *op_OR_331 = LOGOR(op_AND_7, op_LSHIFT_330); + RzILOpEffect *op_ASSIGN_332 = WRITE_REG(bundle, Rxx_op, op_OR_331); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((R ...; + RzILOpEffect *seq_333 = SEQN(2, seq_323, op_ASSIGN_332); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_563 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff))) >> 0x0) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff))) >> 0x10) & ((st64) 0xffff))))) << 0x1)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) 
((Rtt >> 0x0) & 0xffffffff))) >> 0x0) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff))) >> 0x10) & ((st64) 0xffff))))) << 0x1))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff))) >> 0x0) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff))) >> 0x10) & ((st64) 0xffff))))) << 0x1) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_348 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_350 = LOGAND(op_RSHIFT_348, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_356 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_359 = LOGAND(op_RSHIFT_356, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_365 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_367 = LOGAND(op_RSHIFT_365, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_373 = SHIFTRA(CAST(64, MSB(CAST(32, MSB(op_AND_367), DUP(op_AND_367))), CAST(32, MSB(DUP(op_AND_367)), DUP(op_AND_367))), SN(32, 0)); + RzILOpPure *op_AND_376 = LOGAND(op_RSHIFT_373, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_379 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_359), DUP(op_AND_359))), CAST(16, MSB(DUP(op_AND_359)), DUP(op_AND_359))), CAST(32, MSB(CAST(16, MSB(op_AND_376), DUP(op_AND_376))), CAST(16, MSB(DUP(op_AND_376)), DUP(op_AND_376)))); + RzILOpPure *op_LSHIFT_382 = SHIFTL0(CAST(64, MSB(op_MUL_379), DUP(op_MUL_379)), SN(32, 1)); + RzILOpPure *op_ADD_383 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_350), DUP(op_AND_350))), CAST(32, MSB(DUP(op_AND_350)), DUP(op_AND_350))), op_LSHIFT_382); + RzILOpPure *op_RSHIFT_387 = SHIFTRA(DUP(Rss), 
SN(32, 0x20)); + RzILOpPure *op_AND_390 = LOGAND(op_RSHIFT_387, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_396 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_398 = LOGAND(op_RSHIFT_396, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_404 = SHIFTRA(CAST(64, MSB(CAST(32, MSB(op_AND_398), DUP(op_AND_398))), CAST(32, MSB(DUP(op_AND_398)), DUP(op_AND_398))), SN(32, 16)); + RzILOpPure *op_AND_407 = LOGAND(op_RSHIFT_404, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_410 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_390), DUP(op_AND_390))), CAST(16, MSB(DUP(op_AND_390)), DUP(op_AND_390))), CAST(32, MSB(CAST(16, MSB(op_AND_407), DUP(op_AND_407))), CAST(16, MSB(DUP(op_AND_407)), DUP(op_AND_407)))); + RzILOpPure *op_LSHIFT_413 = SHIFTL0(CAST(64, MSB(op_MUL_410), DUP(op_MUL_410)), SN(32, 1)); + RzILOpPure *op_ADD_414 = ADD(op_ADD_383, op_LSHIFT_413); + RzILOpPure *op_RSHIFT_423 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_425 = LOGAND(op_RSHIFT_423, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_431 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_434 = LOGAND(op_RSHIFT_431, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_440 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_442 = LOGAND(op_RSHIFT_440, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_448 = SHIFTRA(CAST(64, MSB(CAST(32, MSB(op_AND_442), DUP(op_AND_442))), CAST(32, MSB(DUP(op_AND_442)), DUP(op_AND_442))), SN(32, 0)); + RzILOpPure *op_AND_451 = LOGAND(op_RSHIFT_448, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_454 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_434), DUP(op_AND_434))), CAST(16, MSB(DUP(op_AND_434)), DUP(op_AND_434))), CAST(32, MSB(CAST(16, MSB(op_AND_451), DUP(op_AND_451))), CAST(16, MSB(DUP(op_AND_451)), DUP(op_AND_451)))); + RzILOpPure *op_LSHIFT_457 = SHIFTL0(CAST(64, MSB(op_MUL_454), DUP(op_MUL_454)), SN(32, 1)); + RzILOpPure *op_ADD_458 = ADD(CAST(64, MSB(CAST(32, 
MSB(op_AND_425), DUP(op_AND_425))), CAST(32, MSB(DUP(op_AND_425)), DUP(op_AND_425))), op_LSHIFT_457); + RzILOpPure *op_RSHIFT_462 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_465 = LOGAND(op_RSHIFT_462, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_471 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_473 = LOGAND(op_RSHIFT_471, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_479 = SHIFTRA(CAST(64, MSB(CAST(32, MSB(op_AND_473), DUP(op_AND_473))), CAST(32, MSB(DUP(op_AND_473)), DUP(op_AND_473))), SN(32, 16)); + RzILOpPure *op_AND_482 = LOGAND(op_RSHIFT_479, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_485 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_465), DUP(op_AND_465))), CAST(16, MSB(DUP(op_AND_465)), DUP(op_AND_465))), CAST(32, MSB(CAST(16, MSB(op_AND_482), DUP(op_AND_482))), CAST(16, MSB(DUP(op_AND_482)), DUP(op_AND_482)))); + RzILOpPure *op_LSHIFT_488 = SHIFTL0(CAST(64, MSB(op_MUL_485), DUP(op_MUL_485)), SN(32, 1)); + RzILOpPure *op_ADD_489 = ADD(op_ADD_458, op_LSHIFT_488); + RzILOpPure *op_EQ_490 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_ADD_414), SN(32, 0), SN(32, 0x20)), op_ADD_489); + RzILOpPure *op_RSHIFT_567 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_569 = LOGAND(op_RSHIFT_567, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_575 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_578 = LOGAND(op_RSHIFT_575, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_584 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_586 = LOGAND(op_RSHIFT_584, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_592 = SHIFTRA(CAST(64, MSB(CAST(32, MSB(op_AND_586), DUP(op_AND_586))), CAST(32, MSB(DUP(op_AND_586)), DUP(op_AND_586))), SN(32, 0)); + RzILOpPure *op_AND_595 = LOGAND(op_RSHIFT_592, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_598 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_578), DUP(op_AND_578))), CAST(16, MSB(DUP(op_AND_578)), DUP(op_AND_578))), CAST(32, 
MSB(CAST(16, MSB(op_AND_595), DUP(op_AND_595))), CAST(16, MSB(DUP(op_AND_595)), DUP(op_AND_595)))); + RzILOpPure *op_LSHIFT_601 = SHIFTL0(CAST(64, MSB(op_MUL_598), DUP(op_MUL_598)), SN(32, 1)); + RzILOpPure *op_ADD_602 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_569), DUP(op_AND_569))), CAST(32, MSB(DUP(op_AND_569)), DUP(op_AND_569))), op_LSHIFT_601); + RzILOpPure *op_RSHIFT_606 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_609 = LOGAND(op_RSHIFT_606, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_615 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_617 = LOGAND(op_RSHIFT_615, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_623 = SHIFTRA(CAST(64, MSB(CAST(32, MSB(op_AND_617), DUP(op_AND_617))), CAST(32, MSB(DUP(op_AND_617)), DUP(op_AND_617))), SN(32, 16)); + RzILOpPure *op_AND_626 = LOGAND(op_RSHIFT_623, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_629 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_609), DUP(op_AND_609))), CAST(16, MSB(DUP(op_AND_609)), DUP(op_AND_609))), CAST(32, MSB(CAST(16, MSB(op_AND_626), DUP(op_AND_626))), CAST(16, MSB(DUP(op_AND_626)), DUP(op_AND_626)))); + RzILOpPure *op_LSHIFT_632 = SHIFTL0(CAST(64, MSB(op_MUL_629), DUP(op_MUL_629)), SN(32, 1)); + RzILOpPure *op_ADD_633 = ADD(op_ADD_602, op_LSHIFT_632); + RzILOpPure *op_LT_636 = SLT(op_ADD_633, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_641 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_642 = NEG(op_LSHIFT_641); + RzILOpPure *op_LSHIFT_647 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_650 = SUB(op_LSHIFT_647, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_651 = ITE(op_LT_636, op_NEG_642, op_SUB_650); + RzILOpEffect *gcc_expr_652 = BRANCH(op_EQ_490, EMPTY(), set_usr_field_call_563); + + // h_tmp424 = HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x0) & 
0xffffffff))) >> 0x0) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff))) >> 0x10) & ((st64) 0xffff))))) << 0x1)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff))) >> 0x0) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff))) >> 0x10) & ((st64) 0xffff))))) << 0x1))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff))) >> 0x0) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff))) >> 0x10) & ((st64) 0xffff))))) << 0x1) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_654 = SETL("h_tmp424", cond_651); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rxx > ...; + RzILOpEffect *seq_655 = SEQN(2, gcc_expr_652, op_ASSIGN_hybrid_tmp_654); + + // Rxx = ((Rxx & (~(0xffffffff << 0x0))) | ((((sextract64(((ut64) ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff))) >> 0x0) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff))) >> 0x10) & ((st64) 0xffff))))) << 0x1)), 0x0, 0x20) == ((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff))) >> 0x0) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff))) >> 0x10) & ((st64) 0xffff))))) << 0x1)) ? 
((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + (((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff))) >> 0x0) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff))) >> 0x10) & ((st64) 0xffff))))) << 0x1) : h_tmp424) & 0xffffffff) << 0x0)); + RzILOpPure *op_LSHIFT_339 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0)); + RzILOpPure *op_NOT_340 = LOGNOT(op_LSHIFT_339); + RzILOpPure *op_AND_341 = LOGAND(READ_REG(pkt, Rxx_op, false), op_NOT_340); + RzILOpPure *op_RSHIFT_494 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_496 = LOGAND(op_RSHIFT_494, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_502 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_505 = LOGAND(op_RSHIFT_502, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_511 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_513 = LOGAND(op_RSHIFT_511, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_519 = SHIFTRA(CAST(64, MSB(CAST(32, MSB(op_AND_513), DUP(op_AND_513))), CAST(32, MSB(DUP(op_AND_513)), DUP(op_AND_513))), SN(32, 0)); + RzILOpPure *op_AND_522 = LOGAND(op_RSHIFT_519, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_525 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_505), DUP(op_AND_505))), CAST(16, MSB(DUP(op_AND_505)), DUP(op_AND_505))), CAST(32, MSB(CAST(16, MSB(op_AND_522), DUP(op_AND_522))), CAST(16, MSB(DUP(op_AND_522)), DUP(op_AND_522)))); + RzILOpPure *op_LSHIFT_528 = SHIFTL0(CAST(64, MSB(op_MUL_525), DUP(op_MUL_525)), SN(32, 1)); + RzILOpPure *op_ADD_529 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_496), DUP(op_AND_496))), CAST(32, MSB(DUP(op_AND_496)), DUP(op_AND_496))), op_LSHIFT_528); + RzILOpPure *op_RSHIFT_533 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_536 = LOGAND(op_RSHIFT_533, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_542 = 
SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_544 = LOGAND(op_RSHIFT_542, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_550 = SHIFTRA(CAST(64, MSB(CAST(32, MSB(op_AND_544), DUP(op_AND_544))), CAST(32, MSB(DUP(op_AND_544)), DUP(op_AND_544))), SN(32, 16)); + RzILOpPure *op_AND_553 = LOGAND(op_RSHIFT_550, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_556 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_536), DUP(op_AND_536))), CAST(16, MSB(DUP(op_AND_536)), DUP(op_AND_536))), CAST(32, MSB(CAST(16, MSB(op_AND_553), DUP(op_AND_553))), CAST(16, MSB(DUP(op_AND_553)), DUP(op_AND_553)))); + RzILOpPure *op_LSHIFT_559 = SHIFTL0(CAST(64, MSB(op_MUL_556), DUP(op_MUL_556)), SN(32, 1)); + RzILOpPure *op_ADD_560 = ADD(op_ADD_529, op_LSHIFT_559); + RzILOpPure *cond_656 = ITE(DUP(op_EQ_490), op_ADD_560, VARL("h_tmp424")); + RzILOpPure *op_AND_658 = LOGAND(cond_656, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_662 = SHIFTL0(op_AND_658, SN(32, 0)); + RzILOpPure *op_OR_663 = LOGOR(op_AND_341, op_LSHIFT_662); + RzILOpEffect *op_ASSIGN_664 = WRITE_REG(bundle, Rxx_op, op_OR_663); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((R ...; + RzILOpEffect *seq_665 = SEQN(2, seq_655, op_ASSIGN_664); + + RzILOpEffect *instruction_sequence = SEQN(2, seq_333, seq_665); + return instruction_sequence; +} + +// Rdd = vrcmpys(Rss,Rtt):<<1:sat:raw:hi +RzILOpEffect *hex_il_op_m2_vrcmpys_s1_h(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_204 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) 
(((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff))) >> 0x0) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff))) >> 0x10) & ((st64) 0xffff))))) << 0x1)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff))) >> 0x0) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff))) >> 0x10) & ((st64) 0xffff))))) << 0x1))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff))) >> 0x0) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff))) >> 0x10) & ((st64) 0xffff))))) << 0x1) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_15 = SHIFTRA(Rss, SN(32, 16)); + RzILOpPure *op_AND_18 = LOGAND(op_RSHIFT_15, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_25 = SHIFTRA(Rtt, SN(32, 0x20)); + RzILOpPure *op_AND_27 = LOGAND(op_RSHIFT_25, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_33 = SHIFTRA(CAST(64, MSB(CAST(32, MSB(op_AND_27), DUP(op_AND_27))), CAST(32, MSB(DUP(op_AND_27)), DUP(op_AND_27))), SN(32, 0)); + RzILOpPure *op_AND_36 = LOGAND(op_RSHIFT_33, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_39 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_18), DUP(op_AND_18))), CAST(16, MSB(DUP(op_AND_18)), DUP(op_AND_18))), CAST(32, MSB(CAST(16, MSB(op_AND_36), DUP(op_AND_36))), CAST(16, MSB(DUP(op_AND_36)), DUP(op_AND_36)))); + RzILOpPure *op_LSHIFT_42 = SHIFTL0(CAST(64, MSB(op_MUL_39), DUP(op_MUL_39)), SN(32, 1)); + RzILOpPure *op_RSHIFT_46 = SHIFTRA(DUP(Rss), SN(32, 0x30)); + RzILOpPure *op_AND_49 = LOGAND(op_RSHIFT_46, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_55 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_57 = LOGAND(op_RSHIFT_55, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_63 = SHIFTRA(CAST(64, MSB(CAST(32, MSB(op_AND_57), DUP(op_AND_57))), CAST(32, MSB(DUP(op_AND_57)), DUP(op_AND_57))), SN(32, 16)); + RzILOpPure *op_AND_66 = LOGAND(op_RSHIFT_63, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_69 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_49), DUP(op_AND_49))), CAST(16, MSB(DUP(op_AND_49)), DUP(op_AND_49))), CAST(32, MSB(CAST(16, MSB(op_AND_66), DUP(op_AND_66))), CAST(16, MSB(DUP(op_AND_66)), DUP(op_AND_66)))); + RzILOpPure *op_LSHIFT_72 = SHIFTL0(CAST(64, MSB(op_MUL_69), DUP(op_MUL_69)), SN(32, 1)); + RzILOpPure *op_ADD_73 = ADD(op_LSHIFT_42, op_LSHIFT_72); + RzILOpPure *op_RSHIFT_82 = SHIFTRA(DUP(Rss), SN(32, 16)); + RzILOpPure *op_AND_85 = LOGAND(op_RSHIFT_82, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure 
*op_RSHIFT_91 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_93 = LOGAND(op_RSHIFT_91, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_99 = SHIFTRA(CAST(64, MSB(CAST(32, MSB(op_AND_93), DUP(op_AND_93))), CAST(32, MSB(DUP(op_AND_93)), DUP(op_AND_93))), SN(32, 0)); + RzILOpPure *op_AND_102 = LOGAND(op_RSHIFT_99, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_105 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_85), DUP(op_AND_85))), CAST(16, MSB(DUP(op_AND_85)), DUP(op_AND_85))), CAST(32, MSB(CAST(16, MSB(op_AND_102), DUP(op_AND_102))), CAST(16, MSB(DUP(op_AND_102)), DUP(op_AND_102)))); + RzILOpPure *op_LSHIFT_108 = SHIFTL0(CAST(64, MSB(op_MUL_105), DUP(op_MUL_105)), SN(32, 1)); + RzILOpPure *op_RSHIFT_112 = SHIFTRA(DUP(Rss), SN(32, 0x30)); + RzILOpPure *op_AND_115 = LOGAND(op_RSHIFT_112, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_121 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_123 = LOGAND(op_RSHIFT_121, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_129 = SHIFTRA(CAST(64, MSB(CAST(32, MSB(op_AND_123), DUP(op_AND_123))), CAST(32, MSB(DUP(op_AND_123)), DUP(op_AND_123))), SN(32, 16)); + RzILOpPure *op_AND_132 = LOGAND(op_RSHIFT_129, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_135 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_115), DUP(op_AND_115))), CAST(16, MSB(DUP(op_AND_115)), DUP(op_AND_115))), CAST(32, MSB(CAST(16, MSB(op_AND_132), DUP(op_AND_132))), CAST(16, MSB(DUP(op_AND_132)), DUP(op_AND_132)))); + RzILOpPure *op_LSHIFT_138 = SHIFTL0(CAST(64, MSB(op_MUL_135), DUP(op_MUL_135)), SN(32, 1)); + RzILOpPure *op_ADD_139 = ADD(op_LSHIFT_108, op_LSHIFT_138); + RzILOpPure *op_EQ_140 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_ADD_73), SN(32, 0), SN(32, 0x20)), op_ADD_139); + RzILOpPure *op_RSHIFT_208 = SHIFTRA(DUP(Rss), SN(32, 16)); + RzILOpPure *op_AND_211 = LOGAND(op_RSHIFT_208, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_217 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + 
RzILOpPure *op_AND_219 = LOGAND(op_RSHIFT_217, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_225 = SHIFTRA(CAST(64, MSB(CAST(32, MSB(op_AND_219), DUP(op_AND_219))), CAST(32, MSB(DUP(op_AND_219)), DUP(op_AND_219))), SN(32, 0)); + RzILOpPure *op_AND_228 = LOGAND(op_RSHIFT_225, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_231 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_211), DUP(op_AND_211))), CAST(16, MSB(DUP(op_AND_211)), DUP(op_AND_211))), CAST(32, MSB(CAST(16, MSB(op_AND_228), DUP(op_AND_228))), CAST(16, MSB(DUP(op_AND_228)), DUP(op_AND_228)))); + RzILOpPure *op_LSHIFT_234 = SHIFTL0(CAST(64, MSB(op_MUL_231), DUP(op_MUL_231)), SN(32, 1)); + RzILOpPure *op_RSHIFT_238 = SHIFTRA(DUP(Rss), SN(32, 0x30)); + RzILOpPure *op_AND_241 = LOGAND(op_RSHIFT_238, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_247 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_249 = LOGAND(op_RSHIFT_247, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_255 = SHIFTRA(CAST(64, MSB(CAST(32, MSB(op_AND_249), DUP(op_AND_249))), CAST(32, MSB(DUP(op_AND_249)), DUP(op_AND_249))), SN(32, 16)); + RzILOpPure *op_AND_258 = LOGAND(op_RSHIFT_255, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_261 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_241), DUP(op_AND_241))), CAST(16, MSB(DUP(op_AND_241)), DUP(op_AND_241))), CAST(32, MSB(CAST(16, MSB(op_AND_258), DUP(op_AND_258))), CAST(16, MSB(DUP(op_AND_258)), DUP(op_AND_258)))); + RzILOpPure *op_LSHIFT_264 = SHIFTL0(CAST(64, MSB(op_MUL_261), DUP(op_MUL_261)), SN(32, 1)); + RzILOpPure *op_ADD_265 = ADD(op_LSHIFT_234, op_LSHIFT_264); + RzILOpPure *op_LT_268 = SLT(op_ADD_265, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_273 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_274 = NEG(op_LSHIFT_273); + RzILOpPure *op_LSHIFT_279 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_282 = SUB(op_LSHIFT_279, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_283 = ITE(op_LT_268, 
op_NEG_274, op_SUB_282); + RzILOpEffect *gcc_expr_284 = BRANCH(op_EQ_140, EMPTY(), set_usr_field_call_204); + + // h_tmp425 = HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff))) >> 0x0) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff))) >> 0x10) & ((st64) 0xffff))))) << 0x1)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff))) >> 0x0) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff))) >> 0x10) & ((st64) 0xffff))))) << 0x1))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff))) >> 0x0) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff))) >> 0x10) & ((st64) 0xffff))))) << 0x1) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_286 = SETL("h_tmp425", cond_283); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16 ...; + RzILOpEffect *seq_287 = SEQN(2, gcc_expr_284, op_ASSIGN_hybrid_tmp_286); + + // Rdd = ((Rdd & (~(0xffffffff << 0x20))) | ((((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff))) >> 0x0) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff))) >> 0x10) & ((st64) 0xffff))))) << 0x1)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff))) >> 0x0) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff))) >> 0x10) & ((st64) 0xffff))))) << 0x1)) ? 
(((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff))) >> 0x0) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff))) >> 0x10) & ((st64) 0xffff))))) << 0x1) : h_tmp425) & 0xffffffff) << 0x20)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0x20)); + RzILOpPure *op_NOT_6 = LOGNOT(op_LSHIFT_5); + RzILOpPure *op_AND_7 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_6); + RzILOpPure *op_RSHIFT_144 = SHIFTRA(DUP(Rss), SN(32, 16)); + RzILOpPure *op_AND_147 = LOGAND(op_RSHIFT_144, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_153 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_155 = LOGAND(op_RSHIFT_153, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_161 = SHIFTRA(CAST(64, MSB(CAST(32, MSB(op_AND_155), DUP(op_AND_155))), CAST(32, MSB(DUP(op_AND_155)), DUP(op_AND_155))), SN(32, 0)); + RzILOpPure *op_AND_164 = LOGAND(op_RSHIFT_161, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_167 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_147), DUP(op_AND_147))), CAST(16, MSB(DUP(op_AND_147)), DUP(op_AND_147))), CAST(32, MSB(CAST(16, MSB(op_AND_164), DUP(op_AND_164))), CAST(16, MSB(DUP(op_AND_164)), DUP(op_AND_164)))); + RzILOpPure *op_LSHIFT_170 = SHIFTL0(CAST(64, MSB(op_MUL_167), DUP(op_MUL_167)), SN(32, 1)); + RzILOpPure *op_RSHIFT_174 = SHIFTRA(DUP(Rss), SN(32, 0x30)); + RzILOpPure *op_AND_177 = LOGAND(op_RSHIFT_174, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_183 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_185 = LOGAND(op_RSHIFT_183, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_191 = SHIFTRA(CAST(64, MSB(CAST(32, MSB(op_AND_185), DUP(op_AND_185))), CAST(32, MSB(DUP(op_AND_185)), DUP(op_AND_185))), SN(32, 16)); + RzILOpPure *op_AND_194 = LOGAND(op_RSHIFT_191, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + 
RzILOpPure *op_MUL_197 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_177), DUP(op_AND_177))), CAST(16, MSB(DUP(op_AND_177)), DUP(op_AND_177))), CAST(32, MSB(CAST(16, MSB(op_AND_194), DUP(op_AND_194))), CAST(16, MSB(DUP(op_AND_194)), DUP(op_AND_194)))); + RzILOpPure *op_LSHIFT_200 = SHIFTL0(CAST(64, MSB(op_MUL_197), DUP(op_MUL_197)), SN(32, 1)); + RzILOpPure *op_ADD_201 = ADD(op_LSHIFT_170, op_LSHIFT_200); + RzILOpPure *cond_288 = ITE(DUP(op_EQ_140), op_ADD_201, VARL("h_tmp425")); + RzILOpPure *op_AND_290 = LOGAND(cond_288, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_294 = SHIFTL0(op_AND_290, SN(32, 0x20)); + RzILOpPure *op_OR_295 = LOGOR(op_AND_7, op_LSHIFT_294); + RzILOpEffect *op_ASSIGN_296 = WRITE_REG(bundle, Rdd_op, op_OR_295); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) (( ...; + RzILOpEffect *seq_297 = SEQN(2, seq_287, op_ASSIGN_296); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_500 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff))) >> 0x0) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff))) >> 0x10) & ((st64) 0xffff))))) << 0x1)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff))) >> 0x0) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff))) >> 0x10) & ((st64) 0xffff))))) << 0x1))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff))) >> 0x0) 
& ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff))) >> 0x10) & ((st64) 0xffff))))) << 0x1) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_312 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_315 = LOGAND(op_RSHIFT_312, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_321 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_323 = LOGAND(op_RSHIFT_321, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_329 = SHIFTRA(CAST(64, MSB(CAST(32, MSB(op_AND_323), DUP(op_AND_323))), CAST(32, MSB(DUP(op_AND_323)), DUP(op_AND_323))), SN(32, 0)); + RzILOpPure *op_AND_332 = LOGAND(op_RSHIFT_329, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_335 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_315), DUP(op_AND_315))), CAST(16, MSB(DUP(op_AND_315)), DUP(op_AND_315))), CAST(32, MSB(CAST(16, MSB(op_AND_332), DUP(op_AND_332))), CAST(16, MSB(DUP(op_AND_332)), DUP(op_AND_332)))); + RzILOpPure *op_LSHIFT_338 = SHIFTL0(CAST(64, MSB(op_MUL_335), DUP(op_MUL_335)), SN(32, 1)); + RzILOpPure *op_RSHIFT_342 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_345 = LOGAND(op_RSHIFT_342, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_351 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_353 = LOGAND(op_RSHIFT_351, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_359 = SHIFTRA(CAST(64, MSB(CAST(32, MSB(op_AND_353), DUP(op_AND_353))), CAST(32, MSB(DUP(op_AND_353)), DUP(op_AND_353))), SN(32, 16)); + RzILOpPure *op_AND_362 = LOGAND(op_RSHIFT_359, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_365 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_345), DUP(op_AND_345))), CAST(16, MSB(DUP(op_AND_345)), DUP(op_AND_345))), CAST(32, MSB(CAST(16, MSB(op_AND_362), DUP(op_AND_362))), CAST(16, MSB(DUP(op_AND_362)), DUP(op_AND_362)))); + RzILOpPure *op_LSHIFT_368 = SHIFTL0(CAST(64, 
MSB(op_MUL_365), DUP(op_MUL_365)), SN(32, 1)); + RzILOpPure *op_ADD_369 = ADD(op_LSHIFT_338, op_LSHIFT_368); + RzILOpPure *op_RSHIFT_378 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_381 = LOGAND(op_RSHIFT_378, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_387 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_389 = LOGAND(op_RSHIFT_387, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_395 = SHIFTRA(CAST(64, MSB(CAST(32, MSB(op_AND_389), DUP(op_AND_389))), CAST(32, MSB(DUP(op_AND_389)), DUP(op_AND_389))), SN(32, 0)); + RzILOpPure *op_AND_398 = LOGAND(op_RSHIFT_395, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_401 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_381), DUP(op_AND_381))), CAST(16, MSB(DUP(op_AND_381)), DUP(op_AND_381))), CAST(32, MSB(CAST(16, MSB(op_AND_398), DUP(op_AND_398))), CAST(16, MSB(DUP(op_AND_398)), DUP(op_AND_398)))); + RzILOpPure *op_LSHIFT_404 = SHIFTL0(CAST(64, MSB(op_MUL_401), DUP(op_MUL_401)), SN(32, 1)); + RzILOpPure *op_RSHIFT_408 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_411 = LOGAND(op_RSHIFT_408, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_417 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_419 = LOGAND(op_RSHIFT_417, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_425 = SHIFTRA(CAST(64, MSB(CAST(32, MSB(op_AND_419), DUP(op_AND_419))), CAST(32, MSB(DUP(op_AND_419)), DUP(op_AND_419))), SN(32, 16)); + RzILOpPure *op_AND_428 = LOGAND(op_RSHIFT_425, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_431 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_411), DUP(op_AND_411))), CAST(16, MSB(DUP(op_AND_411)), DUP(op_AND_411))), CAST(32, MSB(CAST(16, MSB(op_AND_428), DUP(op_AND_428))), CAST(16, MSB(DUP(op_AND_428)), DUP(op_AND_428)))); + RzILOpPure *op_LSHIFT_434 = SHIFTL0(CAST(64, MSB(op_MUL_431), DUP(op_MUL_431)), SN(32, 1)); + RzILOpPure *op_ADD_435 = ADD(op_LSHIFT_404, op_LSHIFT_434); + RzILOpPure *op_EQ_436 = EQ(SEXTRACT64(CAST(64, 
IL_FALSE, op_ADD_369), SN(32, 0), SN(32, 0x20)), op_ADD_435); + RzILOpPure *op_RSHIFT_504 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_507 = LOGAND(op_RSHIFT_504, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_513 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_515 = LOGAND(op_RSHIFT_513, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_521 = SHIFTRA(CAST(64, MSB(CAST(32, MSB(op_AND_515), DUP(op_AND_515))), CAST(32, MSB(DUP(op_AND_515)), DUP(op_AND_515))), SN(32, 0)); + RzILOpPure *op_AND_524 = LOGAND(op_RSHIFT_521, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_527 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_507), DUP(op_AND_507))), CAST(16, MSB(DUP(op_AND_507)), DUP(op_AND_507))), CAST(32, MSB(CAST(16, MSB(op_AND_524), DUP(op_AND_524))), CAST(16, MSB(DUP(op_AND_524)), DUP(op_AND_524)))); + RzILOpPure *op_LSHIFT_530 = SHIFTL0(CAST(64, MSB(op_MUL_527), DUP(op_MUL_527)), SN(32, 1)); + RzILOpPure *op_RSHIFT_534 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_537 = LOGAND(op_RSHIFT_534, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_543 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_545 = LOGAND(op_RSHIFT_543, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_551 = SHIFTRA(CAST(64, MSB(CAST(32, MSB(op_AND_545), DUP(op_AND_545))), CAST(32, MSB(DUP(op_AND_545)), DUP(op_AND_545))), SN(32, 16)); + RzILOpPure *op_AND_554 = LOGAND(op_RSHIFT_551, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_557 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_537), DUP(op_AND_537))), CAST(16, MSB(DUP(op_AND_537)), DUP(op_AND_537))), CAST(32, MSB(CAST(16, MSB(op_AND_554), DUP(op_AND_554))), CAST(16, MSB(DUP(op_AND_554)), DUP(op_AND_554)))); + RzILOpPure *op_LSHIFT_560 = SHIFTL0(CAST(64, MSB(op_MUL_557), DUP(op_MUL_557)), SN(32, 1)); + RzILOpPure *op_ADD_561 = ADD(op_LSHIFT_530, op_LSHIFT_560); + RzILOpPure *op_LT_564 = SLT(op_ADD_561, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure 
*op_LSHIFT_569 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_570 = NEG(op_LSHIFT_569); + RzILOpPure *op_LSHIFT_575 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_578 = SUB(op_LSHIFT_575, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_579 = ITE(op_LT_564, op_NEG_570, op_SUB_578); + RzILOpEffect *gcc_expr_580 = BRANCH(op_EQ_436, EMPTY(), set_usr_field_call_500); + + // h_tmp426 = HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff))) >> 0x0) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff))) >> 0x10) & ((st64) 0xffff))))) << 0x1)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff))) >> 0x0) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff))) >> 0x10) & ((st64) 0xffff))))) << 0x1))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff))) >> 0x0) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff))) >> 0x10) & ((st64) 0xffff))))) << 0x1) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_582 = SETL("h_tmp426", cond_579); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16 ...; + RzILOpEffect *seq_583 = SEQN(2, gcc_expr_580, op_ASSIGN_hybrid_tmp_582); + + // Rdd = ((Rdd & (~(0xffffffff << 0x0))) | ((((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff))) >> 0x0) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff))) >> 0x10) & ((st64) 0xffff))))) << 0x1)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff))) >> 0x0) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff))) >> 0x10) & ((st64) 0xffff))))) << 0x1)) ? 
(((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff))) >> 0x0) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff))) >> 0x10) & ((st64) 0xffff))))) << 0x1) : h_tmp426) & 0xffffffff) << 0x0)); + RzILOpPure *op_LSHIFT_303 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0)); + RzILOpPure *op_NOT_304 = LOGNOT(op_LSHIFT_303); + RzILOpPure *op_AND_305 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_304); + RzILOpPure *op_RSHIFT_440 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_443 = LOGAND(op_RSHIFT_440, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_449 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_451 = LOGAND(op_RSHIFT_449, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_457 = SHIFTRA(CAST(64, MSB(CAST(32, MSB(op_AND_451), DUP(op_AND_451))), CAST(32, MSB(DUP(op_AND_451)), DUP(op_AND_451))), SN(32, 0)); + RzILOpPure *op_AND_460 = LOGAND(op_RSHIFT_457, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_463 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_443), DUP(op_AND_443))), CAST(16, MSB(DUP(op_AND_443)), DUP(op_AND_443))), CAST(32, MSB(CAST(16, MSB(op_AND_460), DUP(op_AND_460))), CAST(16, MSB(DUP(op_AND_460)), DUP(op_AND_460)))); + RzILOpPure *op_LSHIFT_466 = SHIFTL0(CAST(64, MSB(op_MUL_463), DUP(op_MUL_463)), SN(32, 1)); + RzILOpPure *op_RSHIFT_470 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_473 = LOGAND(op_RSHIFT_470, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_479 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_481 = LOGAND(op_RSHIFT_479, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_487 = SHIFTRA(CAST(64, MSB(CAST(32, MSB(op_AND_481), DUP(op_AND_481))), CAST(32, MSB(DUP(op_AND_481)), DUP(op_AND_481))), SN(32, 16)); + RzILOpPure *op_AND_490 = LOGAND(op_RSHIFT_487, CAST(64, MSB(SN(32, 0xffff)), SN(32, 
0xffff))); + RzILOpPure *op_MUL_493 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_473), DUP(op_AND_473))), CAST(16, MSB(DUP(op_AND_473)), DUP(op_AND_473))), CAST(32, MSB(CAST(16, MSB(op_AND_490), DUP(op_AND_490))), CAST(16, MSB(DUP(op_AND_490)), DUP(op_AND_490)))); + RzILOpPure *op_LSHIFT_496 = SHIFTL0(CAST(64, MSB(op_MUL_493), DUP(op_MUL_493)), SN(32, 1)); + RzILOpPure *op_ADD_497 = ADD(op_LSHIFT_466, op_LSHIFT_496); + RzILOpPure *cond_584 = ITE(DUP(op_EQ_436), op_ADD_497, VARL("h_tmp426")); + RzILOpPure *op_AND_586 = LOGAND(cond_584, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_590 = SHIFTL0(op_AND_586, SN(32, 0)); + RzILOpPure *op_OR_591 = LOGOR(op_AND_305, op_LSHIFT_590); + RzILOpEffect *op_ASSIGN_592 = WRITE_REG(bundle, Rdd_op, op_OR_591); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) (( ...; + RzILOpEffect *seq_593 = SEQN(2, seq_583, op_ASSIGN_592); + + RzILOpEffect *instruction_sequence = SEQN(2, seq_297, seq_593); + return instruction_sequence; +} + +// Rdd = vrcmpys(Rss,Rtt):<<1:sat:raw:lo +RzILOpEffect *hex_il_op_m2_vrcmpys_s1_l(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_204 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff))) >> 0x0) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff))) >> 0x10) & ((st64) 0xffff))))) << 0x1)), 0x0, 0x20) 
== (((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff))) >> 0x0) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff))) >> 0x10) & ((st64) 0xffff))))) << 0x1))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff))) >> 0x0) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff))) >> 0x10) & ((st64) 0xffff))))) << 0x1) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_15 = SHIFTRA(Rss, SN(32, 16)); + RzILOpPure *op_AND_18 = LOGAND(op_RSHIFT_15, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_25 = SHIFTRA(Rtt, SN(32, 0)); + RzILOpPure *op_AND_27 = LOGAND(op_RSHIFT_25, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_33 = SHIFTRA(CAST(64, MSB(CAST(32, MSB(op_AND_27), DUP(op_AND_27))), CAST(32, MSB(DUP(op_AND_27)), DUP(op_AND_27))), SN(32, 0)); + RzILOpPure *op_AND_36 = LOGAND(op_RSHIFT_33, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_39 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_18), DUP(op_AND_18))), CAST(16, MSB(DUP(op_AND_18)), DUP(op_AND_18))), CAST(32, MSB(CAST(16, MSB(op_AND_36), DUP(op_AND_36))), CAST(16, MSB(DUP(op_AND_36)), DUP(op_AND_36)))); + RzILOpPure *op_LSHIFT_42 = SHIFTL0(CAST(64, MSB(op_MUL_39), DUP(op_MUL_39)), SN(32, 1)); + RzILOpPure *op_RSHIFT_46 = SHIFTRA(DUP(Rss), SN(32, 0x30)); + RzILOpPure *op_AND_49 = LOGAND(op_RSHIFT_46, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_55 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_57 = LOGAND(op_RSHIFT_55, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_63 = SHIFTRA(CAST(64, 
MSB(CAST(32, MSB(op_AND_57), DUP(op_AND_57))), CAST(32, MSB(DUP(op_AND_57)), DUP(op_AND_57))), SN(32, 16)); + RzILOpPure *op_AND_66 = LOGAND(op_RSHIFT_63, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_69 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_49), DUP(op_AND_49))), CAST(16, MSB(DUP(op_AND_49)), DUP(op_AND_49))), CAST(32, MSB(CAST(16, MSB(op_AND_66), DUP(op_AND_66))), CAST(16, MSB(DUP(op_AND_66)), DUP(op_AND_66)))); + RzILOpPure *op_LSHIFT_72 = SHIFTL0(CAST(64, MSB(op_MUL_69), DUP(op_MUL_69)), SN(32, 1)); + RzILOpPure *op_ADD_73 = ADD(op_LSHIFT_42, op_LSHIFT_72); + RzILOpPure *op_RSHIFT_82 = SHIFTRA(DUP(Rss), SN(32, 16)); + RzILOpPure *op_AND_85 = LOGAND(op_RSHIFT_82, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_91 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_93 = LOGAND(op_RSHIFT_91, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_99 = SHIFTRA(CAST(64, MSB(CAST(32, MSB(op_AND_93), DUP(op_AND_93))), CAST(32, MSB(DUP(op_AND_93)), DUP(op_AND_93))), SN(32, 0)); + RzILOpPure *op_AND_102 = LOGAND(op_RSHIFT_99, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_105 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_85), DUP(op_AND_85))), CAST(16, MSB(DUP(op_AND_85)), DUP(op_AND_85))), CAST(32, MSB(CAST(16, MSB(op_AND_102), DUP(op_AND_102))), CAST(16, MSB(DUP(op_AND_102)), DUP(op_AND_102)))); + RzILOpPure *op_LSHIFT_108 = SHIFTL0(CAST(64, MSB(op_MUL_105), DUP(op_MUL_105)), SN(32, 1)); + RzILOpPure *op_RSHIFT_112 = SHIFTRA(DUP(Rss), SN(32, 0x30)); + RzILOpPure *op_AND_115 = LOGAND(op_RSHIFT_112, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_121 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_123 = LOGAND(op_RSHIFT_121, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_129 = SHIFTRA(CAST(64, MSB(CAST(32, MSB(op_AND_123), DUP(op_AND_123))), CAST(32, MSB(DUP(op_AND_123)), DUP(op_AND_123))), SN(32, 16)); + RzILOpPure *op_AND_132 = LOGAND(op_RSHIFT_129, CAST(64, MSB(SN(32, 0xffff)), SN(32, 
0xffff))); + RzILOpPure *op_MUL_135 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_115), DUP(op_AND_115))), CAST(16, MSB(DUP(op_AND_115)), DUP(op_AND_115))), CAST(32, MSB(CAST(16, MSB(op_AND_132), DUP(op_AND_132))), CAST(16, MSB(DUP(op_AND_132)), DUP(op_AND_132)))); + RzILOpPure *op_LSHIFT_138 = SHIFTL0(CAST(64, MSB(op_MUL_135), DUP(op_MUL_135)), SN(32, 1)); + RzILOpPure *op_ADD_139 = ADD(op_LSHIFT_108, op_LSHIFT_138); + RzILOpPure *op_EQ_140 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_ADD_73), SN(32, 0), SN(32, 0x20)), op_ADD_139); + RzILOpPure *op_RSHIFT_208 = SHIFTRA(DUP(Rss), SN(32, 16)); + RzILOpPure *op_AND_211 = LOGAND(op_RSHIFT_208, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_217 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_219 = LOGAND(op_RSHIFT_217, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_225 = SHIFTRA(CAST(64, MSB(CAST(32, MSB(op_AND_219), DUP(op_AND_219))), CAST(32, MSB(DUP(op_AND_219)), DUP(op_AND_219))), SN(32, 0)); + RzILOpPure *op_AND_228 = LOGAND(op_RSHIFT_225, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_231 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_211), DUP(op_AND_211))), CAST(16, MSB(DUP(op_AND_211)), DUP(op_AND_211))), CAST(32, MSB(CAST(16, MSB(op_AND_228), DUP(op_AND_228))), CAST(16, MSB(DUP(op_AND_228)), DUP(op_AND_228)))); + RzILOpPure *op_LSHIFT_234 = SHIFTL0(CAST(64, MSB(op_MUL_231), DUP(op_MUL_231)), SN(32, 1)); + RzILOpPure *op_RSHIFT_238 = SHIFTRA(DUP(Rss), SN(32, 0x30)); + RzILOpPure *op_AND_241 = LOGAND(op_RSHIFT_238, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_247 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_249 = LOGAND(op_RSHIFT_247, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_255 = SHIFTRA(CAST(64, MSB(CAST(32, MSB(op_AND_249), DUP(op_AND_249))), CAST(32, MSB(DUP(op_AND_249)), DUP(op_AND_249))), SN(32, 16)); + RzILOpPure *op_AND_258 = LOGAND(op_RSHIFT_255, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_261 = MUL(CAST(32, 
MSB(CAST(16, MSB(op_AND_241), DUP(op_AND_241))), CAST(16, MSB(DUP(op_AND_241)), DUP(op_AND_241))), CAST(32, MSB(CAST(16, MSB(op_AND_258), DUP(op_AND_258))), CAST(16, MSB(DUP(op_AND_258)), DUP(op_AND_258)))); + RzILOpPure *op_LSHIFT_264 = SHIFTL0(CAST(64, MSB(op_MUL_261), DUP(op_MUL_261)), SN(32, 1)); + RzILOpPure *op_ADD_265 = ADD(op_LSHIFT_234, op_LSHIFT_264); + RzILOpPure *op_LT_268 = SLT(op_ADD_265, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_273 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_274 = NEG(op_LSHIFT_273); + RzILOpPure *op_LSHIFT_279 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_282 = SUB(op_LSHIFT_279, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_283 = ITE(op_LT_268, op_NEG_274, op_SUB_282); + RzILOpEffect *gcc_expr_284 = BRANCH(op_EQ_140, EMPTY(), set_usr_field_call_204); + + // h_tmp427 = HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff))) >> 0x0) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff))) >> 0x10) & ((st64) 0xffff))))) << 0x1)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff))) >> 0x0) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff))) >> 0x10) & ((st64) 0xffff))))) << 0x1))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff))) >> 0x0) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff))) >> 0x10) & ((st64) 0xffff))))) << 0x1) < 
((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_286 = SETL("h_tmp427", cond_283); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16 ...; + RzILOpEffect *seq_287 = SEQN(2, gcc_expr_284, op_ASSIGN_hybrid_tmp_286); + + // Rdd = ((Rdd & (~(0xffffffff << 0x20))) | ((((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff))) >> 0x0) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff))) >> 0x10) & ((st64) 0xffff))))) << 0x1)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff))) >> 0x0) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff))) >> 0x10) & ((st64) 0xffff))))) << 0x1)) ? 
(((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff))) >> 0x0) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff))) >> 0x10) & ((st64) 0xffff))))) << 0x1) : h_tmp427) & 0xffffffff) << 0x20)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0x20)); + RzILOpPure *op_NOT_6 = LOGNOT(op_LSHIFT_5); + RzILOpPure *op_AND_7 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_6); + RzILOpPure *op_RSHIFT_144 = SHIFTRA(DUP(Rss), SN(32, 16)); + RzILOpPure *op_AND_147 = LOGAND(op_RSHIFT_144, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_153 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_155 = LOGAND(op_RSHIFT_153, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_161 = SHIFTRA(CAST(64, MSB(CAST(32, MSB(op_AND_155), DUP(op_AND_155))), CAST(32, MSB(DUP(op_AND_155)), DUP(op_AND_155))), SN(32, 0)); + RzILOpPure *op_AND_164 = LOGAND(op_RSHIFT_161, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_167 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_147), DUP(op_AND_147))), CAST(16, MSB(DUP(op_AND_147)), DUP(op_AND_147))), CAST(32, MSB(CAST(16, MSB(op_AND_164), DUP(op_AND_164))), CAST(16, MSB(DUP(op_AND_164)), DUP(op_AND_164)))); + RzILOpPure *op_LSHIFT_170 = SHIFTL0(CAST(64, MSB(op_MUL_167), DUP(op_MUL_167)), SN(32, 1)); + RzILOpPure *op_RSHIFT_174 = SHIFTRA(DUP(Rss), SN(32, 0x30)); + RzILOpPure *op_AND_177 = LOGAND(op_RSHIFT_174, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_183 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_185 = LOGAND(op_RSHIFT_183, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_191 = SHIFTRA(CAST(64, MSB(CAST(32, MSB(op_AND_185), DUP(op_AND_185))), CAST(32, MSB(DUP(op_AND_185)), DUP(op_AND_185))), SN(32, 16)); + RzILOpPure *op_AND_194 = LOGAND(op_RSHIFT_191, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + 
RzILOpPure *op_MUL_197 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_177), DUP(op_AND_177))), CAST(16, MSB(DUP(op_AND_177)), DUP(op_AND_177))), CAST(32, MSB(CAST(16, MSB(op_AND_194), DUP(op_AND_194))), CAST(16, MSB(DUP(op_AND_194)), DUP(op_AND_194)))); + RzILOpPure *op_LSHIFT_200 = SHIFTL0(CAST(64, MSB(op_MUL_197), DUP(op_MUL_197)), SN(32, 1)); + RzILOpPure *op_ADD_201 = ADD(op_LSHIFT_170, op_LSHIFT_200); + RzILOpPure *cond_288 = ITE(DUP(op_EQ_140), op_ADD_201, VARL("h_tmp427")); + RzILOpPure *op_AND_290 = LOGAND(cond_288, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_294 = SHIFTL0(op_AND_290, SN(32, 0x20)); + RzILOpPure *op_OR_295 = LOGOR(op_AND_7, op_LSHIFT_294); + RzILOpEffect *op_ASSIGN_296 = WRITE_REG(bundle, Rdd_op, op_OR_295); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) (( ...; + RzILOpEffect *seq_297 = SEQN(2, seq_287, op_ASSIGN_296); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_500 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff))) >> 0x0) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff))) >> 0x10) & ((st64) 0xffff))))) << 0x1)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff))) >> 0x0) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff))) >> 0x10) & ((st64) 0xffff))))) << 0x1))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff))) >> 0x0) & 
((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff))) >> 0x10) & ((st64) 0xffff))))) << 0x1) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_312 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_315 = LOGAND(op_RSHIFT_312, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_321 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_323 = LOGAND(op_RSHIFT_321, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_329 = SHIFTRA(CAST(64, MSB(CAST(32, MSB(op_AND_323), DUP(op_AND_323))), CAST(32, MSB(DUP(op_AND_323)), DUP(op_AND_323))), SN(32, 0)); + RzILOpPure *op_AND_332 = LOGAND(op_RSHIFT_329, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_335 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_315), DUP(op_AND_315))), CAST(16, MSB(DUP(op_AND_315)), DUP(op_AND_315))), CAST(32, MSB(CAST(16, MSB(op_AND_332), DUP(op_AND_332))), CAST(16, MSB(DUP(op_AND_332)), DUP(op_AND_332)))); + RzILOpPure *op_LSHIFT_338 = SHIFTL0(CAST(64, MSB(op_MUL_335), DUP(op_MUL_335)), SN(32, 1)); + RzILOpPure *op_RSHIFT_342 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_345 = LOGAND(op_RSHIFT_342, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_351 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_353 = LOGAND(op_RSHIFT_351, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_359 = SHIFTRA(CAST(64, MSB(CAST(32, MSB(op_AND_353), DUP(op_AND_353))), CAST(32, MSB(DUP(op_AND_353)), DUP(op_AND_353))), SN(32, 16)); + RzILOpPure *op_AND_362 = LOGAND(op_RSHIFT_359, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_365 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_345), DUP(op_AND_345))), CAST(16, MSB(DUP(op_AND_345)), DUP(op_AND_345))), CAST(32, MSB(CAST(16, MSB(op_AND_362), DUP(op_AND_362))), CAST(16, MSB(DUP(op_AND_362)), DUP(op_AND_362)))); + RzILOpPure *op_LSHIFT_368 = SHIFTL0(CAST(64, 
MSB(op_MUL_365), DUP(op_MUL_365)), SN(32, 1)); + RzILOpPure *op_ADD_369 = ADD(op_LSHIFT_338, op_LSHIFT_368); + RzILOpPure *op_RSHIFT_378 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_381 = LOGAND(op_RSHIFT_378, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_387 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_389 = LOGAND(op_RSHIFT_387, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_395 = SHIFTRA(CAST(64, MSB(CAST(32, MSB(op_AND_389), DUP(op_AND_389))), CAST(32, MSB(DUP(op_AND_389)), DUP(op_AND_389))), SN(32, 0)); + RzILOpPure *op_AND_398 = LOGAND(op_RSHIFT_395, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_401 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_381), DUP(op_AND_381))), CAST(16, MSB(DUP(op_AND_381)), DUP(op_AND_381))), CAST(32, MSB(CAST(16, MSB(op_AND_398), DUP(op_AND_398))), CAST(16, MSB(DUP(op_AND_398)), DUP(op_AND_398)))); + RzILOpPure *op_LSHIFT_404 = SHIFTL0(CAST(64, MSB(op_MUL_401), DUP(op_MUL_401)), SN(32, 1)); + RzILOpPure *op_RSHIFT_408 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_411 = LOGAND(op_RSHIFT_408, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_417 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_419 = LOGAND(op_RSHIFT_417, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_425 = SHIFTRA(CAST(64, MSB(CAST(32, MSB(op_AND_419), DUP(op_AND_419))), CAST(32, MSB(DUP(op_AND_419)), DUP(op_AND_419))), SN(32, 16)); + RzILOpPure *op_AND_428 = LOGAND(op_RSHIFT_425, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_431 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_411), DUP(op_AND_411))), CAST(16, MSB(DUP(op_AND_411)), DUP(op_AND_411))), CAST(32, MSB(CAST(16, MSB(op_AND_428), DUP(op_AND_428))), CAST(16, MSB(DUP(op_AND_428)), DUP(op_AND_428)))); + RzILOpPure *op_LSHIFT_434 = SHIFTL0(CAST(64, MSB(op_MUL_431), DUP(op_MUL_431)), SN(32, 1)); + RzILOpPure *op_ADD_435 = ADD(op_LSHIFT_404, op_LSHIFT_434); + RzILOpPure *op_EQ_436 = EQ(SEXTRACT64(CAST(64, 
IL_FALSE, op_ADD_369), SN(32, 0), SN(32, 0x20)), op_ADD_435); + RzILOpPure *op_RSHIFT_504 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_507 = LOGAND(op_RSHIFT_504, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_513 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_515 = LOGAND(op_RSHIFT_513, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_521 = SHIFTRA(CAST(64, MSB(CAST(32, MSB(op_AND_515), DUP(op_AND_515))), CAST(32, MSB(DUP(op_AND_515)), DUP(op_AND_515))), SN(32, 0)); + RzILOpPure *op_AND_524 = LOGAND(op_RSHIFT_521, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_527 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_507), DUP(op_AND_507))), CAST(16, MSB(DUP(op_AND_507)), DUP(op_AND_507))), CAST(32, MSB(CAST(16, MSB(op_AND_524), DUP(op_AND_524))), CAST(16, MSB(DUP(op_AND_524)), DUP(op_AND_524)))); + RzILOpPure *op_LSHIFT_530 = SHIFTL0(CAST(64, MSB(op_MUL_527), DUP(op_MUL_527)), SN(32, 1)); + RzILOpPure *op_RSHIFT_534 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_537 = LOGAND(op_RSHIFT_534, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_543 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_545 = LOGAND(op_RSHIFT_543, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_551 = SHIFTRA(CAST(64, MSB(CAST(32, MSB(op_AND_545), DUP(op_AND_545))), CAST(32, MSB(DUP(op_AND_545)), DUP(op_AND_545))), SN(32, 16)); + RzILOpPure *op_AND_554 = LOGAND(op_RSHIFT_551, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_557 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_537), DUP(op_AND_537))), CAST(16, MSB(DUP(op_AND_537)), DUP(op_AND_537))), CAST(32, MSB(CAST(16, MSB(op_AND_554), DUP(op_AND_554))), CAST(16, MSB(DUP(op_AND_554)), DUP(op_AND_554)))); + RzILOpPure *op_LSHIFT_560 = SHIFTL0(CAST(64, MSB(op_MUL_557), DUP(op_MUL_557)), SN(32, 1)); + RzILOpPure *op_ADD_561 = ADD(op_LSHIFT_530, op_LSHIFT_560); + RzILOpPure *op_LT_564 = SLT(op_ADD_561, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure 
*op_LSHIFT_569 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_570 = NEG(op_LSHIFT_569); + RzILOpPure *op_LSHIFT_575 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_578 = SUB(op_LSHIFT_575, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_579 = ITE(op_LT_564, op_NEG_570, op_SUB_578); + RzILOpEffect *gcc_expr_580 = BRANCH(op_EQ_436, EMPTY(), set_usr_field_call_500); + + // h_tmp428 = HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff))) >> 0x0) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff))) >> 0x10) & ((st64) 0xffff))))) << 0x1)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff))) >> 0x0) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff))) >> 0x10) & ((st64) 0xffff))))) << 0x1))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff))) >> 0x0) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff))) >> 0x10) & ((st64) 0xffff))))) << 0x1) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_582 = SETL("h_tmp428", cond_579); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16 ...; + RzILOpEffect *seq_583 = SEQN(2, gcc_expr_580, op_ASSIGN_hybrid_tmp_582); + + // Rdd = ((Rdd & (~(0xffffffff << 0x0))) | ((((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff))) >> 0x0) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff))) >> 0x10) & ((st64) 0xffff))))) << 0x1)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff))) >> 0x0) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff))) >> 0x10) & ((st64) 0xffff))))) << 0x1)) ? 
(((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff))) >> 0x0) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff))) >> 0x10) & ((st64) 0xffff))))) << 0x1) : h_tmp428) & 0xffffffff) << 0x0)); + RzILOpPure *op_LSHIFT_303 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0)); + RzILOpPure *op_NOT_304 = LOGNOT(op_LSHIFT_303); + RzILOpPure *op_AND_305 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_304); + RzILOpPure *op_RSHIFT_440 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_443 = LOGAND(op_RSHIFT_440, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_449 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_451 = LOGAND(op_RSHIFT_449, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_457 = SHIFTRA(CAST(64, MSB(CAST(32, MSB(op_AND_451), DUP(op_AND_451))), CAST(32, MSB(DUP(op_AND_451)), DUP(op_AND_451))), SN(32, 0)); + RzILOpPure *op_AND_460 = LOGAND(op_RSHIFT_457, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_463 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_443), DUP(op_AND_443))), CAST(16, MSB(DUP(op_AND_443)), DUP(op_AND_443))), CAST(32, MSB(CAST(16, MSB(op_AND_460), DUP(op_AND_460))), CAST(16, MSB(DUP(op_AND_460)), DUP(op_AND_460)))); + RzILOpPure *op_LSHIFT_466 = SHIFTL0(CAST(64, MSB(op_MUL_463), DUP(op_MUL_463)), SN(32, 1)); + RzILOpPure *op_RSHIFT_470 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_473 = LOGAND(op_RSHIFT_470, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_479 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_481 = LOGAND(op_RSHIFT_479, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_487 = SHIFTRA(CAST(64, MSB(CAST(32, MSB(op_AND_481), DUP(op_AND_481))), CAST(32, MSB(DUP(op_AND_481)), DUP(op_AND_481))), SN(32, 16)); + RzILOpPure *op_AND_490 = LOGAND(op_RSHIFT_487, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + 
RzILOpPure *op_MUL_493 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_473), DUP(op_AND_473))), CAST(16, MSB(DUP(op_AND_473)), DUP(op_AND_473))), CAST(32, MSB(CAST(16, MSB(op_AND_490), DUP(op_AND_490))), CAST(16, MSB(DUP(op_AND_490)), DUP(op_AND_490)))); + RzILOpPure *op_LSHIFT_496 = SHIFTL0(CAST(64, MSB(op_MUL_493), DUP(op_MUL_493)), SN(32, 1)); + RzILOpPure *op_ADD_497 = ADD(op_LSHIFT_466, op_LSHIFT_496); + RzILOpPure *cond_584 = ITE(DUP(op_EQ_436), op_ADD_497, VARL("h_tmp428")); + RzILOpPure *op_AND_586 = LOGAND(cond_584, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_590 = SHIFTL0(op_AND_586, SN(32, 0)); + RzILOpPure *op_OR_591 = LOGOR(op_AND_305, op_LSHIFT_590); + RzILOpEffect *op_ASSIGN_592 = WRITE_REG(bundle, Rdd_op, op_OR_591); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) (( ...; + RzILOpEffect *seq_593 = SEQN(2, seq_583, op_ASSIGN_592); + + RzILOpEffect *instruction_sequence = SEQN(2, seq_297, seq_593); + return instruction_sequence; +} + +// Rd = vrcmpys(Rss,Rtt):<<1:rnd:sat:raw:hi +RzILOpEffect *hex_il_op_m2_vrcmpys_s1rp_h(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_214 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff))) >> 0x0) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff))) >> 0x10) & ((st64) 0xffff))))) << 0x1) + ((st64) 0x8000)), 
0x0, 0x20) == (((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff))) >> 0x0) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff))) >> 0x10) & ((st64) 0xffff))))) << 0x1) + ((st64) 0x8000))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff))) >> 0x0) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff))) >> 0x10) & ((st64) 0xffff))))) << 0x1) + ((st64) 0x8000) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_16 = SHIFTRA(Rss, SN(32, 16)); + RzILOpPure *op_AND_19 = LOGAND(op_RSHIFT_16, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_26 = SHIFTRA(Rtt, SN(32, 0x20)); + RzILOpPure *op_AND_28 = LOGAND(op_RSHIFT_26, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_34 = SHIFTRA(CAST(64, MSB(CAST(32, MSB(op_AND_28), DUP(op_AND_28))), CAST(32, MSB(DUP(op_AND_28)), DUP(op_AND_28))), SN(32, 0)); + RzILOpPure *op_AND_37 = LOGAND(op_RSHIFT_34, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_40 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_19), DUP(op_AND_19))), CAST(16, MSB(DUP(op_AND_19)), DUP(op_AND_19))), CAST(32, MSB(CAST(16, MSB(op_AND_37), DUP(op_AND_37))), CAST(16, MSB(DUP(op_AND_37)), DUP(op_AND_37)))); + RzILOpPure *op_LSHIFT_43 = SHIFTL0(CAST(64, MSB(op_MUL_40), DUP(op_MUL_40)), SN(32, 1)); + RzILOpPure *op_RSHIFT_47 = SHIFTRA(DUP(Rss), SN(32, 0x30)); + RzILOpPure *op_AND_50 = LOGAND(op_RSHIFT_47, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_56 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_58 = LOGAND(op_RSHIFT_56, SN(64, 
0xffffffff)); + RzILOpPure *op_RSHIFT_64 = SHIFTRA(CAST(64, MSB(CAST(32, MSB(op_AND_58), DUP(op_AND_58))), CAST(32, MSB(DUP(op_AND_58)), DUP(op_AND_58))), SN(32, 16)); + RzILOpPure *op_AND_67 = LOGAND(op_RSHIFT_64, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_70 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_50), DUP(op_AND_50))), CAST(16, MSB(DUP(op_AND_50)), DUP(op_AND_50))), CAST(32, MSB(CAST(16, MSB(op_AND_67), DUP(op_AND_67))), CAST(16, MSB(DUP(op_AND_67)), DUP(op_AND_67)))); + RzILOpPure *op_LSHIFT_73 = SHIFTL0(CAST(64, MSB(op_MUL_70), DUP(op_MUL_70)), SN(32, 1)); + RzILOpPure *op_ADD_74 = ADD(op_LSHIFT_43, op_LSHIFT_73); + RzILOpPure *op_ADD_77 = ADD(op_ADD_74, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_86 = SHIFTRA(DUP(Rss), SN(32, 16)); + RzILOpPure *op_AND_89 = LOGAND(op_RSHIFT_86, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_95 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_97 = LOGAND(op_RSHIFT_95, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_103 = SHIFTRA(CAST(64, MSB(CAST(32, MSB(op_AND_97), DUP(op_AND_97))), CAST(32, MSB(DUP(op_AND_97)), DUP(op_AND_97))), SN(32, 0)); + RzILOpPure *op_AND_106 = LOGAND(op_RSHIFT_103, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_109 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_89), DUP(op_AND_89))), CAST(16, MSB(DUP(op_AND_89)), DUP(op_AND_89))), CAST(32, MSB(CAST(16, MSB(op_AND_106), DUP(op_AND_106))), CAST(16, MSB(DUP(op_AND_106)), DUP(op_AND_106)))); + RzILOpPure *op_LSHIFT_112 = SHIFTL0(CAST(64, MSB(op_MUL_109), DUP(op_MUL_109)), SN(32, 1)); + RzILOpPure *op_RSHIFT_116 = SHIFTRA(DUP(Rss), SN(32, 0x30)); + RzILOpPure *op_AND_119 = LOGAND(op_RSHIFT_116, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_125 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_127 = LOGAND(op_RSHIFT_125, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_133 = SHIFTRA(CAST(64, MSB(CAST(32, MSB(op_AND_127), 
DUP(op_AND_127))), CAST(32, MSB(DUP(op_AND_127)), DUP(op_AND_127))), SN(32, 16)); + RzILOpPure *op_AND_136 = LOGAND(op_RSHIFT_133, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_139 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_119), DUP(op_AND_119))), CAST(16, MSB(DUP(op_AND_119)), DUP(op_AND_119))), CAST(32, MSB(CAST(16, MSB(op_AND_136), DUP(op_AND_136))), CAST(16, MSB(DUP(op_AND_136)), DUP(op_AND_136)))); + RzILOpPure *op_LSHIFT_142 = SHIFTL0(CAST(64, MSB(op_MUL_139), DUP(op_MUL_139)), SN(32, 1)); + RzILOpPure *op_ADD_143 = ADD(op_LSHIFT_112, op_LSHIFT_142); + RzILOpPure *op_ADD_146 = ADD(op_ADD_143, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_EQ_147 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_ADD_77), SN(32, 0), SN(32, 0x20)), op_ADD_146); + RzILOpPure *op_RSHIFT_218 = SHIFTRA(DUP(Rss), SN(32, 16)); + RzILOpPure *op_AND_221 = LOGAND(op_RSHIFT_218, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_227 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_229 = LOGAND(op_RSHIFT_227, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_235 = SHIFTRA(CAST(64, MSB(CAST(32, MSB(op_AND_229), DUP(op_AND_229))), CAST(32, MSB(DUP(op_AND_229)), DUP(op_AND_229))), SN(32, 0)); + RzILOpPure *op_AND_238 = LOGAND(op_RSHIFT_235, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_241 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_221), DUP(op_AND_221))), CAST(16, MSB(DUP(op_AND_221)), DUP(op_AND_221))), CAST(32, MSB(CAST(16, MSB(op_AND_238), DUP(op_AND_238))), CAST(16, MSB(DUP(op_AND_238)), DUP(op_AND_238)))); + RzILOpPure *op_LSHIFT_244 = SHIFTL0(CAST(64, MSB(op_MUL_241), DUP(op_MUL_241)), SN(32, 1)); + RzILOpPure *op_RSHIFT_248 = SHIFTRA(DUP(Rss), SN(32, 0x30)); + RzILOpPure *op_AND_251 = LOGAND(op_RSHIFT_248, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_257 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_259 = LOGAND(op_RSHIFT_257, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_265 = 
SHIFTRA(CAST(64, MSB(CAST(32, MSB(op_AND_259), DUP(op_AND_259))), CAST(32, MSB(DUP(op_AND_259)), DUP(op_AND_259))), SN(32, 16)); + RzILOpPure *op_AND_268 = LOGAND(op_RSHIFT_265, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_271 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_251), DUP(op_AND_251))), CAST(16, MSB(DUP(op_AND_251)), DUP(op_AND_251))), CAST(32, MSB(CAST(16, MSB(op_AND_268), DUP(op_AND_268))), CAST(16, MSB(DUP(op_AND_268)), DUP(op_AND_268)))); + RzILOpPure *op_LSHIFT_274 = SHIFTL0(CAST(64, MSB(op_MUL_271), DUP(op_MUL_271)), SN(32, 1)); + RzILOpPure *op_ADD_275 = ADD(op_LSHIFT_244, op_LSHIFT_274); + RzILOpPure *op_ADD_278 = ADD(op_ADD_275, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_LT_281 = SLT(op_ADD_278, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_286 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_287 = NEG(op_LSHIFT_286); + RzILOpPure *op_LSHIFT_292 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_295 = SUB(op_LSHIFT_292, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_296 = ITE(op_LT_281, op_NEG_287, op_SUB_295); + RzILOpEffect *gcc_expr_297 = BRANCH(op_EQ_147, EMPTY(), set_usr_field_call_214); + + // h_tmp429 = HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff))) >> 0x0) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff))) >> 0x10) & ((st64) 0xffff))))) << 0x1) + ((st64) 0x8000)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff))) >> 0x0) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff))) >> 0x10) & ((st64) 0xffff))))) << 0x1) + ((st64) 0x8000))) {{}} else 
{set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff))) >> 0x0) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff))) >> 0x10) & ((st64) 0xffff))))) << 0x1) + ((st64) 0x8000) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_299 = SETL("h_tmp429", cond_296); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16 ...; + RzILOpEffect *seq_300 = SEQN(2, gcc_expr_297, op_ASSIGN_hybrid_tmp_299); + + // Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xffff << 0x10)))) | (((ut64) (((st32) ((st16) ((((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff))) >> 0x0) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff))) >> 0x10) & ((st64) 0xffff))))) << 0x1) + ((st64) 0x8000)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff))) >> 0x0) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff))) >> 0x10) & ((st64) 0xffff))))) << 0x1) + ((st64) 0x8000)) ? 
(((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff))) >> 0x0) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff))) >> 0x10) & ((st64) 0xffff))))) << 0x1) + ((st64) 0x8000) : h_tmp429) >> 0x10) & ((st64) 0xffff)))) & 0xffff)) << 0x10))); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(SN(64, 0xffff), SN(32, 16)); + RzILOpPure *op_NOT_6 = LOGNOT(op_LSHIFT_5); + RzILOpPure *op_AND_8 = LOGAND(CAST(64, MSB(READ_REG(pkt, Rd_op, true)), READ_REG(pkt, Rd_op, true)), op_NOT_6); + RzILOpPure *op_RSHIFT_151 = SHIFTRA(DUP(Rss), SN(32, 16)); + RzILOpPure *op_AND_154 = LOGAND(op_RSHIFT_151, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_160 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_162 = LOGAND(op_RSHIFT_160, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_168 = SHIFTRA(CAST(64, MSB(CAST(32, MSB(op_AND_162), DUP(op_AND_162))), CAST(32, MSB(DUP(op_AND_162)), DUP(op_AND_162))), SN(32, 0)); + RzILOpPure *op_AND_171 = LOGAND(op_RSHIFT_168, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_174 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_154), DUP(op_AND_154))), CAST(16, MSB(DUP(op_AND_154)), DUP(op_AND_154))), CAST(32, MSB(CAST(16, MSB(op_AND_171), DUP(op_AND_171))), CAST(16, MSB(DUP(op_AND_171)), DUP(op_AND_171)))); + RzILOpPure *op_LSHIFT_177 = SHIFTL0(CAST(64, MSB(op_MUL_174), DUP(op_MUL_174)), SN(32, 1)); + RzILOpPure *op_RSHIFT_181 = SHIFTRA(DUP(Rss), SN(32, 0x30)); + RzILOpPure *op_AND_184 = LOGAND(op_RSHIFT_181, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_190 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_192 = LOGAND(op_RSHIFT_190, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_198 = SHIFTRA(CAST(64, MSB(CAST(32, MSB(op_AND_192), DUP(op_AND_192))), CAST(32, MSB(DUP(op_AND_192)), DUP(op_AND_192))), SN(32, 16)); + RzILOpPure 
*op_AND_201 = LOGAND(op_RSHIFT_198, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_204 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_184), DUP(op_AND_184))), CAST(16, MSB(DUP(op_AND_184)), DUP(op_AND_184))), CAST(32, MSB(CAST(16, MSB(op_AND_201), DUP(op_AND_201))), CAST(16, MSB(DUP(op_AND_201)), DUP(op_AND_201)))); + RzILOpPure *op_LSHIFT_207 = SHIFTL0(CAST(64, MSB(op_MUL_204), DUP(op_MUL_204)), SN(32, 1)); + RzILOpPure *op_ADD_208 = ADD(op_LSHIFT_177, op_LSHIFT_207); + RzILOpPure *op_ADD_211 = ADD(op_ADD_208, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *cond_301 = ITE(DUP(op_EQ_147), op_ADD_211, VARL("h_tmp429")); + RzILOpPure *op_RSHIFT_305 = SHIFTRA(cond_301, SN(32, 16)); + RzILOpPure *op_AND_308 = LOGAND(op_RSHIFT_305, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_AND_312 = LOGAND(CAST(32, MSB(CAST(16, MSB(op_AND_308), DUP(op_AND_308))), CAST(16, MSB(DUP(op_AND_308)), DUP(op_AND_308))), SN(32, 0xffff)); + RzILOpPure *op_LSHIFT_317 = SHIFTL0(CAST(64, IL_FALSE, op_AND_312), SN(32, 16)); + RzILOpPure *op_OR_319 = LOGOR(CAST(64, IL_FALSE, op_AND_8), op_LSHIFT_317); + RzILOpEffect *op_ASSIGN_321 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, op_OR_319)); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) (( ...; + RzILOpEffect *seq_322 = SEQN(2, seq_300, op_ASSIGN_321); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_535 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff))) >> 0x0) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff))) >> 0x10) & ((st64) 0xffff))))) << 0x1) + ((st64) 0x8000)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rss >> 0x0) & 
((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff))) >> 0x0) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff))) >> 0x10) & ((st64) 0xffff))))) << 0x1) + ((st64) 0x8000))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff))) >> 0x0) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff))) >> 0x10) & ((st64) 0xffff))))) << 0x1) + ((st64) 0x8000) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_338 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_341 = LOGAND(op_RSHIFT_338, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_347 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_349 = LOGAND(op_RSHIFT_347, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_355 = SHIFTRA(CAST(64, MSB(CAST(32, MSB(op_AND_349), DUP(op_AND_349))), CAST(32, MSB(DUP(op_AND_349)), DUP(op_AND_349))), SN(32, 0)); + RzILOpPure *op_AND_358 = LOGAND(op_RSHIFT_355, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_361 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_341), DUP(op_AND_341))), CAST(16, MSB(DUP(op_AND_341)), DUP(op_AND_341))), CAST(32, MSB(CAST(16, MSB(op_AND_358), DUP(op_AND_358))), CAST(16, MSB(DUP(op_AND_358)), DUP(op_AND_358)))); + RzILOpPure *op_LSHIFT_364 = SHIFTL0(CAST(64, MSB(op_MUL_361), DUP(op_MUL_361)), SN(32, 1)); + RzILOpPure *op_RSHIFT_368 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_371 = LOGAND(op_RSHIFT_368, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_377 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_379 = LOGAND(op_RSHIFT_377, SN(64, 0xffffffff)); + 
RzILOpPure *op_RSHIFT_385 = SHIFTRA(CAST(64, MSB(CAST(32, MSB(op_AND_379), DUP(op_AND_379))), CAST(32, MSB(DUP(op_AND_379)), DUP(op_AND_379))), SN(32, 16)); + RzILOpPure *op_AND_388 = LOGAND(op_RSHIFT_385, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_391 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_371), DUP(op_AND_371))), CAST(16, MSB(DUP(op_AND_371)), DUP(op_AND_371))), CAST(32, MSB(CAST(16, MSB(op_AND_388), DUP(op_AND_388))), CAST(16, MSB(DUP(op_AND_388)), DUP(op_AND_388)))); + RzILOpPure *op_LSHIFT_394 = SHIFTL0(CAST(64, MSB(op_MUL_391), DUP(op_MUL_391)), SN(32, 1)); + RzILOpPure *op_ADD_395 = ADD(op_LSHIFT_364, op_LSHIFT_394); + RzILOpPure *op_ADD_398 = ADD(op_ADD_395, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_407 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_410 = LOGAND(op_RSHIFT_407, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_416 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_418 = LOGAND(op_RSHIFT_416, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_424 = SHIFTRA(CAST(64, MSB(CAST(32, MSB(op_AND_418), DUP(op_AND_418))), CAST(32, MSB(DUP(op_AND_418)), DUP(op_AND_418))), SN(32, 0)); + RzILOpPure *op_AND_427 = LOGAND(op_RSHIFT_424, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_430 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_410), DUP(op_AND_410))), CAST(16, MSB(DUP(op_AND_410)), DUP(op_AND_410))), CAST(32, MSB(CAST(16, MSB(op_AND_427), DUP(op_AND_427))), CAST(16, MSB(DUP(op_AND_427)), DUP(op_AND_427)))); + RzILOpPure *op_LSHIFT_433 = SHIFTL0(CAST(64, MSB(op_MUL_430), DUP(op_MUL_430)), SN(32, 1)); + RzILOpPure *op_RSHIFT_437 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_440 = LOGAND(op_RSHIFT_437, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_446 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_448 = LOGAND(op_RSHIFT_446, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_454 = SHIFTRA(CAST(64, MSB(CAST(32, 
MSB(op_AND_448), DUP(op_AND_448))), CAST(32, MSB(DUP(op_AND_448)), DUP(op_AND_448))), SN(32, 16)); + RzILOpPure *op_AND_457 = LOGAND(op_RSHIFT_454, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_460 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_440), DUP(op_AND_440))), CAST(16, MSB(DUP(op_AND_440)), DUP(op_AND_440))), CAST(32, MSB(CAST(16, MSB(op_AND_457), DUP(op_AND_457))), CAST(16, MSB(DUP(op_AND_457)), DUP(op_AND_457)))); + RzILOpPure *op_LSHIFT_463 = SHIFTL0(CAST(64, MSB(op_MUL_460), DUP(op_MUL_460)), SN(32, 1)); + RzILOpPure *op_ADD_464 = ADD(op_LSHIFT_433, op_LSHIFT_463); + RzILOpPure *op_ADD_467 = ADD(op_ADD_464, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_EQ_468 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_ADD_398), SN(32, 0), SN(32, 0x20)), op_ADD_467); + RzILOpPure *op_RSHIFT_539 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_542 = LOGAND(op_RSHIFT_539, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_548 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_550 = LOGAND(op_RSHIFT_548, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_556 = SHIFTRA(CAST(64, MSB(CAST(32, MSB(op_AND_550), DUP(op_AND_550))), CAST(32, MSB(DUP(op_AND_550)), DUP(op_AND_550))), SN(32, 0)); + RzILOpPure *op_AND_559 = LOGAND(op_RSHIFT_556, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_562 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_542), DUP(op_AND_542))), CAST(16, MSB(DUP(op_AND_542)), DUP(op_AND_542))), CAST(32, MSB(CAST(16, MSB(op_AND_559), DUP(op_AND_559))), CAST(16, MSB(DUP(op_AND_559)), DUP(op_AND_559)))); + RzILOpPure *op_LSHIFT_565 = SHIFTL0(CAST(64, MSB(op_MUL_562), DUP(op_MUL_562)), SN(32, 1)); + RzILOpPure *op_RSHIFT_569 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_572 = LOGAND(op_RSHIFT_569, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_578 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_580 = LOGAND(op_RSHIFT_578, SN(64, 0xffffffff)); + RzILOpPure 
*op_RSHIFT_586 = SHIFTRA(CAST(64, MSB(CAST(32, MSB(op_AND_580), DUP(op_AND_580))), CAST(32, MSB(DUP(op_AND_580)), DUP(op_AND_580))), SN(32, 16)); + RzILOpPure *op_AND_589 = LOGAND(op_RSHIFT_586, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_592 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_572), DUP(op_AND_572))), CAST(16, MSB(DUP(op_AND_572)), DUP(op_AND_572))), CAST(32, MSB(CAST(16, MSB(op_AND_589), DUP(op_AND_589))), CAST(16, MSB(DUP(op_AND_589)), DUP(op_AND_589)))); + RzILOpPure *op_LSHIFT_595 = SHIFTL0(CAST(64, MSB(op_MUL_592), DUP(op_MUL_592)), SN(32, 1)); + RzILOpPure *op_ADD_596 = ADD(op_LSHIFT_565, op_LSHIFT_595); + RzILOpPure *op_ADD_599 = ADD(op_ADD_596, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_LT_602 = SLT(op_ADD_599, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_607 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_608 = NEG(op_LSHIFT_607); + RzILOpPure *op_LSHIFT_613 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_616 = SUB(op_LSHIFT_613, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_617 = ITE(op_LT_602, op_NEG_608, op_SUB_616); + RzILOpEffect *gcc_expr_618 = BRANCH(op_EQ_468, EMPTY(), set_usr_field_call_535); + + // h_tmp430 = HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff))) >> 0x0) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff))) >> 0x10) & ((st64) 0xffff))))) << 0x1) + ((st64) 0x8000)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff))) >> 0x0) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff))) >> 0x10) & ((st64) 0xffff))))) << 0x1) + ((st64) 0x8000))) 
{{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff))) >> 0x0) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff))) >> 0x10) & ((st64) 0xffff))))) << 0x1) + ((st64) 0x8000) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_620 = SETL("h_tmp430", cond_617); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16 ...; + RzILOpEffect *seq_621 = SEQN(2, gcc_expr_618, op_ASSIGN_hybrid_tmp_620); + + // Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xffff << 0x0)))) | (((ut64) (((st32) ((st16) ((((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff))) >> 0x0) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff))) >> 0x10) & ((st64) 0xffff))))) << 0x1) + ((st64) 0x8000)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff))) >> 0x0) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff))) >> 0x10) & ((st64) 0xffff))))) << 0x1) + ((st64) 0x8000)) ? 
(((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff))) >> 0x0) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff))) >> 0x10) & ((st64) 0xffff))))) << 0x1) + ((st64) 0x8000) : h_tmp430) >> 0x10) & ((st64) 0xffff)))) & 0xffff)) << 0x0))); + RzILOpPure *op_LSHIFT_328 = SHIFTL0(SN(64, 0xffff), SN(32, 0)); + RzILOpPure *op_NOT_329 = LOGNOT(op_LSHIFT_328); + RzILOpPure *op_AND_331 = LOGAND(CAST(64, MSB(READ_REG(pkt, Rd_op, true)), READ_REG(pkt, Rd_op, true)), op_NOT_329); + RzILOpPure *op_RSHIFT_472 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_475 = LOGAND(op_RSHIFT_472, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_481 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_483 = LOGAND(op_RSHIFT_481, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_489 = SHIFTRA(CAST(64, MSB(CAST(32, MSB(op_AND_483), DUP(op_AND_483))), CAST(32, MSB(DUP(op_AND_483)), DUP(op_AND_483))), SN(32, 0)); + RzILOpPure *op_AND_492 = LOGAND(op_RSHIFT_489, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_495 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_475), DUP(op_AND_475))), CAST(16, MSB(DUP(op_AND_475)), DUP(op_AND_475))), CAST(32, MSB(CAST(16, MSB(op_AND_492), DUP(op_AND_492))), CAST(16, MSB(DUP(op_AND_492)), DUP(op_AND_492)))); + RzILOpPure *op_LSHIFT_498 = SHIFTL0(CAST(64, MSB(op_MUL_495), DUP(op_MUL_495)), SN(32, 1)); + RzILOpPure *op_RSHIFT_502 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_505 = LOGAND(op_RSHIFT_502, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_511 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_513 = LOGAND(op_RSHIFT_511, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_519 = SHIFTRA(CAST(64, MSB(CAST(32, MSB(op_AND_513), DUP(op_AND_513))), CAST(32, MSB(DUP(op_AND_513)), DUP(op_AND_513))), SN(32, 16)); + 
RzILOpPure *op_AND_522 = LOGAND(op_RSHIFT_519, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_525 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_505), DUP(op_AND_505))), CAST(16, MSB(DUP(op_AND_505)), DUP(op_AND_505))), CAST(32, MSB(CAST(16, MSB(op_AND_522), DUP(op_AND_522))), CAST(16, MSB(DUP(op_AND_522)), DUP(op_AND_522)))); + RzILOpPure *op_LSHIFT_528 = SHIFTL0(CAST(64, MSB(op_MUL_525), DUP(op_MUL_525)), SN(32, 1)); + RzILOpPure *op_ADD_529 = ADD(op_LSHIFT_498, op_LSHIFT_528); + RzILOpPure *op_ADD_532 = ADD(op_ADD_529, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *cond_622 = ITE(DUP(op_EQ_468), op_ADD_532, VARL("h_tmp430")); + RzILOpPure *op_RSHIFT_626 = SHIFTRA(cond_622, SN(32, 16)); + RzILOpPure *op_AND_629 = LOGAND(op_RSHIFT_626, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_AND_633 = LOGAND(CAST(32, MSB(CAST(16, MSB(op_AND_629), DUP(op_AND_629))), CAST(16, MSB(DUP(op_AND_629)), DUP(op_AND_629))), SN(32, 0xffff)); + RzILOpPure *op_LSHIFT_638 = SHIFTL0(CAST(64, IL_FALSE, op_AND_633), SN(32, 0)); + RzILOpPure *op_OR_640 = LOGOR(CAST(64, IL_FALSE, op_AND_331), op_LSHIFT_638); + RzILOpEffect *op_ASSIGN_642 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, op_OR_640)); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) (( ...; + RzILOpEffect *seq_643 = SEQN(2, seq_621, op_ASSIGN_642); + + RzILOpEffect *instruction_sequence = SEQN(2, seq_322, seq_643); + return instruction_sequence; +} + +// Rd = vrcmpys(Rss,Rtt):<<1:rnd:sat:raw:lo +RzILOpEffect *hex_il_op_m2_vrcmpys_s1rp_l(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect 
*set_usr_field_call_214 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff))) >> 0x0) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff))) >> 0x10) & ((st64) 0xffff))))) << 0x1) + ((st64) 0x8000)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff))) >> 0x0) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff))) >> 0x10) & ((st64) 0xffff))))) << 0x1) + ((st64) 0x8000))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff))) >> 0x0) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff))) >> 0x10) & ((st64) 0xffff))))) << 0x1) + ((st64) 0x8000) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_16 = SHIFTRA(Rss, SN(32, 16)); + RzILOpPure *op_AND_19 = LOGAND(op_RSHIFT_16, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_26 = SHIFTRA(Rtt, SN(32, 0)); + RzILOpPure *op_AND_28 = LOGAND(op_RSHIFT_26, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_34 = SHIFTRA(CAST(64, MSB(CAST(32, MSB(op_AND_28), DUP(op_AND_28))), CAST(32, MSB(DUP(op_AND_28)), DUP(op_AND_28))), SN(32, 0)); + RzILOpPure *op_AND_37 = LOGAND(op_RSHIFT_34, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_40 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_19), DUP(op_AND_19))), CAST(16, MSB(DUP(op_AND_19)), DUP(op_AND_19))), CAST(32, MSB(CAST(16, MSB(op_AND_37), DUP(op_AND_37))), CAST(16, MSB(DUP(op_AND_37)), DUP(op_AND_37)))); + RzILOpPure *op_LSHIFT_43 = SHIFTL0(CAST(64, MSB(op_MUL_40), DUP(op_MUL_40)), SN(32, 1)); + RzILOpPure *op_RSHIFT_47 = SHIFTRA(DUP(Rss), SN(32, 0x30)); + RzILOpPure *op_AND_50 = LOGAND(op_RSHIFT_47, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_56 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_58 = LOGAND(op_RSHIFT_56, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_64 = SHIFTRA(CAST(64, MSB(CAST(32, MSB(op_AND_58), DUP(op_AND_58))), CAST(32, MSB(DUP(op_AND_58)), DUP(op_AND_58))), SN(32, 16)); + RzILOpPure *op_AND_67 = LOGAND(op_RSHIFT_64, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_70 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_50), DUP(op_AND_50))), CAST(16, MSB(DUP(op_AND_50)), DUP(op_AND_50))), CAST(32, MSB(CAST(16, MSB(op_AND_67), DUP(op_AND_67))), CAST(16, MSB(DUP(op_AND_67)), DUP(op_AND_67)))); + RzILOpPure *op_LSHIFT_73 = SHIFTL0(CAST(64, MSB(op_MUL_70), DUP(op_MUL_70)), SN(32, 1)); + RzILOpPure *op_ADD_74 = ADD(op_LSHIFT_43, op_LSHIFT_73); + RzILOpPure *op_ADD_77 = ADD(op_ADD_74, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_86 = SHIFTRA(DUP(Rss), SN(32, 16)); + RzILOpPure *op_AND_89 = 
LOGAND(op_RSHIFT_86, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_95 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_97 = LOGAND(op_RSHIFT_95, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_103 = SHIFTRA(CAST(64, MSB(CAST(32, MSB(op_AND_97), DUP(op_AND_97))), CAST(32, MSB(DUP(op_AND_97)), DUP(op_AND_97))), SN(32, 0)); + RzILOpPure *op_AND_106 = LOGAND(op_RSHIFT_103, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_109 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_89), DUP(op_AND_89))), CAST(16, MSB(DUP(op_AND_89)), DUP(op_AND_89))), CAST(32, MSB(CAST(16, MSB(op_AND_106), DUP(op_AND_106))), CAST(16, MSB(DUP(op_AND_106)), DUP(op_AND_106)))); + RzILOpPure *op_LSHIFT_112 = SHIFTL0(CAST(64, MSB(op_MUL_109), DUP(op_MUL_109)), SN(32, 1)); + RzILOpPure *op_RSHIFT_116 = SHIFTRA(DUP(Rss), SN(32, 0x30)); + RzILOpPure *op_AND_119 = LOGAND(op_RSHIFT_116, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_125 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_127 = LOGAND(op_RSHIFT_125, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_133 = SHIFTRA(CAST(64, MSB(CAST(32, MSB(op_AND_127), DUP(op_AND_127))), CAST(32, MSB(DUP(op_AND_127)), DUP(op_AND_127))), SN(32, 16)); + RzILOpPure *op_AND_136 = LOGAND(op_RSHIFT_133, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_139 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_119), DUP(op_AND_119))), CAST(16, MSB(DUP(op_AND_119)), DUP(op_AND_119))), CAST(32, MSB(CAST(16, MSB(op_AND_136), DUP(op_AND_136))), CAST(16, MSB(DUP(op_AND_136)), DUP(op_AND_136)))); + RzILOpPure *op_LSHIFT_142 = SHIFTL0(CAST(64, MSB(op_MUL_139), DUP(op_MUL_139)), SN(32, 1)); + RzILOpPure *op_ADD_143 = ADD(op_LSHIFT_112, op_LSHIFT_142); + RzILOpPure *op_ADD_146 = ADD(op_ADD_143, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_EQ_147 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_ADD_77), SN(32, 0), SN(32, 0x20)), op_ADD_146); + RzILOpPure *op_RSHIFT_218 = SHIFTRA(DUP(Rss), SN(32, 16)); + 
RzILOpPure *op_AND_221 = LOGAND(op_RSHIFT_218, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_227 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_229 = LOGAND(op_RSHIFT_227, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_235 = SHIFTRA(CAST(64, MSB(CAST(32, MSB(op_AND_229), DUP(op_AND_229))), CAST(32, MSB(DUP(op_AND_229)), DUP(op_AND_229))), SN(32, 0)); + RzILOpPure *op_AND_238 = LOGAND(op_RSHIFT_235, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_241 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_221), DUP(op_AND_221))), CAST(16, MSB(DUP(op_AND_221)), DUP(op_AND_221))), CAST(32, MSB(CAST(16, MSB(op_AND_238), DUP(op_AND_238))), CAST(16, MSB(DUP(op_AND_238)), DUP(op_AND_238)))); + RzILOpPure *op_LSHIFT_244 = SHIFTL0(CAST(64, MSB(op_MUL_241), DUP(op_MUL_241)), SN(32, 1)); + RzILOpPure *op_RSHIFT_248 = SHIFTRA(DUP(Rss), SN(32, 0x30)); + RzILOpPure *op_AND_251 = LOGAND(op_RSHIFT_248, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_257 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_259 = LOGAND(op_RSHIFT_257, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_265 = SHIFTRA(CAST(64, MSB(CAST(32, MSB(op_AND_259), DUP(op_AND_259))), CAST(32, MSB(DUP(op_AND_259)), DUP(op_AND_259))), SN(32, 16)); + RzILOpPure *op_AND_268 = LOGAND(op_RSHIFT_265, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_271 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_251), DUP(op_AND_251))), CAST(16, MSB(DUP(op_AND_251)), DUP(op_AND_251))), CAST(32, MSB(CAST(16, MSB(op_AND_268), DUP(op_AND_268))), CAST(16, MSB(DUP(op_AND_268)), DUP(op_AND_268)))); + RzILOpPure *op_LSHIFT_274 = SHIFTL0(CAST(64, MSB(op_MUL_271), DUP(op_MUL_271)), SN(32, 1)); + RzILOpPure *op_ADD_275 = ADD(op_LSHIFT_244, op_LSHIFT_274); + RzILOpPure *op_ADD_278 = ADD(op_ADD_275, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_LT_281 = SLT(op_ADD_278, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_286 = SHIFTL0(SN(64, 1), SN(32, 
31)); + RzILOpPure *op_NEG_287 = NEG(op_LSHIFT_286); + RzILOpPure *op_LSHIFT_292 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_295 = SUB(op_LSHIFT_292, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_296 = ITE(op_LT_281, op_NEG_287, op_SUB_295); + RzILOpEffect *gcc_expr_297 = BRANCH(op_EQ_147, EMPTY(), set_usr_field_call_214); + + // h_tmp431 = HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff))) >> 0x0) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff))) >> 0x10) & ((st64) 0xffff))))) << 0x1) + ((st64) 0x8000)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff))) >> 0x0) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff))) >> 0x10) & ((st64) 0xffff))))) << 0x1) + ((st64) 0x8000))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff))) >> 0x0) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff))) >> 0x10) & ((st64) 0xffff))))) << 0x1) + ((st64) 0x8000) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_299 = SETL("h_tmp431", cond_296); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16 ...; + RzILOpEffect *seq_300 = SEQN(2, gcc_expr_297, op_ASSIGN_hybrid_tmp_299); + + // Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xffff << 0x10)))) | (((ut64) (((st32) ((st16) ((((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff))) >> 0x0) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff))) >> 0x10) & ((st64) 0xffff))))) << 0x1) + ((st64) 0x8000)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff))) >> 0x0) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff))) >> 0x10) & ((st64) 0xffff))))) << 0x1) + ((st64) 0x8000)) ? 
(((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff))) >> 0x0) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff))) >> 0x10) & ((st64) 0xffff))))) << 0x1) + ((st64) 0x8000) : h_tmp431) >> 0x10) & ((st64) 0xffff)))) & 0xffff)) << 0x10))); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(SN(64, 0xffff), SN(32, 16)); + RzILOpPure *op_NOT_6 = LOGNOT(op_LSHIFT_5); + RzILOpPure *op_AND_8 = LOGAND(CAST(64, MSB(READ_REG(pkt, Rd_op, true)), READ_REG(pkt, Rd_op, true)), op_NOT_6); + RzILOpPure *op_RSHIFT_151 = SHIFTRA(DUP(Rss), SN(32, 16)); + RzILOpPure *op_AND_154 = LOGAND(op_RSHIFT_151, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_160 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_162 = LOGAND(op_RSHIFT_160, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_168 = SHIFTRA(CAST(64, MSB(CAST(32, MSB(op_AND_162), DUP(op_AND_162))), CAST(32, MSB(DUP(op_AND_162)), DUP(op_AND_162))), SN(32, 0)); + RzILOpPure *op_AND_171 = LOGAND(op_RSHIFT_168, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_174 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_154), DUP(op_AND_154))), CAST(16, MSB(DUP(op_AND_154)), DUP(op_AND_154))), CAST(32, MSB(CAST(16, MSB(op_AND_171), DUP(op_AND_171))), CAST(16, MSB(DUP(op_AND_171)), DUP(op_AND_171)))); + RzILOpPure *op_LSHIFT_177 = SHIFTL0(CAST(64, MSB(op_MUL_174), DUP(op_MUL_174)), SN(32, 1)); + RzILOpPure *op_RSHIFT_181 = SHIFTRA(DUP(Rss), SN(32, 0x30)); + RzILOpPure *op_AND_184 = LOGAND(op_RSHIFT_181, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_190 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_192 = LOGAND(op_RSHIFT_190, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_198 = SHIFTRA(CAST(64, MSB(CAST(32, MSB(op_AND_192), DUP(op_AND_192))), CAST(32, MSB(DUP(op_AND_192)), DUP(op_AND_192))), SN(32, 16)); + RzILOpPure *op_AND_201 
= LOGAND(op_RSHIFT_198, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_204 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_184), DUP(op_AND_184))), CAST(16, MSB(DUP(op_AND_184)), DUP(op_AND_184))), CAST(32, MSB(CAST(16, MSB(op_AND_201), DUP(op_AND_201))), CAST(16, MSB(DUP(op_AND_201)), DUP(op_AND_201)))); + RzILOpPure *op_LSHIFT_207 = SHIFTL0(CAST(64, MSB(op_MUL_204), DUP(op_MUL_204)), SN(32, 1)); + RzILOpPure *op_ADD_208 = ADD(op_LSHIFT_177, op_LSHIFT_207); + RzILOpPure *op_ADD_211 = ADD(op_ADD_208, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *cond_301 = ITE(DUP(op_EQ_147), op_ADD_211, VARL("h_tmp431")); + RzILOpPure *op_RSHIFT_305 = SHIFTRA(cond_301, SN(32, 16)); + RzILOpPure *op_AND_308 = LOGAND(op_RSHIFT_305, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_AND_312 = LOGAND(CAST(32, MSB(CAST(16, MSB(op_AND_308), DUP(op_AND_308))), CAST(16, MSB(DUP(op_AND_308)), DUP(op_AND_308))), SN(32, 0xffff)); + RzILOpPure *op_LSHIFT_317 = SHIFTL0(CAST(64, IL_FALSE, op_AND_312), SN(32, 16)); + RzILOpPure *op_OR_319 = LOGOR(CAST(64, IL_FALSE, op_AND_8), op_LSHIFT_317); + RzILOpEffect *op_ASSIGN_321 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, op_OR_319)); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) (( ...; + RzILOpEffect *seq_322 = SEQN(2, seq_300, op_ASSIGN_321); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_535 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff))) >> 0x0) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff))) >> 0x10) & ((st64) 0xffff))))) << 0x1) + ((st64) 0x8000)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 
0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff))) >> 0x0) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff))) >> 0x10) & ((st64) 0xffff))))) << 0x1) + ((st64) 0x8000))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff))) >> 0x0) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff))) >> 0x10) & ((st64) 0xffff))))) << 0x1) + ((st64) 0x8000) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_338 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_341 = LOGAND(op_RSHIFT_338, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_347 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_349 = LOGAND(op_RSHIFT_347, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_355 = SHIFTRA(CAST(64, MSB(CAST(32, MSB(op_AND_349), DUP(op_AND_349))), CAST(32, MSB(DUP(op_AND_349)), DUP(op_AND_349))), SN(32, 0)); + RzILOpPure *op_AND_358 = LOGAND(op_RSHIFT_355, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_361 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_341), DUP(op_AND_341))), CAST(16, MSB(DUP(op_AND_341)), DUP(op_AND_341))), CAST(32, MSB(CAST(16, MSB(op_AND_358), DUP(op_AND_358))), CAST(16, MSB(DUP(op_AND_358)), DUP(op_AND_358)))); + RzILOpPure *op_LSHIFT_364 = SHIFTL0(CAST(64, MSB(op_MUL_361), DUP(op_MUL_361)), SN(32, 1)); + RzILOpPure *op_RSHIFT_368 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_371 = LOGAND(op_RSHIFT_368, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_377 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_379 = LOGAND(op_RSHIFT_377, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_385 = 
SHIFTRA(CAST(64, MSB(CAST(32, MSB(op_AND_379), DUP(op_AND_379))), CAST(32, MSB(DUP(op_AND_379)), DUP(op_AND_379))), SN(32, 16)); + RzILOpPure *op_AND_388 = LOGAND(op_RSHIFT_385, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_391 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_371), DUP(op_AND_371))), CAST(16, MSB(DUP(op_AND_371)), DUP(op_AND_371))), CAST(32, MSB(CAST(16, MSB(op_AND_388), DUP(op_AND_388))), CAST(16, MSB(DUP(op_AND_388)), DUP(op_AND_388)))); + RzILOpPure *op_LSHIFT_394 = SHIFTL0(CAST(64, MSB(op_MUL_391), DUP(op_MUL_391)), SN(32, 1)); + RzILOpPure *op_ADD_395 = ADD(op_LSHIFT_364, op_LSHIFT_394); + RzILOpPure *op_ADD_398 = ADD(op_ADD_395, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_407 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_410 = LOGAND(op_RSHIFT_407, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_416 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_418 = LOGAND(op_RSHIFT_416, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_424 = SHIFTRA(CAST(64, MSB(CAST(32, MSB(op_AND_418), DUP(op_AND_418))), CAST(32, MSB(DUP(op_AND_418)), DUP(op_AND_418))), SN(32, 0)); + RzILOpPure *op_AND_427 = LOGAND(op_RSHIFT_424, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_430 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_410), DUP(op_AND_410))), CAST(16, MSB(DUP(op_AND_410)), DUP(op_AND_410))), CAST(32, MSB(CAST(16, MSB(op_AND_427), DUP(op_AND_427))), CAST(16, MSB(DUP(op_AND_427)), DUP(op_AND_427)))); + RzILOpPure *op_LSHIFT_433 = SHIFTL0(CAST(64, MSB(op_MUL_430), DUP(op_MUL_430)), SN(32, 1)); + RzILOpPure *op_RSHIFT_437 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_440 = LOGAND(op_RSHIFT_437, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_446 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_448 = LOGAND(op_RSHIFT_446, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_454 = SHIFTRA(CAST(64, MSB(CAST(32, MSB(op_AND_448), DUP(op_AND_448))), CAST(32, 
MSB(DUP(op_AND_448)), DUP(op_AND_448))), SN(32, 16)); + RzILOpPure *op_AND_457 = LOGAND(op_RSHIFT_454, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_460 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_440), DUP(op_AND_440))), CAST(16, MSB(DUP(op_AND_440)), DUP(op_AND_440))), CAST(32, MSB(CAST(16, MSB(op_AND_457), DUP(op_AND_457))), CAST(16, MSB(DUP(op_AND_457)), DUP(op_AND_457)))); + RzILOpPure *op_LSHIFT_463 = SHIFTL0(CAST(64, MSB(op_MUL_460), DUP(op_MUL_460)), SN(32, 1)); + RzILOpPure *op_ADD_464 = ADD(op_LSHIFT_433, op_LSHIFT_463); + RzILOpPure *op_ADD_467 = ADD(op_ADD_464, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_EQ_468 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_ADD_398), SN(32, 0), SN(32, 0x20)), op_ADD_467); + RzILOpPure *op_RSHIFT_539 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_542 = LOGAND(op_RSHIFT_539, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_548 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_550 = LOGAND(op_RSHIFT_548, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_556 = SHIFTRA(CAST(64, MSB(CAST(32, MSB(op_AND_550), DUP(op_AND_550))), CAST(32, MSB(DUP(op_AND_550)), DUP(op_AND_550))), SN(32, 0)); + RzILOpPure *op_AND_559 = LOGAND(op_RSHIFT_556, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_562 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_542), DUP(op_AND_542))), CAST(16, MSB(DUP(op_AND_542)), DUP(op_AND_542))), CAST(32, MSB(CAST(16, MSB(op_AND_559), DUP(op_AND_559))), CAST(16, MSB(DUP(op_AND_559)), DUP(op_AND_559)))); + RzILOpPure *op_LSHIFT_565 = SHIFTL0(CAST(64, MSB(op_MUL_562), DUP(op_MUL_562)), SN(32, 1)); + RzILOpPure *op_RSHIFT_569 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_572 = LOGAND(op_RSHIFT_569, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_578 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_580 = LOGAND(op_RSHIFT_578, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_586 = SHIFTRA(CAST(64, MSB(CAST(32, 
MSB(op_AND_580), DUP(op_AND_580))), CAST(32, MSB(DUP(op_AND_580)), DUP(op_AND_580))), SN(32, 16)); + RzILOpPure *op_AND_589 = LOGAND(op_RSHIFT_586, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_592 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_572), DUP(op_AND_572))), CAST(16, MSB(DUP(op_AND_572)), DUP(op_AND_572))), CAST(32, MSB(CAST(16, MSB(op_AND_589), DUP(op_AND_589))), CAST(16, MSB(DUP(op_AND_589)), DUP(op_AND_589)))); + RzILOpPure *op_LSHIFT_595 = SHIFTL0(CAST(64, MSB(op_MUL_592), DUP(op_MUL_592)), SN(32, 1)); + RzILOpPure *op_ADD_596 = ADD(op_LSHIFT_565, op_LSHIFT_595); + RzILOpPure *op_ADD_599 = ADD(op_ADD_596, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_LT_602 = SLT(op_ADD_599, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_607 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_608 = NEG(op_LSHIFT_607); + RzILOpPure *op_LSHIFT_613 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_616 = SUB(op_LSHIFT_613, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_617 = ITE(op_LT_602, op_NEG_608, op_SUB_616); + RzILOpEffect *gcc_expr_618 = BRANCH(op_EQ_468, EMPTY(), set_usr_field_call_535); + + // h_tmp432 = HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff))) >> 0x0) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff))) >> 0x10) & ((st64) 0xffff))))) << 0x1) + ((st64) 0x8000)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff))) >> 0x0) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff))) >> 0x10) & ((st64) 0xffff))))) << 0x1) + ((st64) 0x8000))) {{}} else {set_usr_field(bundle, 
HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff))) >> 0x0) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff))) >> 0x10) & ((st64) 0xffff))))) << 0x1) + ((st64) 0x8000) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_620 = SETL("h_tmp432", cond_617); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st16 ...; + RzILOpEffect *seq_621 = SEQN(2, gcc_expr_618, op_ASSIGN_hybrid_tmp_620); + + // Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xffff << 0x0)))) | (((ut64) (((st32) ((st16) ((((sextract64(((ut64) (((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff))) >> 0x0) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff))) >> 0x10) & ((st64) 0xffff))))) << 0x1) + ((st64) 0x8000)), 0x0, 0x20) == (((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff))) >> 0x0) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff))) >> 0x10) & ((st64) 0xffff))))) << 0x1) + ((st64) 0x8000)) ? 
(((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff))) >> 0x0) & ((st64) 0xffff))))) << 0x1) + (((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff))) >> 0x10) & ((st64) 0xffff))))) << 0x1) + ((st64) 0x8000) : h_tmp432) >> 0x10) & ((st64) 0xffff)))) & 0xffff)) << 0x0))); + RzILOpPure *op_LSHIFT_328 = SHIFTL0(SN(64, 0xffff), SN(32, 0)); + RzILOpPure *op_NOT_329 = LOGNOT(op_LSHIFT_328); + RzILOpPure *op_AND_331 = LOGAND(CAST(64, MSB(READ_REG(pkt, Rd_op, true)), READ_REG(pkt, Rd_op, true)), op_NOT_329); + RzILOpPure *op_RSHIFT_472 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_475 = LOGAND(op_RSHIFT_472, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_481 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_483 = LOGAND(op_RSHIFT_481, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_489 = SHIFTRA(CAST(64, MSB(CAST(32, MSB(op_AND_483), DUP(op_AND_483))), CAST(32, MSB(DUP(op_AND_483)), DUP(op_AND_483))), SN(32, 0)); + RzILOpPure *op_AND_492 = LOGAND(op_RSHIFT_489, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_495 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_475), DUP(op_AND_475))), CAST(16, MSB(DUP(op_AND_475)), DUP(op_AND_475))), CAST(32, MSB(CAST(16, MSB(op_AND_492), DUP(op_AND_492))), CAST(16, MSB(DUP(op_AND_492)), DUP(op_AND_492)))); + RzILOpPure *op_LSHIFT_498 = SHIFTL0(CAST(64, MSB(op_MUL_495), DUP(op_MUL_495)), SN(32, 1)); + RzILOpPure *op_RSHIFT_502 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_505 = LOGAND(op_RSHIFT_502, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_511 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_513 = LOGAND(op_RSHIFT_511, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_519 = SHIFTRA(CAST(64, MSB(CAST(32, MSB(op_AND_513), DUP(op_AND_513))), CAST(32, MSB(DUP(op_AND_513)), DUP(op_AND_513))), SN(32, 16)); + RzILOpPure 
*op_AND_522 = LOGAND(op_RSHIFT_519, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_525 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_505), DUP(op_AND_505))), CAST(16, MSB(DUP(op_AND_505)), DUP(op_AND_505))), CAST(32, MSB(CAST(16, MSB(op_AND_522), DUP(op_AND_522))), CAST(16, MSB(DUP(op_AND_522)), DUP(op_AND_522)))); + RzILOpPure *op_LSHIFT_528 = SHIFTL0(CAST(64, MSB(op_MUL_525), DUP(op_MUL_525)), SN(32, 1)); + RzILOpPure *op_ADD_529 = ADD(op_LSHIFT_498, op_LSHIFT_528); + RzILOpPure *op_ADD_532 = ADD(op_ADD_529, CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *cond_622 = ITE(DUP(op_EQ_468), op_ADD_532, VARL("h_tmp432")); + RzILOpPure *op_RSHIFT_626 = SHIFTRA(cond_622, SN(32, 16)); + RzILOpPure *op_AND_629 = LOGAND(op_RSHIFT_626, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_AND_633 = LOGAND(CAST(32, MSB(CAST(16, MSB(op_AND_629), DUP(op_AND_629))), CAST(16, MSB(DUP(op_AND_629)), DUP(op_AND_629))), SN(32, 0xffff)); + RzILOpPure *op_LSHIFT_638 = SHIFTL0(CAST(64, IL_FALSE, op_AND_633), SN(32, 0)); + RzILOpPure *op_OR_640 = LOGOR(CAST(64, IL_FALSE, op_AND_331), op_LSHIFT_638); + RzILOpEffect *op_ASSIGN_642 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, op_OR_640)); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) (( ...; + RzILOpEffect *seq_643 = SEQN(2, seq_621, op_ASSIGN_642); + + RzILOpEffect *instruction_sequence = SEQN(2, seq_322, seq_643); + return instruction_sequence; +} + +// Rxx += vrmpyh(Rss,Rtt) +RzILOpEffect *hex_il_op_m2_vrmac_s0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rxx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // Rxx = Rxx + ((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 
0xffff))))) + ((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff))))) + ((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) + ((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff))))); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rss, SN(32, 0)); + RzILOpPure *op_AND_8 = LOGAND(op_RSHIFT_5, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_15 = SHIFTRA(Rtt, SN(32, 0)); + RzILOpPure *op_AND_18 = LOGAND(op_RSHIFT_15, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_21 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_8), DUP(op_AND_8))), CAST(16, MSB(DUP(op_AND_8)), DUP(op_AND_8))), CAST(32, MSB(CAST(16, MSB(op_AND_18), DUP(op_AND_18))), CAST(16, MSB(DUP(op_AND_18)), DUP(op_AND_18)))); + RzILOpPure *op_ADD_23 = ADD(READ_REG(pkt, Rxx_op, false), CAST(64, MSB(op_MUL_21), DUP(op_MUL_21))); + RzILOpPure *op_RSHIFT_27 = SHIFTRA(DUP(Rss), SN(32, 16)); + RzILOpPure *op_AND_30 = LOGAND(op_RSHIFT_27, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_36 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_39 = LOGAND(op_RSHIFT_36, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_42 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_30), DUP(op_AND_30))), CAST(16, MSB(DUP(op_AND_30)), DUP(op_AND_30))), CAST(32, MSB(CAST(16, MSB(op_AND_39), DUP(op_AND_39))), CAST(16, MSB(DUP(op_AND_39)), DUP(op_AND_39)))); + RzILOpPure *op_ADD_44 = ADD(op_ADD_23, CAST(64, MSB(op_MUL_42), DUP(op_MUL_42))); + RzILOpPure *op_RSHIFT_48 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_51 = LOGAND(op_RSHIFT_48, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_57 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_60 = LOGAND(op_RSHIFT_57, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_63 = MUL(CAST(32, MSB(CAST(16, 
MSB(op_AND_51), DUP(op_AND_51))), CAST(16, MSB(DUP(op_AND_51)), DUP(op_AND_51))), CAST(32, MSB(CAST(16, MSB(op_AND_60), DUP(op_AND_60))), CAST(16, MSB(DUP(op_AND_60)), DUP(op_AND_60)))); + RzILOpPure *op_ADD_65 = ADD(op_ADD_44, CAST(64, MSB(op_MUL_63), DUP(op_MUL_63))); + RzILOpPure *op_RSHIFT_69 = SHIFTRA(DUP(Rss), SN(32, 0x30)); + RzILOpPure *op_AND_72 = LOGAND(op_RSHIFT_69, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_78 = SHIFTRA(DUP(Rtt), SN(32, 0x30)); + RzILOpPure *op_AND_81 = LOGAND(op_RSHIFT_78, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_84 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_72), DUP(op_AND_72))), CAST(16, MSB(DUP(op_AND_72)), DUP(op_AND_72))), CAST(32, MSB(CAST(16, MSB(op_AND_81), DUP(op_AND_81))), CAST(16, MSB(DUP(op_AND_81)), DUP(op_AND_81)))); + RzILOpPure *op_ADD_86 = ADD(op_ADD_65, CAST(64, MSB(op_MUL_84), DUP(op_MUL_84))); + RzILOpEffect *op_ASSIGN_87 = WRITE_REG(bundle, Rxx_op, op_ADD_86); + + RzILOpEffect *instruction_sequence = op_ASSIGN_87; + return instruction_sequence; +} + +// Rdd = vrmpyh(Rss,Rtt) +RzILOpEffect *hex_il_op_m2_vrmpy_s0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // Rdd = ((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) + ((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff))))) + ((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) + ((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) * ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff))))); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rss, 
SN(32, 0)); + RzILOpPure *op_AND_8 = LOGAND(op_RSHIFT_5, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_15 = SHIFTRA(Rtt, SN(32, 0)); + RzILOpPure *op_AND_18 = LOGAND(op_RSHIFT_15, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_21 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_8), DUP(op_AND_8))), CAST(16, MSB(DUP(op_AND_8)), DUP(op_AND_8))), CAST(32, MSB(CAST(16, MSB(op_AND_18), DUP(op_AND_18))), CAST(16, MSB(DUP(op_AND_18)), DUP(op_AND_18)))); + RzILOpPure *op_RSHIFT_26 = SHIFTRA(DUP(Rss), SN(32, 16)); + RzILOpPure *op_AND_29 = LOGAND(op_RSHIFT_26, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_35 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_38 = LOGAND(op_RSHIFT_35, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_41 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_29), DUP(op_AND_29))), CAST(16, MSB(DUP(op_AND_29)), DUP(op_AND_29))), CAST(32, MSB(CAST(16, MSB(op_AND_38), DUP(op_AND_38))), CAST(16, MSB(DUP(op_AND_38)), DUP(op_AND_38)))); + RzILOpPure *op_ADD_43 = ADD(CAST(64, MSB(op_MUL_21), DUP(op_MUL_21)), CAST(64, MSB(op_MUL_41), DUP(op_MUL_41))); + RzILOpPure *op_RSHIFT_47 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_50 = LOGAND(op_RSHIFT_47, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_56 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_59 = LOGAND(op_RSHIFT_56, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_62 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_50), DUP(op_AND_50))), CAST(16, MSB(DUP(op_AND_50)), DUP(op_AND_50))), CAST(32, MSB(CAST(16, MSB(op_AND_59), DUP(op_AND_59))), CAST(16, MSB(DUP(op_AND_59)), DUP(op_AND_59)))); + RzILOpPure *op_ADD_64 = ADD(op_ADD_43, CAST(64, MSB(op_MUL_62), DUP(op_MUL_62))); + RzILOpPure *op_RSHIFT_68 = SHIFTRA(DUP(Rss), SN(32, 0x30)); + RzILOpPure *op_AND_71 = LOGAND(op_RSHIFT_68, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_77 = SHIFTRA(DUP(Rtt), 
SN(32, 0x30)); + RzILOpPure *op_AND_80 = LOGAND(op_RSHIFT_77, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_83 = MUL(CAST(32, MSB(CAST(16, MSB(op_AND_71), DUP(op_AND_71))), CAST(16, MSB(DUP(op_AND_71)), DUP(op_AND_71))), CAST(32, MSB(CAST(16, MSB(op_AND_80), DUP(op_AND_80))), CAST(16, MSB(DUP(op_AND_80)), DUP(op_AND_80)))); + RzILOpPure *op_ADD_85 = ADD(op_ADD_64, CAST(64, MSB(op_MUL_83), DUP(op_MUL_83))); + RzILOpEffect *op_ASSIGN_86 = WRITE_REG(bundle, Rdd_op, op_ADD_85); + + RzILOpEffect *instruction_sequence = op_ASSIGN_86; + return instruction_sequence; +} + +// Rx ^= xor(Rs,Rt) +RzILOpEffect *hex_il_op_m2_xor_xacc(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rx = (Rx ^ (Rs ^ Rt)); + RzILOpPure *op_XOR_3 = LOGXOR(Rs, Rt); + RzILOpPure *op_XOR_4 = LOGXOR(READ_REG(pkt, Rx_op, false), op_XOR_3); + RzILOpEffect *op_ASSIGN_XOR_5 = WRITE_REG(bundle, Rx_op, op_XOR_4); + + RzILOpEffect *instruction_sequence = op_ASSIGN_XOR_5; + return instruction_sequence; +} + +#include \ No newline at end of file diff --git a/librz/arch/isa/hexagon/il_ops/hexagon_il_M4_ops.c b/librz/arch/isa/hexagon/il_ops/hexagon_il_M4_ops.c new file mode 100644 index 00000000000..3b51aac7985 --- /dev/null +++ b/librz/arch/isa/hexagon/il_ops/hexagon_il_M4_ops.c @@ -0,0 +1,1441 @@ +// SPDX-FileCopyrightText: 2021 Rot127 +// SPDX-License-Identifier: LGPL-3.0-only + +// LLVM commit: b6f51787f6c8e77143f0aef6b58ddc7c55741d5c +// LLVM commit date: 2023-11-15 07:10:59 -0800 (ISO 8601 format) +// Date of code generation: 2024-03-16 06:22:39-05:00 +//======================================== +// The following code is generated. +// Do not edit. 
Repository of code generator: +// https://github.com/rizinorg/rz-hexagon + +#include +#include "../hexagon_il.h" +#include +#include + +// Rx &= and(Rs,Rt) +RzILOpEffect *hex_il_op_m4_and_and(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rx = (Rx & (Rs & Rt)); + RzILOpPure *op_AND_3 = LOGAND(Rs, Rt); + RzILOpPure *op_AND_4 = LOGAND(READ_REG(pkt, Rx_op, false), op_AND_3); + RzILOpEffect *op_ASSIGN_AND_5 = WRITE_REG(bundle, Rx_op, op_AND_4); + + RzILOpEffect *instruction_sequence = op_ASSIGN_AND_5; + return instruction_sequence; +} + +// Rx &= and(Rs,~Rt) +RzILOpEffect *hex_il_op_m4_and_andn(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rx = (Rx & (Rs & (~Rt))); + RzILOpPure *op_NOT_3 = LOGNOT(Rt); + RzILOpPure *op_AND_4 = LOGAND(Rs, op_NOT_3); + RzILOpPure *op_AND_5 = LOGAND(READ_REG(pkt, Rx_op, false), op_AND_4); + RzILOpEffect *op_ASSIGN_AND_6 = WRITE_REG(bundle, Rx_op, op_AND_5); + + RzILOpEffect *instruction_sequence = op_ASSIGN_AND_6; + return instruction_sequence; +} + +// Rx &= or(Rs,Rt) +RzILOpEffect *hex_il_op_m4_and_or(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, 
false); + + // Rx = (Rx & (Rs | Rt)); + RzILOpPure *op_OR_3 = LOGOR(Rs, Rt); + RzILOpPure *op_AND_4 = LOGAND(READ_REG(pkt, Rx_op, false), op_OR_3); + RzILOpEffect *op_ASSIGN_AND_5 = WRITE_REG(bundle, Rx_op, op_AND_4); + + RzILOpEffect *instruction_sequence = op_ASSIGN_AND_5; + return instruction_sequence; +} + +// Rx &= xor(Rs,Rt) +RzILOpEffect *hex_il_op_m4_and_xor(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rx = (Rx & (Rs ^ Rt)); + RzILOpPure *op_XOR_3 = LOGXOR(Rs, Rt); + RzILOpPure *op_AND_4 = LOGAND(READ_REG(pkt, Rx_op, false), op_XOR_3); + RzILOpEffect *op_ASSIGN_AND_5 = WRITE_REG(bundle, Rx_op, op_AND_4); + + RzILOpEffect *instruction_sequence = op_ASSIGN_AND_5; + return instruction_sequence; +} + +// Rd = cmpyiwh(Rss,Rt):<<1:rnd:sat +RzILOpEffect *hex_il_op_m4_cmpyi_wh(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_188 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rt >> 0x10) & 0xffff))), 0x0, 0x10) + ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rt >> 0x0) & 0xffff))), 0x0, 0x10) + ((st64) 0x4000) >> 0xf)), 0x0, 0x20) == (((st64) ((st32) 
((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rt >> 0x10) & 0xffff))), 0x0, 0x10) + ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rt >> 0x0) & 0xffff))), 0x0, 0x10) + ((st64) 0x4000) >> 0xf))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rt >> 0x10) & 0xffff))), 0x0, 0x10) + ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rt >> 0x0) & 0xffff))), 0x0, 0x10) + ((st64) 0x4000) >> 0xf) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_8 = SHIFTRA(Rss, SN(32, 0)); + RzILOpPure *op_AND_10 = LOGAND(op_RSHIFT_8, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_22 = SHIFTRA(Rt, SN(32, 16)); + RzILOpPure *op_AND_24 = LOGAND(op_RSHIFT_22, SN(32, 0xffff)); + RzILOpPure *op_MUL_31 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_10), DUP(op_AND_10))), CAST(32, MSB(DUP(op_AND_10)), DUP(op_AND_10)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_10)), DUP(op_AND_10))), CAST(32, MSB(DUP(op_AND_10)), DUP(op_AND_10))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_10)), DUP(op_AND_10))), CAST(32, MSB(DUP(op_AND_10)), DUP(op_AND_10)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_10)), DUP(op_AND_10))), CAST(32, MSB(DUP(op_AND_10)), DUP(op_AND_10))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_24), DUP(op_AND_24))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_RSHIFT_35 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_37 = LOGAND(op_RSHIFT_35, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_48 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_50 = LOGAND(op_RSHIFT_48, SN(32, 0xffff)); + RzILOpPure *op_MUL_57 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_37), DUP(op_AND_37))), CAST(32, MSB(DUP(op_AND_37)), DUP(op_AND_37)))), CAST(64, MSB(CAST(32, 
MSB(DUP(op_AND_37)), DUP(op_AND_37))), CAST(32, MSB(DUP(op_AND_37)), DUP(op_AND_37))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_37)), DUP(op_AND_37))), CAST(32, MSB(DUP(op_AND_37)), DUP(op_AND_37)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_37)), DUP(op_AND_37))), CAST(32, MSB(DUP(op_AND_37)), DUP(op_AND_37))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_50), DUP(op_AND_50))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_ADD_58 = ADD(op_MUL_31, op_MUL_57); + RzILOpPure *op_ADD_61 = ADD(op_ADD_58, CAST(64, MSB(SN(32, 0x4000)), SN(32, 0x4000))); + RzILOpPure *op_RSHIFT_63 = SHIFTRA(op_ADD_61, SN(32, 15)); + RzILOpPure *op_RSHIFT_72 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_74 = LOGAND(op_RSHIFT_72, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_85 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_87 = LOGAND(op_RSHIFT_85, SN(32, 0xffff)); + RzILOpPure *op_MUL_94 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_74), DUP(op_AND_74))), CAST(32, MSB(DUP(op_AND_74)), DUP(op_AND_74)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_74)), DUP(op_AND_74))), CAST(32, MSB(DUP(op_AND_74)), DUP(op_AND_74))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_74)), DUP(op_AND_74))), CAST(32, MSB(DUP(op_AND_74)), DUP(op_AND_74)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_74)), DUP(op_AND_74))), CAST(32, MSB(DUP(op_AND_74)), DUP(op_AND_74))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_87), DUP(op_AND_87))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_RSHIFT_98 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_100 = LOGAND(op_RSHIFT_98, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_111 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_113 = LOGAND(op_RSHIFT_111, SN(32, 0xffff)); + RzILOpPure *op_MUL_120 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_100), DUP(op_AND_100))), CAST(32, MSB(DUP(op_AND_100)), DUP(op_AND_100)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_100)), DUP(op_AND_100))), CAST(32, MSB(DUP(op_AND_100)), 
DUP(op_AND_100))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_100)), DUP(op_AND_100))), CAST(32, MSB(DUP(op_AND_100)), DUP(op_AND_100)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_100)), DUP(op_AND_100))), CAST(32, MSB(DUP(op_AND_100)), DUP(op_AND_100))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_113), DUP(op_AND_113))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_ADD_121 = ADD(op_MUL_94, op_MUL_120); + RzILOpPure *op_ADD_124 = ADD(op_ADD_121, CAST(64, MSB(SN(32, 0x4000)), SN(32, 0x4000))); + RzILOpPure *op_RSHIFT_126 = SHIFTRA(op_ADD_124, SN(32, 15)); + RzILOpPure *op_EQ_127 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_RSHIFT_63), SN(32, 0), SN(32, 0x20)), op_RSHIFT_126); + RzILOpPure *op_RSHIFT_192 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_194 = LOGAND(op_RSHIFT_192, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_205 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_207 = LOGAND(op_RSHIFT_205, SN(32, 0xffff)); + RzILOpPure *op_MUL_214 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_194), DUP(op_AND_194))), CAST(32, MSB(DUP(op_AND_194)), DUP(op_AND_194)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_194)), DUP(op_AND_194))), CAST(32, MSB(DUP(op_AND_194)), DUP(op_AND_194))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_194)), DUP(op_AND_194))), CAST(32, MSB(DUP(op_AND_194)), DUP(op_AND_194)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_194)), DUP(op_AND_194))), CAST(32, MSB(DUP(op_AND_194)), DUP(op_AND_194))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_207), DUP(op_AND_207))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_RSHIFT_218 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_220 = LOGAND(op_RSHIFT_218, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_231 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_233 = LOGAND(op_RSHIFT_231, SN(32, 0xffff)); + RzILOpPure *op_MUL_240 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_220), DUP(op_AND_220))), CAST(32, MSB(DUP(op_AND_220)), DUP(op_AND_220)))), CAST(64, 
MSB(CAST(32, MSB(DUP(op_AND_220)), DUP(op_AND_220))), CAST(32, MSB(DUP(op_AND_220)), DUP(op_AND_220))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_220)), DUP(op_AND_220))), CAST(32, MSB(DUP(op_AND_220)), DUP(op_AND_220)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_220)), DUP(op_AND_220))), CAST(32, MSB(DUP(op_AND_220)), DUP(op_AND_220))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_233), DUP(op_AND_233))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_ADD_241 = ADD(op_MUL_214, op_MUL_240); + RzILOpPure *op_ADD_244 = ADD(op_ADD_241, CAST(64, MSB(SN(32, 0x4000)), SN(32, 0x4000))); + RzILOpPure *op_RSHIFT_246 = SHIFTRA(op_ADD_244, SN(32, 15)); + RzILOpPure *op_LT_249 = SLT(op_RSHIFT_246, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_254 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_255 = NEG(op_LSHIFT_254); + RzILOpPure *op_LSHIFT_260 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_263 = SUB(op_LSHIFT_260, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_264 = ITE(op_LT_249, op_NEG_255, op_SUB_263); + RzILOpEffect *gcc_expr_265 = BRANCH(op_EQ_127, EMPTY(), set_usr_field_call_188); + + // h_tmp433 = HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rt >> 0x10) & 0xffff))), 0x0, 0x10) + ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rt >> 0x0) & 0xffff))), 0x0, 0x10) + ((st64) 0x4000) >> 0xf)), 0x0, 0x20) == (((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rt >> 0x10) & 0xffff))), 0x0, 0x10) + ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rt >> 0x0) & 0xffff))), 0x0, 0x10) + ((st64) 0x4000) >> 0xf))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rt >> 0x10) & 0xffff))), 0x0, 
0x10) + ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rt >> 0x0) & 0xffff))), 0x0, 0x10) + ((st64) 0x4000) >> 0xf) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_267 = SETL("h_tmp433", cond_264); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st64 ...; + RzILOpEffect *seq_268 = SEQN(2, gcc_expr_265, op_ASSIGN_hybrid_tmp_267); + + // Rd = ((st32) ((sextract64(((ut64) (((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rt >> 0x10) & 0xffff))), 0x0, 0x10) + ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rt >> 0x0) & 0xffff))), 0x0, 0x10) + ((st64) 0x4000) >> 0xf)), 0x0, 0x20) == (((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rt >> 0x10) & 0xffff))), 0x0, 0x10) + ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rt >> 0x0) & 0xffff))), 0x0, 0x10) + ((st64) 0x4000) >> 0xf)) ? 
(((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rt >> 0x10) & 0xffff))), 0x0, 0x10) + ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rt >> 0x0) & 0xffff))), 0x0, 0x10) + ((st64) 0x4000) >> 0xf) : h_tmp433)); + RzILOpPure *op_RSHIFT_131 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_133 = LOGAND(op_RSHIFT_131, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_144 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_146 = LOGAND(op_RSHIFT_144, SN(32, 0xffff)); + RzILOpPure *op_MUL_153 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_133), DUP(op_AND_133))), CAST(32, MSB(DUP(op_AND_133)), DUP(op_AND_133)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_133)), DUP(op_AND_133))), CAST(32, MSB(DUP(op_AND_133)), DUP(op_AND_133))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_133)), DUP(op_AND_133))), CAST(32, MSB(DUP(op_AND_133)), DUP(op_AND_133)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_133)), DUP(op_AND_133))), CAST(32, MSB(DUP(op_AND_133)), DUP(op_AND_133))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_146), DUP(op_AND_146))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_RSHIFT_157 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_159 = LOGAND(op_RSHIFT_157, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_170 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_172 = LOGAND(op_RSHIFT_170, SN(32, 0xffff)); + RzILOpPure *op_MUL_179 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_159), DUP(op_AND_159))), CAST(32, MSB(DUP(op_AND_159)), DUP(op_AND_159)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_159)), DUP(op_AND_159))), CAST(32, MSB(DUP(op_AND_159)), DUP(op_AND_159))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_159)), DUP(op_AND_159))), CAST(32, MSB(DUP(op_AND_159)), DUP(op_AND_159)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_159)), DUP(op_AND_159))), CAST(32, MSB(DUP(op_AND_159)), DUP(op_AND_159))))), SEXTRACT64(CAST(64, IL_FALSE, 
CAST(16, MSB(op_AND_172), DUP(op_AND_172))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_ADD_180 = ADD(op_MUL_153, op_MUL_179); + RzILOpPure *op_ADD_183 = ADD(op_ADD_180, CAST(64, MSB(SN(32, 0x4000)), SN(32, 0x4000))); + RzILOpPure *op_RSHIFT_185 = SHIFTRA(op_ADD_183, SN(32, 15)); + RzILOpPure *cond_269 = ITE(DUP(op_EQ_127), op_RSHIFT_185, VARL("h_tmp433")); + RzILOpEffect *op_ASSIGN_271 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(cond_269), DUP(cond_269))); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) (( ...; + RzILOpEffect *seq_272 = SEQN(2, seq_268, op_ASSIGN_271); + + RzILOpEffect *instruction_sequence = seq_272; + return instruction_sequence; +} + +// Rd = cmpyiwh(Rss,Rt*):<<1:rnd:sat +RzILOpEffect *hex_il_op_m4_cmpyi_whc(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_188 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rt >> 0x0) & 0xffff))), 0x0, 0x10) - ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rt >> 0x10) & 0xffff))), 0x0, 0x10) + ((st64) 0x4000) >> 0xf)), 0x0, 0x20) == (((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rt >> 0x0) & 0xffff))), 0x0, 0x10) - ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rt >> 0x10) & 0xffff))), 0x0, 0x10) + ((st64) 0x4000) >> 0xf))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 
0x1))}, (((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rt >> 0x0) & 0xffff))), 0x0, 0x10) - ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rt >> 0x10) & 0xffff))), 0x0, 0x10) + ((st64) 0x4000) >> 0xf) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_8 = SHIFTRA(Rss, SN(32, 0x20)); + RzILOpPure *op_AND_10 = LOGAND(op_RSHIFT_8, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_22 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_24 = LOGAND(op_RSHIFT_22, SN(32, 0xffff)); + RzILOpPure *op_MUL_31 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_10), DUP(op_AND_10))), CAST(32, MSB(DUP(op_AND_10)), DUP(op_AND_10)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_10)), DUP(op_AND_10))), CAST(32, MSB(DUP(op_AND_10)), DUP(op_AND_10))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_10)), DUP(op_AND_10))), CAST(32, MSB(DUP(op_AND_10)), DUP(op_AND_10)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_10)), DUP(op_AND_10))), CAST(32, MSB(DUP(op_AND_10)), DUP(op_AND_10))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_24), DUP(op_AND_24))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_RSHIFT_35 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_37 = LOGAND(op_RSHIFT_35, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_48 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_50 = LOGAND(op_RSHIFT_48, SN(32, 0xffff)); + RzILOpPure *op_MUL_57 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_37), DUP(op_AND_37))), CAST(32, MSB(DUP(op_AND_37)), DUP(op_AND_37)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_37)), DUP(op_AND_37))), CAST(32, MSB(DUP(op_AND_37)), DUP(op_AND_37))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_37)), DUP(op_AND_37))), CAST(32, MSB(DUP(op_AND_37)), DUP(op_AND_37)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_37)), DUP(op_AND_37))), CAST(32, MSB(DUP(op_AND_37)), DUP(op_AND_37))))), SEXTRACT64(CAST(64, IL_FALSE, 
CAST(16, MSB(op_AND_50), DUP(op_AND_50))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_SUB_58 = SUB(op_MUL_31, op_MUL_57); + RzILOpPure *op_ADD_61 = ADD(op_SUB_58, CAST(64, MSB(SN(32, 0x4000)), SN(32, 0x4000))); + RzILOpPure *op_RSHIFT_63 = SHIFTRA(op_ADD_61, SN(32, 15)); + RzILOpPure *op_RSHIFT_72 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_74 = LOGAND(op_RSHIFT_72, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_85 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_87 = LOGAND(op_RSHIFT_85, SN(32, 0xffff)); + RzILOpPure *op_MUL_94 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_74), DUP(op_AND_74))), CAST(32, MSB(DUP(op_AND_74)), DUP(op_AND_74)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_74)), DUP(op_AND_74))), CAST(32, MSB(DUP(op_AND_74)), DUP(op_AND_74))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_74)), DUP(op_AND_74))), CAST(32, MSB(DUP(op_AND_74)), DUP(op_AND_74)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_74)), DUP(op_AND_74))), CAST(32, MSB(DUP(op_AND_74)), DUP(op_AND_74))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_87), DUP(op_AND_87))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_RSHIFT_98 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_100 = LOGAND(op_RSHIFT_98, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_111 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_113 = LOGAND(op_RSHIFT_111, SN(32, 0xffff)); + RzILOpPure *op_MUL_120 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_100), DUP(op_AND_100))), CAST(32, MSB(DUP(op_AND_100)), DUP(op_AND_100)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_100)), DUP(op_AND_100))), CAST(32, MSB(DUP(op_AND_100)), DUP(op_AND_100))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_100)), DUP(op_AND_100))), CAST(32, MSB(DUP(op_AND_100)), DUP(op_AND_100)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_100)), DUP(op_AND_100))), CAST(32, MSB(DUP(op_AND_100)), DUP(op_AND_100))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_113), DUP(op_AND_113))), SN(32, 0), SN(32, 
16))); + RzILOpPure *op_SUB_121 = SUB(op_MUL_94, op_MUL_120); + RzILOpPure *op_ADD_124 = ADD(op_SUB_121, CAST(64, MSB(SN(32, 0x4000)), SN(32, 0x4000))); + RzILOpPure *op_RSHIFT_126 = SHIFTRA(op_ADD_124, SN(32, 15)); + RzILOpPure *op_EQ_127 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_RSHIFT_63), SN(32, 0), SN(32, 0x20)), op_RSHIFT_126); + RzILOpPure *op_RSHIFT_192 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_194 = LOGAND(op_RSHIFT_192, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_205 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_207 = LOGAND(op_RSHIFT_205, SN(32, 0xffff)); + RzILOpPure *op_MUL_214 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_194), DUP(op_AND_194))), CAST(32, MSB(DUP(op_AND_194)), DUP(op_AND_194)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_194)), DUP(op_AND_194))), CAST(32, MSB(DUP(op_AND_194)), DUP(op_AND_194))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_194)), DUP(op_AND_194))), CAST(32, MSB(DUP(op_AND_194)), DUP(op_AND_194)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_194)), DUP(op_AND_194))), CAST(32, MSB(DUP(op_AND_194)), DUP(op_AND_194))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_207), DUP(op_AND_207))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_RSHIFT_218 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_220 = LOGAND(op_RSHIFT_218, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_231 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_233 = LOGAND(op_RSHIFT_231, SN(32, 0xffff)); + RzILOpPure *op_MUL_240 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_220), DUP(op_AND_220))), CAST(32, MSB(DUP(op_AND_220)), DUP(op_AND_220)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_220)), DUP(op_AND_220))), CAST(32, MSB(DUP(op_AND_220)), DUP(op_AND_220))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_220)), DUP(op_AND_220))), CAST(32, MSB(DUP(op_AND_220)), DUP(op_AND_220)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_220)), DUP(op_AND_220))), CAST(32, MSB(DUP(op_AND_220)), DUP(op_AND_220))))), 
SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_233), DUP(op_AND_233))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_SUB_241 = SUB(op_MUL_214, op_MUL_240); + RzILOpPure *op_ADD_244 = ADD(op_SUB_241, CAST(64, MSB(SN(32, 0x4000)), SN(32, 0x4000))); + RzILOpPure *op_RSHIFT_246 = SHIFTRA(op_ADD_244, SN(32, 15)); + RzILOpPure *op_LT_249 = SLT(op_RSHIFT_246, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_254 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_255 = NEG(op_LSHIFT_254); + RzILOpPure *op_LSHIFT_260 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_263 = SUB(op_LSHIFT_260, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_264 = ITE(op_LT_249, op_NEG_255, op_SUB_263); + RzILOpEffect *gcc_expr_265 = BRANCH(op_EQ_127, EMPTY(), set_usr_field_call_188); + + // h_tmp434 = HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rt >> 0x0) & 0xffff))), 0x0, 0x10) - ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rt >> 0x10) & 0xffff))), 0x0, 0x10) + ((st64) 0x4000) >> 0xf)), 0x0, 0x20) == (((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rt >> 0x0) & 0xffff))), 0x0, 0x10) - ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rt >> 0x10) & 0xffff))), 0x0, 0x10) + ((st64) 0x4000) >> 0xf))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rt >> 0x0) & 0xffff))), 0x0, 0x10) - ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rt >> 0x10) & 0xffff))), 0x0, 0x10) + ((st64) 0x4000) >> 0xf) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_267 = SETL("h_tmp434", cond_264); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st64 ...; + RzILOpEffect *seq_268 = SEQN(2, gcc_expr_265, op_ASSIGN_hybrid_tmp_267); + + // Rd = ((st32) ((sextract64(((ut64) (((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rt >> 0x0) & 0xffff))), 0x0, 0x10) - ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rt >> 0x10) & 0xffff))), 0x0, 0x10) + ((st64) 0x4000) >> 0xf)), 0x0, 0x20) == (((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rt >> 0x0) & 0xffff))), 0x0, 0x10) - ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rt >> 0x10) & 0xffff))), 0x0, 0x10) + ((st64) 0x4000) >> 0xf)) ? (((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rt >> 0x0) & 0xffff))), 0x0, 0x10) - ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rt >> 0x10) & 0xffff))), 0x0, 0x10) + ((st64) 0x4000) >> 0xf) : h_tmp434)); + RzILOpPure *op_RSHIFT_131 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_133 = LOGAND(op_RSHIFT_131, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_144 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_146 = LOGAND(op_RSHIFT_144, SN(32, 0xffff)); + RzILOpPure *op_MUL_153 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_133), DUP(op_AND_133))), CAST(32, MSB(DUP(op_AND_133)), DUP(op_AND_133)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_133)), DUP(op_AND_133))), CAST(32, MSB(DUP(op_AND_133)), DUP(op_AND_133))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_133)), DUP(op_AND_133))), CAST(32, MSB(DUP(op_AND_133)), DUP(op_AND_133)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_133)), DUP(op_AND_133))), CAST(32, MSB(DUP(op_AND_133)), 
DUP(op_AND_133))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_146), DUP(op_AND_146))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_RSHIFT_157 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_159 = LOGAND(op_RSHIFT_157, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_170 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_172 = LOGAND(op_RSHIFT_170, SN(32, 0xffff)); + RzILOpPure *op_MUL_179 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_159), DUP(op_AND_159))), CAST(32, MSB(DUP(op_AND_159)), DUP(op_AND_159)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_159)), DUP(op_AND_159))), CAST(32, MSB(DUP(op_AND_159)), DUP(op_AND_159))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_159)), DUP(op_AND_159))), CAST(32, MSB(DUP(op_AND_159)), DUP(op_AND_159)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_159)), DUP(op_AND_159))), CAST(32, MSB(DUP(op_AND_159)), DUP(op_AND_159))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_172), DUP(op_AND_172))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_SUB_180 = SUB(op_MUL_153, op_MUL_179); + RzILOpPure *op_ADD_183 = ADD(op_SUB_180, CAST(64, MSB(SN(32, 0x4000)), SN(32, 0x4000))); + RzILOpPure *op_RSHIFT_185 = SHIFTRA(op_ADD_183, SN(32, 15)); + RzILOpPure *cond_269 = ITE(DUP(op_EQ_127), op_RSHIFT_185, VARL("h_tmp434")); + RzILOpEffect *op_ASSIGN_271 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(cond_269), DUP(cond_269))); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) (( ...; + RzILOpEffect *seq_272 = SEQN(2, seq_268, op_ASSIGN_271); + + RzILOpEffect *instruction_sequence = seq_272; + return instruction_sequence; +} + +// Rd = cmpyrwh(Rss,Rt):<<1:rnd:sat +RzILOpEffect *hex_il_op_m4_cmpyr_wh(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = 
READ_REG(pkt, Rt_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_188 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rt >> 0x0) & 0xffff))), 0x0, 0x10) - ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rt >> 0x10) & 0xffff))), 0x0, 0x10) + ((st64) 0x4000) >> 0xf)), 0x0, 0x20) == (((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rt >> 0x0) & 0xffff))), 0x0, 0x10) - ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rt >> 0x10) & 0xffff))), 0x0, 0x10) + ((st64) 0x4000) >> 0xf))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rt >> 0x0) & 0xffff))), 0x0, 0x10) - ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rt >> 0x10) & 0xffff))), 0x0, 0x10) + ((st64) 0x4000) >> 0xf) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_8 = SHIFTRA(Rss, SN(32, 0)); + RzILOpPure *op_AND_10 = LOGAND(op_RSHIFT_8, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_22 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_24 = LOGAND(op_RSHIFT_22, SN(32, 0xffff)); + RzILOpPure *op_MUL_31 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_10), DUP(op_AND_10))), CAST(32, MSB(DUP(op_AND_10)), DUP(op_AND_10)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_10)), DUP(op_AND_10))), CAST(32, MSB(DUP(op_AND_10)), DUP(op_AND_10))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_10)), DUP(op_AND_10))), CAST(32, MSB(DUP(op_AND_10)), DUP(op_AND_10)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_10)), DUP(op_AND_10))), CAST(32, MSB(DUP(op_AND_10)), DUP(op_AND_10))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_24), DUP(op_AND_24))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_RSHIFT_35 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_37 = LOGAND(op_RSHIFT_35, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_48 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_50 = LOGAND(op_RSHIFT_48, SN(32, 0xffff)); + RzILOpPure *op_MUL_57 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_37), DUP(op_AND_37))), CAST(32, MSB(DUP(op_AND_37)), DUP(op_AND_37)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_37)), DUP(op_AND_37))), CAST(32, MSB(DUP(op_AND_37)), DUP(op_AND_37))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_37)), DUP(op_AND_37))), CAST(32, MSB(DUP(op_AND_37)), DUP(op_AND_37)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_37)), DUP(op_AND_37))), CAST(32, MSB(DUP(op_AND_37)), DUP(op_AND_37))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_50), DUP(op_AND_50))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_SUB_58 = SUB(op_MUL_31, op_MUL_57); + RzILOpPure *op_ADD_61 = ADD(op_SUB_58, CAST(64, MSB(SN(32, 0x4000)), SN(32, 0x4000))); + RzILOpPure *op_RSHIFT_63 = SHIFTRA(op_ADD_61, SN(32, 15)); + RzILOpPure *op_RSHIFT_72 = SHIFTRA(DUP(Rss), 
SN(32, 0)); + RzILOpPure *op_AND_74 = LOGAND(op_RSHIFT_72, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_85 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_87 = LOGAND(op_RSHIFT_85, SN(32, 0xffff)); + RzILOpPure *op_MUL_94 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_74), DUP(op_AND_74))), CAST(32, MSB(DUP(op_AND_74)), DUP(op_AND_74)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_74)), DUP(op_AND_74))), CAST(32, MSB(DUP(op_AND_74)), DUP(op_AND_74))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_74)), DUP(op_AND_74))), CAST(32, MSB(DUP(op_AND_74)), DUP(op_AND_74)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_74)), DUP(op_AND_74))), CAST(32, MSB(DUP(op_AND_74)), DUP(op_AND_74))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_87), DUP(op_AND_87))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_RSHIFT_98 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_100 = LOGAND(op_RSHIFT_98, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_111 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_113 = LOGAND(op_RSHIFT_111, SN(32, 0xffff)); + RzILOpPure *op_MUL_120 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_100), DUP(op_AND_100))), CAST(32, MSB(DUP(op_AND_100)), DUP(op_AND_100)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_100)), DUP(op_AND_100))), CAST(32, MSB(DUP(op_AND_100)), DUP(op_AND_100))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_100)), DUP(op_AND_100))), CAST(32, MSB(DUP(op_AND_100)), DUP(op_AND_100)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_100)), DUP(op_AND_100))), CAST(32, MSB(DUP(op_AND_100)), DUP(op_AND_100))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_113), DUP(op_AND_113))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_SUB_121 = SUB(op_MUL_94, op_MUL_120); + RzILOpPure *op_ADD_124 = ADD(op_SUB_121, CAST(64, MSB(SN(32, 0x4000)), SN(32, 0x4000))); + RzILOpPure *op_RSHIFT_126 = SHIFTRA(op_ADD_124, SN(32, 15)); + RzILOpPure *op_EQ_127 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_RSHIFT_63), SN(32, 0), SN(32, 0x20)), 
op_RSHIFT_126); + RzILOpPure *op_RSHIFT_192 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_194 = LOGAND(op_RSHIFT_192, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_205 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_207 = LOGAND(op_RSHIFT_205, SN(32, 0xffff)); + RzILOpPure *op_MUL_214 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_194), DUP(op_AND_194))), CAST(32, MSB(DUP(op_AND_194)), DUP(op_AND_194)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_194)), DUP(op_AND_194))), CAST(32, MSB(DUP(op_AND_194)), DUP(op_AND_194))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_194)), DUP(op_AND_194))), CAST(32, MSB(DUP(op_AND_194)), DUP(op_AND_194)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_194)), DUP(op_AND_194))), CAST(32, MSB(DUP(op_AND_194)), DUP(op_AND_194))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_207), DUP(op_AND_207))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_RSHIFT_218 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_220 = LOGAND(op_RSHIFT_218, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_231 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_233 = LOGAND(op_RSHIFT_231, SN(32, 0xffff)); + RzILOpPure *op_MUL_240 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_220), DUP(op_AND_220))), CAST(32, MSB(DUP(op_AND_220)), DUP(op_AND_220)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_220)), DUP(op_AND_220))), CAST(32, MSB(DUP(op_AND_220)), DUP(op_AND_220))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_220)), DUP(op_AND_220))), CAST(32, MSB(DUP(op_AND_220)), DUP(op_AND_220)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_220)), DUP(op_AND_220))), CAST(32, MSB(DUP(op_AND_220)), DUP(op_AND_220))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_233), DUP(op_AND_233))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_SUB_241 = SUB(op_MUL_214, op_MUL_240); + RzILOpPure *op_ADD_244 = ADD(op_SUB_241, CAST(64, MSB(SN(32, 0x4000)), SN(32, 0x4000))); + RzILOpPure *op_RSHIFT_246 = SHIFTRA(op_ADD_244, SN(32, 15)); + RzILOpPure 
*op_LT_249 = SLT(op_RSHIFT_246, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_254 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_255 = NEG(op_LSHIFT_254); + RzILOpPure *op_LSHIFT_260 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_263 = SUB(op_LSHIFT_260, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_264 = ITE(op_LT_249, op_NEG_255, op_SUB_263); + RzILOpEffect *gcc_expr_265 = BRANCH(op_EQ_127, EMPTY(), set_usr_field_call_188); + + // h_tmp435 = HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rt >> 0x0) & 0xffff))), 0x0, 0x10) - ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rt >> 0x10) & 0xffff))), 0x0, 0x10) + ((st64) 0x4000) >> 0xf)), 0x0, 0x20) == (((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rt >> 0x0) & 0xffff))), 0x0, 0x10) - ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rt >> 0x10) & 0xffff))), 0x0, 0x10) + ((st64) 0x4000) >> 0xf))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rt >> 0x0) & 0xffff))), 0x0, 0x10) - ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rt >> 0x10) & 0xffff))), 0x0, 0x10) + ((st64) 0x4000) >> 0xf) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_267 = SETL("h_tmp435", cond_264); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st64 ...; + RzILOpEffect *seq_268 = SEQN(2, gcc_expr_265, op_ASSIGN_hybrid_tmp_267); + + // Rd = ((st32) ((sextract64(((ut64) (((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rt >> 0x0) & 0xffff))), 0x0, 0x10) - ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rt >> 0x10) & 0xffff))), 0x0, 0x10) + ((st64) 0x4000) >> 0xf)), 0x0, 0x20) == (((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rt >> 0x0) & 0xffff))), 0x0, 0x10) - ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rt >> 0x10) & 0xffff))), 0x0, 0x10) + ((st64) 0x4000) >> 0xf)) ? (((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rt >> 0x0) & 0xffff))), 0x0, 0x10) - ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rt >> 0x10) & 0xffff))), 0x0, 0x10) + ((st64) 0x4000) >> 0xf) : h_tmp435)); + RzILOpPure *op_RSHIFT_131 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_133 = LOGAND(op_RSHIFT_131, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_144 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_146 = LOGAND(op_RSHIFT_144, SN(32, 0xffff)); + RzILOpPure *op_MUL_153 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_133), DUP(op_AND_133))), CAST(32, MSB(DUP(op_AND_133)), DUP(op_AND_133)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_133)), DUP(op_AND_133))), CAST(32, MSB(DUP(op_AND_133)), DUP(op_AND_133))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_133)), DUP(op_AND_133))), CAST(32, MSB(DUP(op_AND_133)), DUP(op_AND_133)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_133)), DUP(op_AND_133))), CAST(32, MSB(DUP(op_AND_133)), DUP(op_AND_133))))), 
SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_146), DUP(op_AND_146))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_RSHIFT_157 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_159 = LOGAND(op_RSHIFT_157, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_170 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_172 = LOGAND(op_RSHIFT_170, SN(32, 0xffff)); + RzILOpPure *op_MUL_179 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_159), DUP(op_AND_159))), CAST(32, MSB(DUP(op_AND_159)), DUP(op_AND_159)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_159)), DUP(op_AND_159))), CAST(32, MSB(DUP(op_AND_159)), DUP(op_AND_159))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_159)), DUP(op_AND_159))), CAST(32, MSB(DUP(op_AND_159)), DUP(op_AND_159)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_159)), DUP(op_AND_159))), CAST(32, MSB(DUP(op_AND_159)), DUP(op_AND_159))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_172), DUP(op_AND_172))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_SUB_180 = SUB(op_MUL_153, op_MUL_179); + RzILOpPure *op_ADD_183 = ADD(op_SUB_180, CAST(64, MSB(SN(32, 0x4000)), SN(32, 0x4000))); + RzILOpPure *op_RSHIFT_185 = SHIFTRA(op_ADD_183, SN(32, 15)); + RzILOpPure *cond_269 = ITE(DUP(op_EQ_127), op_RSHIFT_185, VARL("h_tmp435")); + RzILOpEffect *op_ASSIGN_271 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(cond_269), DUP(cond_269))); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) (( ...; + RzILOpEffect *seq_272 = SEQN(2, seq_268, op_ASSIGN_271); + + RzILOpEffect *instruction_sequence = seq_272; + return instruction_sequence; +} + +// Rd = cmpyrwh(Rss,Rt*):<<1:rnd:sat +RzILOpEffect *hex_il_op_m4_cmpyr_whc(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, 
Rt_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_188 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rt >> 0x0) & 0xffff))), 0x0, 0x10) + ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rt >> 0x10) & 0xffff))), 0x0, 0x10) + ((st64) 0x4000) >> 0xf)), 0x0, 0x20) == (((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rt >> 0x0) & 0xffff))), 0x0, 0x10) + ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rt >> 0x10) & 0xffff))), 0x0, 0x10) + ((st64) 0x4000) >> 0xf))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rt >> 0x0) & 0xffff))), 0x0, 0x10) + ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rt >> 0x10) & 0xffff))), 0x0, 0x10) + ((st64) 0x4000) >> 0xf) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_8 = SHIFTRA(Rss, SN(32, 0)); + RzILOpPure *op_AND_10 = LOGAND(op_RSHIFT_8, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_22 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_24 = LOGAND(op_RSHIFT_22, SN(32, 0xffff)); + RzILOpPure *op_MUL_31 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_10), DUP(op_AND_10))), CAST(32, MSB(DUP(op_AND_10)), DUP(op_AND_10)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_10)), DUP(op_AND_10))), CAST(32, MSB(DUP(op_AND_10)), DUP(op_AND_10))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_10)), DUP(op_AND_10))), CAST(32, MSB(DUP(op_AND_10)), DUP(op_AND_10)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_10)), DUP(op_AND_10))), CAST(32, MSB(DUP(op_AND_10)), DUP(op_AND_10))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_24), DUP(op_AND_24))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_RSHIFT_35 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_37 = LOGAND(op_RSHIFT_35, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_48 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_50 = LOGAND(op_RSHIFT_48, SN(32, 0xffff)); + RzILOpPure *op_MUL_57 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_37), DUP(op_AND_37))), CAST(32, MSB(DUP(op_AND_37)), DUP(op_AND_37)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_37)), DUP(op_AND_37))), CAST(32, MSB(DUP(op_AND_37)), DUP(op_AND_37))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_37)), DUP(op_AND_37))), CAST(32, MSB(DUP(op_AND_37)), DUP(op_AND_37)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_37)), DUP(op_AND_37))), CAST(32, MSB(DUP(op_AND_37)), DUP(op_AND_37))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_50), DUP(op_AND_50))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_ADD_58 = ADD(op_MUL_31, op_MUL_57); + RzILOpPure *op_ADD_61 = ADD(op_ADD_58, CAST(64, MSB(SN(32, 0x4000)), SN(32, 0x4000))); + RzILOpPure *op_RSHIFT_63 = SHIFTRA(op_ADD_61, SN(32, 15)); + RzILOpPure *op_RSHIFT_72 = SHIFTRA(DUP(Rss), 
SN(32, 0)); + RzILOpPure *op_AND_74 = LOGAND(op_RSHIFT_72, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_85 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_87 = LOGAND(op_RSHIFT_85, SN(32, 0xffff)); + RzILOpPure *op_MUL_94 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_74), DUP(op_AND_74))), CAST(32, MSB(DUP(op_AND_74)), DUP(op_AND_74)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_74)), DUP(op_AND_74))), CAST(32, MSB(DUP(op_AND_74)), DUP(op_AND_74))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_74)), DUP(op_AND_74))), CAST(32, MSB(DUP(op_AND_74)), DUP(op_AND_74)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_74)), DUP(op_AND_74))), CAST(32, MSB(DUP(op_AND_74)), DUP(op_AND_74))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_87), DUP(op_AND_87))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_RSHIFT_98 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_100 = LOGAND(op_RSHIFT_98, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_111 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_113 = LOGAND(op_RSHIFT_111, SN(32, 0xffff)); + RzILOpPure *op_MUL_120 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_100), DUP(op_AND_100))), CAST(32, MSB(DUP(op_AND_100)), DUP(op_AND_100)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_100)), DUP(op_AND_100))), CAST(32, MSB(DUP(op_AND_100)), DUP(op_AND_100))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_100)), DUP(op_AND_100))), CAST(32, MSB(DUP(op_AND_100)), DUP(op_AND_100)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_100)), DUP(op_AND_100))), CAST(32, MSB(DUP(op_AND_100)), DUP(op_AND_100))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_113), DUP(op_AND_113))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_ADD_121 = ADD(op_MUL_94, op_MUL_120); + RzILOpPure *op_ADD_124 = ADD(op_ADD_121, CAST(64, MSB(SN(32, 0x4000)), SN(32, 0x4000))); + RzILOpPure *op_RSHIFT_126 = SHIFTRA(op_ADD_124, SN(32, 15)); + RzILOpPure *op_EQ_127 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_RSHIFT_63), SN(32, 0), SN(32, 0x20)), 
op_RSHIFT_126); + RzILOpPure *op_RSHIFT_192 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_194 = LOGAND(op_RSHIFT_192, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_205 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_207 = LOGAND(op_RSHIFT_205, SN(32, 0xffff)); + RzILOpPure *op_MUL_214 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_194), DUP(op_AND_194))), CAST(32, MSB(DUP(op_AND_194)), DUP(op_AND_194)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_194)), DUP(op_AND_194))), CAST(32, MSB(DUP(op_AND_194)), DUP(op_AND_194))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_194)), DUP(op_AND_194))), CAST(32, MSB(DUP(op_AND_194)), DUP(op_AND_194)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_194)), DUP(op_AND_194))), CAST(32, MSB(DUP(op_AND_194)), DUP(op_AND_194))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_207), DUP(op_AND_207))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_RSHIFT_218 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_220 = LOGAND(op_RSHIFT_218, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_231 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_233 = LOGAND(op_RSHIFT_231, SN(32, 0xffff)); + RzILOpPure *op_MUL_240 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_220), DUP(op_AND_220))), CAST(32, MSB(DUP(op_AND_220)), DUP(op_AND_220)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_220)), DUP(op_AND_220))), CAST(32, MSB(DUP(op_AND_220)), DUP(op_AND_220))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_220)), DUP(op_AND_220))), CAST(32, MSB(DUP(op_AND_220)), DUP(op_AND_220)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_220)), DUP(op_AND_220))), CAST(32, MSB(DUP(op_AND_220)), DUP(op_AND_220))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_233), DUP(op_AND_233))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_ADD_241 = ADD(op_MUL_214, op_MUL_240); + RzILOpPure *op_ADD_244 = ADD(op_ADD_241, CAST(64, MSB(SN(32, 0x4000)), SN(32, 0x4000))); + RzILOpPure *op_RSHIFT_246 = SHIFTRA(op_ADD_244, SN(32, 15)); + RzILOpPure 
*op_LT_249 = SLT(op_RSHIFT_246, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_254 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_255 = NEG(op_LSHIFT_254); + RzILOpPure *op_LSHIFT_260 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_263 = SUB(op_LSHIFT_260, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_264 = ITE(op_LT_249, op_NEG_255, op_SUB_263); + RzILOpEffect *gcc_expr_265 = BRANCH(op_EQ_127, EMPTY(), set_usr_field_call_188); + + // h_tmp436 = HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rt >> 0x0) & 0xffff))), 0x0, 0x10) + ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rt >> 0x10) & 0xffff))), 0x0, 0x10) + ((st64) 0x4000) >> 0xf)), 0x0, 0x20) == (((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rt >> 0x0) & 0xffff))), 0x0, 0x10) + ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rt >> 0x10) & 0xffff))), 0x0, 0x10) + ((st64) 0x4000) >> 0xf))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rt >> 0x0) & 0xffff))), 0x0, 0x10) + ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rt >> 0x10) & 0xffff))), 0x0, 0x10) + ((st64) 0x4000) >> 0xf) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_267 = SETL("h_tmp436", cond_264); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) ((st64 ...; + RzILOpEffect *seq_268 = SEQN(2, gcc_expr_265, op_ASSIGN_hybrid_tmp_267); + + // Rd = ((st32) ((sextract64(((ut64) (((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rt >> 0x0) & 0xffff))), 0x0, 0x10) + ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rt >> 0x10) & 0xffff))), 0x0, 0x10) + ((st64) 0x4000) >> 0xf)), 0x0, 0x20) == (((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rt >> 0x0) & 0xffff))), 0x0, 0x10) + ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rt >> 0x10) & 0xffff))), 0x0, 0x10) + ((st64) 0x4000) >> 0xf)) ? (((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rt >> 0x0) & 0xffff))), 0x0, 0x10) + ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rt >> 0x10) & 0xffff))), 0x0, 0x10) + ((st64) 0x4000) >> 0xf) : h_tmp436)); + RzILOpPure *op_RSHIFT_131 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_133 = LOGAND(op_RSHIFT_131, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_144 = SHIFTRA(DUP(Rt), SN(32, 0)); + RzILOpPure *op_AND_146 = LOGAND(op_RSHIFT_144, SN(32, 0xffff)); + RzILOpPure *op_MUL_153 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_133), DUP(op_AND_133))), CAST(32, MSB(DUP(op_AND_133)), DUP(op_AND_133)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_133)), DUP(op_AND_133))), CAST(32, MSB(DUP(op_AND_133)), DUP(op_AND_133))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_133)), DUP(op_AND_133))), CAST(32, MSB(DUP(op_AND_133)), DUP(op_AND_133)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_133)), DUP(op_AND_133))), CAST(32, MSB(DUP(op_AND_133)), DUP(op_AND_133))))), 
SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_146), DUP(op_AND_146))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_RSHIFT_157 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_159 = LOGAND(op_RSHIFT_157, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_170 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_172 = LOGAND(op_RSHIFT_170, SN(32, 0xffff)); + RzILOpPure *op_MUL_179 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_159), DUP(op_AND_159))), CAST(32, MSB(DUP(op_AND_159)), DUP(op_AND_159)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_159)), DUP(op_AND_159))), CAST(32, MSB(DUP(op_AND_159)), DUP(op_AND_159))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_159)), DUP(op_AND_159))), CAST(32, MSB(DUP(op_AND_159)), DUP(op_AND_159)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_159)), DUP(op_AND_159))), CAST(32, MSB(DUP(op_AND_159)), DUP(op_AND_159))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_172), DUP(op_AND_172))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_ADD_180 = ADD(op_MUL_153, op_MUL_179); + RzILOpPure *op_ADD_183 = ADD(op_ADD_180, CAST(64, MSB(SN(32, 0x4000)), SN(32, 0x4000))); + RzILOpPure *op_RSHIFT_185 = SHIFTRA(op_ADD_183, SN(32, 15)); + RzILOpPure *cond_269 = ITE(DUP(op_EQ_127), op_RSHIFT_185, VARL("h_tmp436")); + RzILOpEffect *op_ASSIGN_271 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(cond_269), DUP(cond_269))); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) ((st32) (( ...; + RzILOpEffect *seq_272 = SEQN(2, seq_268, op_ASSIGN_271); + + RzILOpEffect *instruction_sequence = seq_272; + return instruction_sequence; +} + +// Rx += mpy(Rs,Rt):<<1:sat +RzILOpEffect *hex_il_op_m4_mac_up_s1_sat(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, 
false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_35 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((st64) Rx) + (((st64) Rs) * ((st64) Rt) >> 0x1f)), 0x0, 0x20) == ((st64) Rx) + (((st64) Rs) * ((st64) Rt) >> 0x1f))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) Rx) + (((st64) Rs) * ((st64) Rt) >> 0x1f) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_MUL_9 = MUL(CAST(64, MSB(Rs), DUP(Rs)), CAST(64, MSB(Rt), DUP(Rt))); + RzILOpPure *op_RSHIFT_11 = SHIFTRA(op_MUL_9, SN(32, 31)); + RzILOpPure *op_ADD_12 = ADD(CAST(64, MSB(READ_REG(pkt, Rx_op, false)), READ_REG(pkt, Rx_op, false)), op_RSHIFT_11); + RzILOpPure *op_MUL_21 = MUL(CAST(64, MSB(DUP(Rs)), DUP(Rs)), CAST(64, MSB(DUP(Rt)), DUP(Rt))); + RzILOpPure *op_RSHIFT_23 = SHIFTRA(op_MUL_21, SN(32, 31)); + RzILOpPure *op_ADD_24 = ADD(CAST(64, MSB(READ_REG(pkt, Rx_op, false)), READ_REG(pkt, Rx_op, false)), op_RSHIFT_23); + RzILOpPure *op_EQ_25 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_ADD_12), SN(32, 0), SN(32, 0x20)), op_ADD_24); + RzILOpPure *op_MUL_39 = MUL(CAST(64, MSB(DUP(Rs)), DUP(Rs)), CAST(64, MSB(DUP(Rt)), DUP(Rt))); + RzILOpPure *op_RSHIFT_41 = SHIFTRA(op_MUL_39, SN(32, 31)); + RzILOpPure *op_ADD_42 = ADD(CAST(64, MSB(READ_REG(pkt, Rx_op, false)), READ_REG(pkt, Rx_op, false)), op_RSHIFT_41); + RzILOpPure *op_LT_45 = SLT(op_ADD_42, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_50 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_51 = NEG(op_LSHIFT_50); + RzILOpPure *op_LSHIFT_56 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_59 = SUB(op_LSHIFT_56, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_60 = ITE(op_LT_45, op_NEG_51, op_SUB_59); + RzILOpEffect *gcc_expr_61 = BRANCH(op_EQ_25, EMPTY(), set_usr_field_call_35); + + // h_tmp437 = HYB(gcc_expr_if ((sextract64(((ut64) 
((st64) Rx) + (((st64) Rs) * ((st64) Rt) >> 0x1f)), 0x0, 0x20) == ((st64) Rx) + (((st64) Rs) * ((st64) Rt) >> 0x1f))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) Rx) + (((st64) Rs) * ((st64) Rt) >> 0x1f) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_63 = SETL("h_tmp437", cond_60); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) Rx) + (((st64) ...; + RzILOpEffect *seq_64 = SEQN(2, gcc_expr_61, op_ASSIGN_hybrid_tmp_63); + + // Rx = ((st32) ((sextract64(((ut64) ((st64) Rx) + (((st64) Rs) * ((st64) Rt) >> 0x1f)), 0x0, 0x20) == ((st64) Rx) + (((st64) Rs) * ((st64) Rt) >> 0x1f)) ? ((st64) Rx) + (((st64) Rs) * ((st64) Rt) >> 0x1f) : h_tmp437)); + RzILOpPure *op_MUL_29 = MUL(CAST(64, MSB(DUP(Rs)), DUP(Rs)), CAST(64, MSB(DUP(Rt)), DUP(Rt))); + RzILOpPure *op_RSHIFT_31 = SHIFTRA(op_MUL_29, SN(32, 31)); + RzILOpPure *op_ADD_32 = ADD(CAST(64, MSB(READ_REG(pkt, Rx_op, false)), READ_REG(pkt, Rx_op, false)), op_RSHIFT_31); + RzILOpPure *cond_65 = ITE(DUP(op_EQ_25), op_ADD_32, VARL("h_tmp437")); + RzILOpEffect *op_ASSIGN_67 = WRITE_REG(bundle, Rx_op, CAST(32, MSB(cond_65), DUP(cond_65))); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) Rx) + (((st ...; + RzILOpEffect *seq_68 = SEQN(2, seq_64, op_ASSIGN_67); + + RzILOpEffect *instruction_sequence = seq_68; + return instruction_sequence; +} + +// Rd = add(Ii,mpyi(Rs,II)) +RzILOpEffect *hex_il_op_m4_mpyri_addi(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + RzILOpPure *U = UN(32, (ut32)ISA2IMM(hi, 'U')); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // U = U; + RzILOpEffect *imm_assign_4 = SETL("U", U); + + // Rd = ((st32) u + ((ut32) Rs) * U); + 
RzILOpPure *op_MUL_7 = MUL(CAST(32, IL_FALSE, Rs), VARL("U")); + RzILOpPure *op_ADD_8 = ADD(VARL("u"), op_MUL_7); + RzILOpEffect *op_ASSIGN_10 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, op_ADD_8)); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, imm_assign_4, op_ASSIGN_10); + return instruction_sequence; +} + +// Rd = add(Ru,mpyi(Rs,Ii)) +RzILOpEffect *hex_il_op_m4_mpyri_addr(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Ru_op = ISA2REG(hi, 'u', false); + RzILOpPure *Ru = READ_REG(pkt, Ru_op, false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // Rd = ((st32) ((ut32) Ru) + ((ut32) Rs) * u); + RzILOpPure *op_MUL_6 = MUL(CAST(32, IL_FALSE, Rs), VARL("u")); + RzILOpPure *op_ADD_8 = ADD(CAST(32, IL_FALSE, Ru), op_MUL_6); + RzILOpEffect *op_ASSIGN_10 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, op_ADD_8)); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_0, op_ASSIGN_10); + return instruction_sequence; +} + +// Rd = add(Ru,mpyi(Ii,Rs)) +RzILOpEffect *hex_il_op_m4_mpyri_addr_u2(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Ru_op = ISA2REG(hi, 'u', false); + RzILOpPure *Ru = READ_REG(pkt, Ru_op, false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + + // u = u; + RzILOpEffect *imm_assign_3 = SETL("u", u); + + // Rd = ((st32) ((ut32) Ru) + ((ut32) Rs) * u); + RzILOpPure *op_MUL_6 = MUL(CAST(32, IL_FALSE, Rs), VARL("u")); + RzILOpPure *op_ADD_8 = ADD(CAST(32, IL_FALSE, Ru), op_MUL_6); + RzILOpEffect *op_ASSIGN_10 = WRITE_REG(bundle, Rd_op, CAST(32, 
IL_FALSE, op_ADD_8)); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_3, op_ASSIGN_10); + return instruction_sequence; +} + +// Rd = add(Ii,mpyi(Rs,Rt)) +RzILOpEffect *hex_il_op_m4_mpyrr_addi(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // Rd = ((st32) u + ((ut32) Rs * Rt)); + RzILOpPure *op_MUL_5 = MUL(Rs, Rt); + RzILOpPure *op_ADD_7 = ADD(VARL("u"), CAST(32, IL_FALSE, op_MUL_5)); + RzILOpEffect *op_ASSIGN_9 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, op_ADD_7)); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_0, op_ASSIGN_9); + return instruction_sequence; +} + +// Ry = add(Ru,mpyi(Ryin,Rs)) +RzILOpEffect *hex_il_op_m4_mpyrr_addr(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Ry_op = ISA2REG(hi, 'y', false); + RzILOpPure *Ry = READ_REG(pkt, Ry_op, false); + const HexOp *Ru_op = ISA2REG(hi, 'u', false); + RzILOpPure *Ru = READ_REG(pkt, Ru_op, false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // Ry = Ru + Rs * Ry; + RzILOpPure *op_MUL_3 = MUL(Rs, Ry); + RzILOpPure *op_ADD_4 = ADD(Ru, op_MUL_3); + RzILOpEffect *op_ASSIGN_5 = WRITE_REG(bundle, Ry_op, op_ADD_4); + + RzILOpEffect *instruction_sequence = op_ASSIGN_5; + return instruction_sequence; +} + +// Rx -= mpy(Rs,Rt):<<1:sat +RzILOpEffect *hex_il_op_m4_nac_up_s1_sat(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', 
false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_35 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((st64) Rx) - (((st64) Rs) * ((st64) Rt) >> 0x1f)), 0x0, 0x20) == ((st64) Rx) - (((st64) Rs) * ((st64) Rt) >> 0x1f))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) Rx) - (((st64) Rs) * ((st64) Rt) >> 0x1f) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_MUL_9 = MUL(CAST(64, MSB(Rs), DUP(Rs)), CAST(64, MSB(Rt), DUP(Rt))); + RzILOpPure *op_RSHIFT_11 = SHIFTRA(op_MUL_9, SN(32, 31)); + RzILOpPure *op_SUB_12 = SUB(CAST(64, MSB(READ_REG(pkt, Rx_op, false)), READ_REG(pkt, Rx_op, false)), op_RSHIFT_11); + RzILOpPure *op_MUL_21 = MUL(CAST(64, MSB(DUP(Rs)), DUP(Rs)), CAST(64, MSB(DUP(Rt)), DUP(Rt))); + RzILOpPure *op_RSHIFT_23 = SHIFTRA(op_MUL_21, SN(32, 31)); + RzILOpPure *op_SUB_24 = SUB(CAST(64, MSB(READ_REG(pkt, Rx_op, false)), READ_REG(pkt, Rx_op, false)), op_RSHIFT_23); + RzILOpPure *op_EQ_25 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_SUB_12), SN(32, 0), SN(32, 0x20)), op_SUB_24); + RzILOpPure *op_MUL_39 = MUL(CAST(64, MSB(DUP(Rs)), DUP(Rs)), CAST(64, MSB(DUP(Rt)), DUP(Rt))); + RzILOpPure *op_RSHIFT_41 = SHIFTRA(op_MUL_39, SN(32, 31)); + RzILOpPure *op_SUB_42 = SUB(CAST(64, MSB(READ_REG(pkt, Rx_op, false)), READ_REG(pkt, Rx_op, false)), op_RSHIFT_41); + RzILOpPure *op_LT_45 = SLT(op_SUB_42, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_50 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_51 = NEG(op_LSHIFT_50); + RzILOpPure *op_LSHIFT_56 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_59 = SUB(op_LSHIFT_56, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_60 = ITE(op_LT_45, op_NEG_51, 
op_SUB_59); + RzILOpEffect *gcc_expr_61 = BRANCH(op_EQ_25, EMPTY(), set_usr_field_call_35); + + // h_tmp438 = HYB(gcc_expr_if ((sextract64(((ut64) ((st64) Rx) - (((st64) Rs) * ((st64) Rt) >> 0x1f)), 0x0, 0x20) == ((st64) Rx) - (((st64) Rs) * ((st64) Rt) >> 0x1f))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) Rx) - (((st64) Rs) * ((st64) Rt) >> 0x1f) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_63 = SETL("h_tmp438", cond_60); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) Rx) - (((st64) ...; + RzILOpEffect *seq_64 = SEQN(2, gcc_expr_61, op_ASSIGN_hybrid_tmp_63); + + // Rx = ((st32) ((sextract64(((ut64) ((st64) Rx) - (((st64) Rs) * ((st64) Rt) >> 0x1f)), 0x0, 0x20) == ((st64) Rx) - (((st64) Rs) * ((st64) Rt) >> 0x1f)) ? ((st64) Rx) - (((st64) Rs) * ((st64) Rt) >> 0x1f) : h_tmp438)); + RzILOpPure *op_MUL_29 = MUL(CAST(64, MSB(DUP(Rs)), DUP(Rs)), CAST(64, MSB(DUP(Rt)), DUP(Rt))); + RzILOpPure *op_RSHIFT_31 = SHIFTRA(op_MUL_29, SN(32, 31)); + RzILOpPure *op_SUB_32 = SUB(CAST(64, MSB(READ_REG(pkt, Rx_op, false)), READ_REG(pkt, Rx_op, false)), op_RSHIFT_31); + RzILOpPure *cond_65 = ITE(DUP(op_EQ_25), op_SUB_32, VARL("h_tmp438")); + RzILOpEffect *op_ASSIGN_67 = WRITE_REG(bundle, Rx_op, CAST(32, MSB(cond_65), DUP(cond_65))); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) Rx) - (((st ...; + RzILOpEffect *seq_68 = SEQN(2, seq_64, op_ASSIGN_67); + + RzILOpEffect *instruction_sequence = seq_68; + return instruction_sequence; +} + +// Rx |= and(Rs,Rt) +RzILOpEffect *hex_il_op_m4_or_and(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rx = (Rx | (Rs & Rt)); + 
RzILOpPure *op_AND_3 = LOGAND(Rs, Rt); + RzILOpPure *op_OR_4 = LOGOR(READ_REG(pkt, Rx_op, false), op_AND_3); + RzILOpEffect *op_ASSIGN_OR_5 = WRITE_REG(bundle, Rx_op, op_OR_4); + + RzILOpEffect *instruction_sequence = op_ASSIGN_OR_5; + return instruction_sequence; +} + +// Rx |= and(Rs,~Rt) +RzILOpEffect *hex_il_op_m4_or_andn(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rx = (Rx | (Rs & (~Rt))); + RzILOpPure *op_NOT_3 = LOGNOT(Rt); + RzILOpPure *op_AND_4 = LOGAND(Rs, op_NOT_3); + RzILOpPure *op_OR_5 = LOGOR(READ_REG(pkt, Rx_op, false), op_AND_4); + RzILOpEffect *op_ASSIGN_OR_6 = WRITE_REG(bundle, Rx_op, op_OR_5); + + RzILOpEffect *instruction_sequence = op_ASSIGN_OR_6; + return instruction_sequence; +} + +// Rx |= or(Rs,Rt) +RzILOpEffect *hex_il_op_m4_or_or(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rx = (Rx | (Rs | Rt)); + RzILOpPure *op_OR_3 = LOGOR(Rs, Rt); + RzILOpPure *op_OR_4 = LOGOR(READ_REG(pkt, Rx_op, false), op_OR_3); + RzILOpEffect *op_ASSIGN_OR_5 = WRITE_REG(bundle, Rx_op, op_OR_4); + + RzILOpEffect *instruction_sequence = op_ASSIGN_OR_5; + return instruction_sequence; +} + +// Rx |= xor(Rs,Rt) +RzILOpEffect *hex_il_op_m4_or_xor(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = 
READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rx = (Rx | (Rs ^ Rt)); + RzILOpPure *op_XOR_3 = LOGXOR(Rs, Rt); + RzILOpPure *op_OR_4 = LOGOR(READ_REG(pkt, Rx_op, false), op_XOR_3); + RzILOpEffect *op_ASSIGN_OR_5 = WRITE_REG(bundle, Rx_op, op_OR_4); + + RzILOpEffect *instruction_sequence = op_ASSIGN_OR_5; + return instruction_sequence; +} + +// Rdd = pmpyw(Rs,Rt) +RzILOpEffect *hex_il_op_m4_pmpyw(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Rxx ^= pmpyw(Rs,Rt) +RzILOpEffect *hex_il_op_m4_pmpyw_acc(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Rdd = vpmpyh(Rs,Rt) +RzILOpEffect *hex_il_op_m4_vpmpyh(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: st32 i; + // Declare: ut32 x0; + // Declare: ut32 x1; + // Declare: ut32 y0; + // Declare: ut32 y1; + // Declare: ut32 prod0; + // Declare: ut32 prod1; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + + // x0 = ((ut32) ((ut16) ((Rs >> 0x0) & 0xffff))); + RzILOpPure *op_RSHIFT_11 = SHIFTRA(Rs, SN(32, 0)); + RzILOpPure *op_AND_13 = LOGAND(op_RSHIFT_11, SN(32, 0xffff)); + RzILOpEffect *op_ASSIGN_16 = SETL("x0", CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_13))); + + // x1 = ((ut32) ((ut16) ((Rs >> 0x10) & 0xffff))); + RzILOpPure *op_RSHIFT_20 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_22 = LOGAND(op_RSHIFT_20, SN(32, 0xffff)); + RzILOpEffect *op_ASSIGN_25 = SETL("x1", CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_22))); + + // y0 = ((ut32) ((ut16) ((Rt >> 0x0) & 0xffff))); + RzILOpPure *op_RSHIFT_30 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_32 = LOGAND(op_RSHIFT_30, SN(32, 0xffff)); + RzILOpEffect *op_ASSIGN_35 = SETL("y0", CAST(32, IL_FALSE, CAST(16, 
IL_FALSE, op_AND_32))); + + // y1 = ((ut32) ((ut16) ((Rt >> 0x10) & 0xffff))); + RzILOpPure *op_RSHIFT_39 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_41 = LOGAND(op_RSHIFT_39, SN(32, 0xffff)); + RzILOpEffect *op_ASSIGN_44 = SETL("y1", CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_41))); + + // prod1 = ((ut32) 0x0); + RzILOpEffect *op_ASSIGN_47 = SETL("prod1", CAST(32, IL_FALSE, SN(32, 0))); + + // prod0 = ((ut32) 0x0); + RzILOpEffect *op_ASSIGN_48 = SETL("prod0", CAST(32, IL_FALSE, SN(32, 0))); + + // seq(prod0 = ((ut32) 0x0); prod1 = ((ut32) 0x0)); + RzILOpEffect *seq_49 = SEQN(2, op_ASSIGN_48, op_ASSIGN_47); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_51 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_54 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp439 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_56 = SETL("h_tmp439", VARL("i")); + + // seq(h_tmp439 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_57 = SEQN(2, op_ASSIGN_hybrid_tmp_56, op_INC_54); + + // prod0 = (prod0 ^ (x0 << i)); + RzILOpPure *op_LSHIFT_62 = SHIFTL0(VARL("x0"), VARL("i")); + RzILOpPure *op_XOR_63 = LOGXOR(VARL("prod0"), op_LSHIFT_62); + RzILOpEffect *op_ASSIGN_XOR_64 = SETL("prod0", op_XOR_63); + + // seq(prod0 = (prod0 ^ (x0 << i))); + RzILOpEffect *seq_then_65 = op_ASSIGN_XOR_64; + + // if (((y0 >> i) & ((ut32) 0x1))) {seq(prod0 = (prod0 ^ (x0 << i)))} else {{}}; + RzILOpPure *op_RSHIFT_58 = SHIFTR0(VARL("y0"), VARL("i")); + RzILOpPure *op_AND_61 = LOGAND(op_RSHIFT_58, CAST(32, IL_FALSE, SN(32, 1))); + RzILOpEffect *branch_66 = BRANCH(NON_ZERO(op_AND_61), seq_then_65, EMPTY()); + + // prod1 = (prod1 ^ (x1 << i)); + RzILOpPure *op_LSHIFT_71 = SHIFTL0(VARL("x1"), VARL("i")); + RzILOpPure *op_XOR_72 = LOGXOR(VARL("prod1"), op_LSHIFT_71); + RzILOpEffect *op_ASSIGN_XOR_73 = SETL("prod1", op_XOR_72); + + // seq(prod1 = (prod1 ^ (x1 << i))); + RzILOpEffect *seq_then_74 = op_ASSIGN_XOR_73; + + // if (((y1 >> i) & ((ut32) 0x1))) {seq(prod1 = (prod1 ^ (x1 << i)))} else {{}}; + 
RzILOpPure *op_RSHIFT_67 = SHIFTR0(VARL("y1"), VARL("i")); + RzILOpPure *op_AND_70 = LOGAND(op_RSHIFT_67, CAST(32, IL_FALSE, SN(32, 1))); + RzILOpEffect *branch_75 = BRANCH(NON_ZERO(op_AND_70), seq_then_74, EMPTY()); + + // seq(h_tmp439; if (((y0 >> i) & ((ut32) 0x1))) {seq(prod0 = (prod ...; + RzILOpEffect *seq_76 = SEQN(2, branch_66, branch_75); + + // seq(seq(h_tmp439; if (((y0 >> i) & ((ut32) 0x1))) {seq(prod0 = ( ...; + RzILOpEffect *seq_77 = SEQN(2, seq_76, seq_57); + + // while ((i < 0x10)) { seq(seq(h_tmp439; if (((y0 >> i) & ((ut32) 0x1))) {seq(prod0 = ( ... }; + RzILOpPure *op_LT_53 = SLT(VARL("i"), SN(32, 16)); + RzILOpEffect *for_78 = REPEAT(op_LT_53, seq_77); + + // seq(i = 0x0; while ((i < 0x10)) { seq(seq(h_tmp439; if (((y0 >> ...; + RzILOpEffect *seq_79 = SEQN(2, op_ASSIGN_51, for_78); + + // Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << 0x0)))) | (((ut64) (((st32) ((ut16) ((prod0 >> 0x0) & ((ut32) 0xffff)))) & 0xffff)) << 0x0))); + RzILOpPure *op_LSHIFT_85 = SHIFTL0(SN(64, 0xffff), SN(32, 0)); + RzILOpPure *op_NOT_86 = LOGNOT(op_LSHIFT_85); + RzILOpPure *op_AND_87 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_86); + RzILOpPure *op_RSHIFT_91 = SHIFTR0(VARL("prod0"), SN(32, 0)); + RzILOpPure *op_AND_94 = LOGAND(op_RSHIFT_91, CAST(32, IL_FALSE, SN(32, 0xffff))); + RzILOpPure *op_AND_98 = LOGAND(CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_94)), SN(32, 0xffff)); + RzILOpPure *op_LSHIFT_103 = SHIFTL0(CAST(64, IL_FALSE, op_AND_98), SN(32, 0)); + RzILOpPure *op_OR_105 = LOGOR(CAST(64, IL_FALSE, op_AND_87), op_LSHIFT_103); + RzILOpEffect *op_ASSIGN_107 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_105)); + + // Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << 0x10)))) | (((ut64) (((st32) ((ut16) ((prod1 >> 0x0) & ((ut32) 0xffff)))) & 0xffff)) << 0x10))); + RzILOpPure *op_LSHIFT_113 = SHIFTL0(SN(64, 0xffff), SN(32, 16)); + RzILOpPure *op_NOT_114 = LOGNOT(op_LSHIFT_113); + RzILOpPure *op_AND_115 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_114); + RzILOpPure 
*op_RSHIFT_119 = SHIFTR0(VARL("prod1"), SN(32, 0)); + RzILOpPure *op_AND_122 = LOGAND(op_RSHIFT_119, CAST(32, IL_FALSE, SN(32, 0xffff))); + RzILOpPure *op_AND_126 = LOGAND(CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_122)), SN(32, 0xffff)); + RzILOpPure *op_LSHIFT_131 = SHIFTL0(CAST(64, IL_FALSE, op_AND_126), SN(32, 16)); + RzILOpPure *op_OR_133 = LOGOR(CAST(64, IL_FALSE, op_AND_115), op_LSHIFT_131); + RzILOpEffect *op_ASSIGN_135 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_133)); + + // Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << 0x20)))) | (((ut64) (((st32) ((ut16) ((prod0 >> 0x10) & ((ut32) 0xffff)))) & 0xffff)) << 0x20))); + RzILOpPure *op_LSHIFT_141 = SHIFTL0(SN(64, 0xffff), SN(32, 0x20)); + RzILOpPure *op_NOT_142 = LOGNOT(op_LSHIFT_141); + RzILOpPure *op_AND_143 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_142); + RzILOpPure *op_RSHIFT_147 = SHIFTR0(VARL("prod0"), SN(32, 16)); + RzILOpPure *op_AND_150 = LOGAND(op_RSHIFT_147, CAST(32, IL_FALSE, SN(32, 0xffff))); + RzILOpPure *op_AND_154 = LOGAND(CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_150)), SN(32, 0xffff)); + RzILOpPure *op_LSHIFT_159 = SHIFTL0(CAST(64, IL_FALSE, op_AND_154), SN(32, 0x20)); + RzILOpPure *op_OR_161 = LOGOR(CAST(64, IL_FALSE, op_AND_143), op_LSHIFT_159); + RzILOpEffect *op_ASSIGN_163 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_161)); + + // Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << 0x30)))) | (((ut64) (((st32) ((ut16) ((prod1 >> 0x10) & ((ut32) 0xffff)))) & 0xffff)) << 0x30))); + RzILOpPure *op_LSHIFT_169 = SHIFTL0(SN(64, 0xffff), SN(32, 0x30)); + RzILOpPure *op_NOT_170 = LOGNOT(op_LSHIFT_169); + RzILOpPure *op_AND_171 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_170); + RzILOpPure *op_RSHIFT_175 = SHIFTR0(VARL("prod1"), SN(32, 16)); + RzILOpPure *op_AND_178 = LOGAND(op_RSHIFT_175, CAST(32, IL_FALSE, SN(32, 0xffff))); + RzILOpPure *op_AND_182 = LOGAND(CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_178)), SN(32, 0xffff)); + RzILOpPure *op_LSHIFT_187 = SHIFTL0(CAST(64, 
IL_FALSE, op_AND_182), SN(32, 0x30)); + RzILOpPure *op_OR_189 = LOGOR(CAST(64, IL_FALSE, op_AND_171), op_LSHIFT_187); + RzILOpEffect *op_ASSIGN_191 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_189)); + + RzILOpEffect *instruction_sequence = SEQN(10, op_ASSIGN_16, op_ASSIGN_25, op_ASSIGN_35, op_ASSIGN_44, seq_49, seq_79, op_ASSIGN_107, op_ASSIGN_135, op_ASSIGN_163, op_ASSIGN_191); + return instruction_sequence; +} + +// Rxx ^= vpmpyh(Rs,Rt) +RzILOpEffect *hex_il_op_m4_vpmpyh_acc(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: st32 i; + // Declare: ut32 x0; + // Declare: ut32 x1; + // Declare: ut32 y0; + // Declare: ut32 y1; + // Declare: ut32 prod0; + // Declare: ut32 prod1; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + const HexOp *Rxx_op = ISA2REG(hi, 'x', false); + + // x0 = ((ut32) ((ut16) ((Rs >> 0x0) & 0xffff))); + RzILOpPure *op_RSHIFT_11 = SHIFTRA(Rs, SN(32, 0)); + RzILOpPure *op_AND_13 = LOGAND(op_RSHIFT_11, SN(32, 0xffff)); + RzILOpEffect *op_ASSIGN_16 = SETL("x0", CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_13))); + + // x1 = ((ut32) ((ut16) ((Rs >> 0x10) & 0xffff))); + RzILOpPure *op_RSHIFT_20 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_22 = LOGAND(op_RSHIFT_20, SN(32, 0xffff)); + RzILOpEffect *op_ASSIGN_25 = SETL("x1", CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_22))); + + // y0 = ((ut32) ((ut16) ((Rt >> 0x0) & 0xffff))); + RzILOpPure *op_RSHIFT_30 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_32 = LOGAND(op_RSHIFT_30, SN(32, 0xffff)); + RzILOpEffect *op_ASSIGN_35 = SETL("y0", CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_32))); + + // y1 = ((ut32) ((ut16) ((Rt >> 0x10) & 0xffff))); + RzILOpPure *op_RSHIFT_39 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_41 = LOGAND(op_RSHIFT_39, SN(32, 0xffff)); + RzILOpEffect 
*op_ASSIGN_44 = SETL("y1", CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_41))); + + // prod1 = ((ut32) 0x0); + RzILOpEffect *op_ASSIGN_47 = SETL("prod1", CAST(32, IL_FALSE, SN(32, 0))); + + // prod0 = ((ut32) 0x0); + RzILOpEffect *op_ASSIGN_48 = SETL("prod0", CAST(32, IL_FALSE, SN(32, 0))); + + // seq(prod0 = ((ut32) 0x0); prod1 = ((ut32) 0x0)); + RzILOpEffect *seq_49 = SEQN(2, op_ASSIGN_48, op_ASSIGN_47); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_51 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_54 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp440 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_56 = SETL("h_tmp440", VARL("i")); + + // seq(h_tmp440 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_57 = SEQN(2, op_ASSIGN_hybrid_tmp_56, op_INC_54); + + // prod0 = (prod0 ^ (x0 << i)); + RzILOpPure *op_LSHIFT_62 = SHIFTL0(VARL("x0"), VARL("i")); + RzILOpPure *op_XOR_63 = LOGXOR(VARL("prod0"), op_LSHIFT_62); + RzILOpEffect *op_ASSIGN_XOR_64 = SETL("prod0", op_XOR_63); + + // seq(prod0 = (prod0 ^ (x0 << i))); + RzILOpEffect *seq_then_65 = op_ASSIGN_XOR_64; + + // if (((y0 >> i) & ((ut32) 0x1))) {seq(prod0 = (prod0 ^ (x0 << i)))} else {{}}; + RzILOpPure *op_RSHIFT_58 = SHIFTR0(VARL("y0"), VARL("i")); + RzILOpPure *op_AND_61 = LOGAND(op_RSHIFT_58, CAST(32, IL_FALSE, SN(32, 1))); + RzILOpEffect *branch_66 = BRANCH(NON_ZERO(op_AND_61), seq_then_65, EMPTY()); + + // prod1 = (prod1 ^ (x1 << i)); + RzILOpPure *op_LSHIFT_71 = SHIFTL0(VARL("x1"), VARL("i")); + RzILOpPure *op_XOR_72 = LOGXOR(VARL("prod1"), op_LSHIFT_71); + RzILOpEffect *op_ASSIGN_XOR_73 = SETL("prod1", op_XOR_72); + + // seq(prod1 = (prod1 ^ (x1 << i))); + RzILOpEffect *seq_then_74 = op_ASSIGN_XOR_73; + + // if (((y1 >> i) & ((ut32) 0x1))) {seq(prod1 = (prod1 ^ (x1 << i)))} else {{}}; + RzILOpPure *op_RSHIFT_67 = SHIFTR0(VARL("y1"), VARL("i")); + RzILOpPure *op_AND_70 = LOGAND(op_RSHIFT_67, CAST(32, IL_FALSE, SN(32, 1))); + RzILOpEffect *branch_75 = BRANCH(NON_ZERO(op_AND_70), seq_then_74, EMPTY()); + + 
// seq(h_tmp440; if (((y0 >> i) & ((ut32) 0x1))) {seq(prod0 = (prod ...; + RzILOpEffect *seq_76 = SEQN(2, branch_66, branch_75); + + // seq(seq(h_tmp440; if (((y0 >> i) & ((ut32) 0x1))) {seq(prod0 = ( ...; + RzILOpEffect *seq_77 = SEQN(2, seq_76, seq_57); + + // while ((i < 0x10)) { seq(seq(h_tmp440; if (((y0 >> i) & ((ut32) 0x1))) {seq(prod0 = ( ... }; + RzILOpPure *op_LT_53 = SLT(VARL("i"), SN(32, 16)); + RzILOpEffect *for_78 = REPEAT(op_LT_53, seq_77); + + // seq(i = 0x0; while ((i < 0x10)) { seq(seq(h_tmp440; if (((y0 >> ...; + RzILOpEffect *seq_79 = SEQN(2, op_ASSIGN_51, for_78); + + // Rxx = ((st64) (((ut64) (Rxx & (~(0xffff << 0x0)))) | (((ut64) ((((st32) ((ut16) ((Rxx >> 0x0) & ((st64) 0xffff)))) ^ ((st32) ((ut16) ((prod0 >> 0x0) & ((ut32) 0xffff))))) & 0xffff)) << 0x0))); + RzILOpPure *op_LSHIFT_85 = SHIFTL0(SN(64, 0xffff), SN(32, 0)); + RzILOpPure *op_NOT_86 = LOGNOT(op_LSHIFT_85); + RzILOpPure *op_AND_87 = LOGAND(READ_REG(pkt, Rxx_op, false), op_NOT_86); + RzILOpPure *op_RSHIFT_91 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_94 = LOGAND(op_RSHIFT_91, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_99 = SHIFTR0(VARL("prod0"), SN(32, 0)); + RzILOpPure *op_AND_102 = LOGAND(op_RSHIFT_99, CAST(32, IL_FALSE, SN(32, 0xffff))); + RzILOpPure *op_XOR_106 = LOGXOR(CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_94)), CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_102))); + RzILOpPure *op_AND_108 = LOGAND(op_XOR_106, SN(32, 0xffff)); + RzILOpPure *op_LSHIFT_113 = SHIFTL0(CAST(64, IL_FALSE, op_AND_108), SN(32, 0)); + RzILOpPure *op_OR_115 = LOGOR(CAST(64, IL_FALSE, op_AND_87), op_LSHIFT_113); + RzILOpEffect *op_ASSIGN_117 = WRITE_REG(bundle, Rxx_op, CAST(64, IL_FALSE, op_OR_115)); + + // Rxx = ((st64) (((ut64) (Rxx & (~(0xffff << 0x10)))) | (((ut64) ((((st32) ((ut16) ((Rxx >> 0x10) & ((st64) 0xffff)))) ^ ((st32) ((ut16) ((prod1 >> 0x0) & ((ut32) 0xffff))))) & 0xffff)) << 0x10))); + RzILOpPure *op_LSHIFT_123 = 
SHIFTL0(SN(64, 0xffff), SN(32, 16)); + RzILOpPure *op_NOT_124 = LOGNOT(op_LSHIFT_123); + RzILOpPure *op_AND_125 = LOGAND(READ_REG(pkt, Rxx_op, false), op_NOT_124); + RzILOpPure *op_RSHIFT_129 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 16)); + RzILOpPure *op_AND_132 = LOGAND(op_RSHIFT_129, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_137 = SHIFTR0(VARL("prod1"), SN(32, 0)); + RzILOpPure *op_AND_140 = LOGAND(op_RSHIFT_137, CAST(32, IL_FALSE, SN(32, 0xffff))); + RzILOpPure *op_XOR_144 = LOGXOR(CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_132)), CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_140))); + RzILOpPure *op_AND_146 = LOGAND(op_XOR_144, SN(32, 0xffff)); + RzILOpPure *op_LSHIFT_151 = SHIFTL0(CAST(64, IL_FALSE, op_AND_146), SN(32, 16)); + RzILOpPure *op_OR_153 = LOGOR(CAST(64, IL_FALSE, op_AND_125), op_LSHIFT_151); + RzILOpEffect *op_ASSIGN_155 = WRITE_REG(bundle, Rxx_op, CAST(64, IL_FALSE, op_OR_153)); + + // Rxx = ((st64) (((ut64) (Rxx & (~(0xffff << 0x20)))) | (((ut64) ((((st32) ((ut16) ((Rxx >> 0x20) & ((st64) 0xffff)))) ^ ((st32) ((ut16) ((prod0 >> 0x10) & ((ut32) 0xffff))))) & 0xffff)) << 0x20))); + RzILOpPure *op_LSHIFT_161 = SHIFTL0(SN(64, 0xffff), SN(32, 0x20)); + RzILOpPure *op_NOT_162 = LOGNOT(op_LSHIFT_161); + RzILOpPure *op_AND_163 = LOGAND(READ_REG(pkt, Rxx_op, false), op_NOT_162); + RzILOpPure *op_RSHIFT_167 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_170 = LOGAND(op_RSHIFT_167, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_175 = SHIFTR0(VARL("prod0"), SN(32, 16)); + RzILOpPure *op_AND_178 = LOGAND(op_RSHIFT_175, CAST(32, IL_FALSE, SN(32, 0xffff))); + RzILOpPure *op_XOR_182 = LOGXOR(CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_170)), CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_178))); + RzILOpPure *op_AND_184 = LOGAND(op_XOR_182, SN(32, 0xffff)); + RzILOpPure *op_LSHIFT_189 = SHIFTL0(CAST(64, IL_FALSE, op_AND_184), SN(32, 0x20)); + RzILOpPure *op_OR_191 = 
LOGOR(CAST(64, IL_FALSE, op_AND_163), op_LSHIFT_189); + RzILOpEffect *op_ASSIGN_193 = WRITE_REG(bundle, Rxx_op, CAST(64, IL_FALSE, op_OR_191)); + + // Rxx = ((st64) (((ut64) (Rxx & (~(0xffff << 0x30)))) | (((ut64) ((((st32) ((ut16) ((Rxx >> 0x30) & ((st64) 0xffff)))) ^ ((st32) ((ut16) ((prod1 >> 0x10) & ((ut32) 0xffff))))) & 0xffff)) << 0x30))); + RzILOpPure *op_LSHIFT_199 = SHIFTL0(SN(64, 0xffff), SN(32, 0x30)); + RzILOpPure *op_NOT_200 = LOGNOT(op_LSHIFT_199); + RzILOpPure *op_AND_201 = LOGAND(READ_REG(pkt, Rxx_op, false), op_NOT_200); + RzILOpPure *op_RSHIFT_205 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x30)); + RzILOpPure *op_AND_208 = LOGAND(op_RSHIFT_205, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_213 = SHIFTR0(VARL("prod1"), SN(32, 16)); + RzILOpPure *op_AND_216 = LOGAND(op_RSHIFT_213, CAST(32, IL_FALSE, SN(32, 0xffff))); + RzILOpPure *op_XOR_220 = LOGXOR(CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_208)), CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_216))); + RzILOpPure *op_AND_222 = LOGAND(op_XOR_220, SN(32, 0xffff)); + RzILOpPure *op_LSHIFT_227 = SHIFTL0(CAST(64, IL_FALSE, op_AND_222), SN(32, 0x30)); + RzILOpPure *op_OR_229 = LOGOR(CAST(64, IL_FALSE, op_AND_201), op_LSHIFT_227); + RzILOpEffect *op_ASSIGN_231 = WRITE_REG(bundle, Rxx_op, CAST(64, IL_FALSE, op_OR_229)); + + RzILOpEffect *instruction_sequence = SEQN(10, op_ASSIGN_16, op_ASSIGN_25, op_ASSIGN_35, op_ASSIGN_44, seq_49, seq_79, op_ASSIGN_117, op_ASSIGN_155, op_ASSIGN_193, op_ASSIGN_231); + return instruction_sequence; +} + +// Rxx += vrmpyweh(Rss,Rtt) +RzILOpEffect *hex_il_op_m4_vrmpyeh_acc_s0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rxx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // Rxx = Rxx + 
(((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10) << 0x0) + (((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10) << 0x0); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rss, SN(32, 0x20)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_19 = SHIFTRA(Rtt, SN(32, 0x20)); + RzILOpPure *op_AND_22 = LOGAND(op_RSHIFT_19, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_29 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_7), DUP(op_AND_7))), CAST(32, MSB(DUP(op_AND_7)), DUP(op_AND_7)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_7)), DUP(op_AND_7))), CAST(32, MSB(DUP(op_AND_7)), DUP(op_AND_7))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_7)), DUP(op_AND_7))), CAST(32, MSB(DUP(op_AND_7)), DUP(op_AND_7)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_7)), DUP(op_AND_7))), CAST(32, MSB(DUP(op_AND_7)), DUP(op_AND_7))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_22), DUP(op_AND_22))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_31 = SHIFTL0(op_MUL_29, SN(32, 0)); + RzILOpPure *op_RSHIFT_35 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_37 = LOGAND(op_RSHIFT_35, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_48 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_51 = LOGAND(op_RSHIFT_48, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_58 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_37), DUP(op_AND_37))), CAST(32, MSB(DUP(op_AND_37)), DUP(op_AND_37)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_37)), DUP(op_AND_37))), CAST(32, MSB(DUP(op_AND_37)), DUP(op_AND_37))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_37)), DUP(op_AND_37))), CAST(32, MSB(DUP(op_AND_37)), DUP(op_AND_37)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_37)), DUP(op_AND_37))), CAST(32, MSB(DUP(op_AND_37)), 
DUP(op_AND_37))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_51), DUP(op_AND_51))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_60 = SHIFTL0(op_MUL_58, SN(32, 0)); + RzILOpPure *op_ADD_61 = ADD(op_LSHIFT_31, op_LSHIFT_60); + RzILOpPure *op_ADD_62 = ADD(READ_REG(pkt, Rxx_op, false), op_ADD_61); + RzILOpEffect *op_ASSIGN_ADD_63 = WRITE_REG(bundle, Rxx_op, op_ADD_62); + + RzILOpEffect *instruction_sequence = op_ASSIGN_ADD_63; + return instruction_sequence; +} + +// Rxx += vrmpyweh(Rss,Rtt):<<1 +RzILOpEffect *hex_il_op_m4_vrmpyeh_acc_s1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rxx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // Rxx = Rxx + (((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10) << 0x1) + (((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10) << 0x1); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rss, SN(32, 0x20)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_19 = SHIFTRA(Rtt, SN(32, 0x20)); + RzILOpPure *op_AND_22 = LOGAND(op_RSHIFT_19, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_29 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_7), DUP(op_AND_7))), CAST(32, MSB(DUP(op_AND_7)), DUP(op_AND_7)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_7)), DUP(op_AND_7))), CAST(32, MSB(DUP(op_AND_7)), DUP(op_AND_7))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_7)), DUP(op_AND_7))), CAST(32, MSB(DUP(op_AND_7)), DUP(op_AND_7)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_7)), DUP(op_AND_7))), CAST(32, MSB(DUP(op_AND_7)), DUP(op_AND_7))))), 
SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_22), DUP(op_AND_22))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_31 = SHIFTL0(op_MUL_29, SN(32, 1)); + RzILOpPure *op_RSHIFT_35 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_37 = LOGAND(op_RSHIFT_35, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_48 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_51 = LOGAND(op_RSHIFT_48, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_58 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_37), DUP(op_AND_37))), CAST(32, MSB(DUP(op_AND_37)), DUP(op_AND_37)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_37)), DUP(op_AND_37))), CAST(32, MSB(DUP(op_AND_37)), DUP(op_AND_37))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_37)), DUP(op_AND_37))), CAST(32, MSB(DUP(op_AND_37)), DUP(op_AND_37)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_37)), DUP(op_AND_37))), CAST(32, MSB(DUP(op_AND_37)), DUP(op_AND_37))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_51), DUP(op_AND_51))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_60 = SHIFTL0(op_MUL_58, SN(32, 1)); + RzILOpPure *op_ADD_61 = ADD(op_LSHIFT_31, op_LSHIFT_60); + RzILOpPure *op_ADD_62 = ADD(READ_REG(pkt, Rxx_op, false), op_ADD_61); + RzILOpEffect *op_ASSIGN_ADD_63 = WRITE_REG(bundle, Rxx_op, op_ADD_62); + + RzILOpEffect *instruction_sequence = op_ASSIGN_ADD_63; + return instruction_sequence; +} + +// Rdd = vrmpyweh(Rss,Rtt) +RzILOpEffect *hex_il_op_m4_vrmpyeh_s0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // Rdd = (((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10) << 0x0) + (((st64) 
((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10) << 0x0); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rss, SN(32, 0x20)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_19 = SHIFTRA(Rtt, SN(32, 0x20)); + RzILOpPure *op_AND_22 = LOGAND(op_RSHIFT_19, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_29 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_7), DUP(op_AND_7))), CAST(32, MSB(DUP(op_AND_7)), DUP(op_AND_7)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_7)), DUP(op_AND_7))), CAST(32, MSB(DUP(op_AND_7)), DUP(op_AND_7))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_7)), DUP(op_AND_7))), CAST(32, MSB(DUP(op_AND_7)), DUP(op_AND_7)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_7)), DUP(op_AND_7))), CAST(32, MSB(DUP(op_AND_7)), DUP(op_AND_7))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_22), DUP(op_AND_22))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_31 = SHIFTL0(op_MUL_29, SN(32, 0)); + RzILOpPure *op_RSHIFT_35 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_37 = LOGAND(op_RSHIFT_35, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_48 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_51 = LOGAND(op_RSHIFT_48, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_58 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_37), DUP(op_AND_37))), CAST(32, MSB(DUP(op_AND_37)), DUP(op_AND_37)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_37)), DUP(op_AND_37))), CAST(32, MSB(DUP(op_AND_37)), DUP(op_AND_37))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_37)), DUP(op_AND_37))), CAST(32, MSB(DUP(op_AND_37)), DUP(op_AND_37)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_37)), DUP(op_AND_37))), CAST(32, MSB(DUP(op_AND_37)), DUP(op_AND_37))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_51), DUP(op_AND_51))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_60 = SHIFTL0(op_MUL_58, 
SN(32, 0)); + RzILOpPure *op_ADD_61 = ADD(op_LSHIFT_31, op_LSHIFT_60); + RzILOpEffect *op_ASSIGN_62 = WRITE_REG(bundle, Rdd_op, op_ADD_61); + + RzILOpEffect *instruction_sequence = op_ASSIGN_62; + return instruction_sequence; +} + +// Rdd = vrmpyweh(Rss,Rtt):<<1 +RzILOpEffect *hex_il_op_m4_vrmpyeh_s1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // Rdd = (((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff)))), 0x0, 0x10) << 0x1) + (((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff)))), 0x0, 0x10) << 0x1); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rss, SN(32, 0x20)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_19 = SHIFTRA(Rtt, SN(32, 0x20)); + RzILOpPure *op_AND_22 = LOGAND(op_RSHIFT_19, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_29 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_7), DUP(op_AND_7))), CAST(32, MSB(DUP(op_AND_7)), DUP(op_AND_7)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_7)), DUP(op_AND_7))), CAST(32, MSB(DUP(op_AND_7)), DUP(op_AND_7))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_7)), DUP(op_AND_7))), CAST(32, MSB(DUP(op_AND_7)), DUP(op_AND_7)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_7)), DUP(op_AND_7))), CAST(32, MSB(DUP(op_AND_7)), DUP(op_AND_7))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_22), DUP(op_AND_22))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_31 = SHIFTL0(op_MUL_29, SN(32, 1)); + RzILOpPure *op_RSHIFT_35 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_37 = LOGAND(op_RSHIFT_35, 
SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_48 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_51 = LOGAND(op_RSHIFT_48, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_58 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_37), DUP(op_AND_37))), CAST(32, MSB(DUP(op_AND_37)), DUP(op_AND_37)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_37)), DUP(op_AND_37))), CAST(32, MSB(DUP(op_AND_37)), DUP(op_AND_37))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_37)), DUP(op_AND_37))), CAST(32, MSB(DUP(op_AND_37)), DUP(op_AND_37)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_37)), DUP(op_AND_37))), CAST(32, MSB(DUP(op_AND_37)), DUP(op_AND_37))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_51), DUP(op_AND_51))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_60 = SHIFTL0(op_MUL_58, SN(32, 1)); + RzILOpPure *op_ADD_61 = ADD(op_LSHIFT_31, op_LSHIFT_60); + RzILOpEffect *op_ASSIGN_62 = WRITE_REG(bundle, Rdd_op, op_ADD_61); + + RzILOpEffect *instruction_sequence = op_ASSIGN_62; + return instruction_sequence; +} + +// Rxx += vrmpywoh(Rss,Rtt) +RzILOpEffect *hex_il_op_m4_vrmpyoh_acc_s0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rxx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // Rxx = Rxx + (((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff)))), 0x0, 0x10) << 0x0) + (((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10) << 0x0); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rss, SN(32, 0x20)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_19 = SHIFTRA(Rtt, SN(32, 0x30)); + RzILOpPure 
*op_AND_22 = LOGAND(op_RSHIFT_19, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_29 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_7), DUP(op_AND_7))), CAST(32, MSB(DUP(op_AND_7)), DUP(op_AND_7)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_7)), DUP(op_AND_7))), CAST(32, MSB(DUP(op_AND_7)), DUP(op_AND_7))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_7)), DUP(op_AND_7))), CAST(32, MSB(DUP(op_AND_7)), DUP(op_AND_7)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_7)), DUP(op_AND_7))), CAST(32, MSB(DUP(op_AND_7)), DUP(op_AND_7))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_22), DUP(op_AND_22))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_31 = SHIFTL0(op_MUL_29, SN(32, 0)); + RzILOpPure *op_RSHIFT_35 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_37 = LOGAND(op_RSHIFT_35, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_48 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_51 = LOGAND(op_RSHIFT_48, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_58 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_37), DUP(op_AND_37))), CAST(32, MSB(DUP(op_AND_37)), DUP(op_AND_37)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_37)), DUP(op_AND_37))), CAST(32, MSB(DUP(op_AND_37)), DUP(op_AND_37))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_37)), DUP(op_AND_37))), CAST(32, MSB(DUP(op_AND_37)), DUP(op_AND_37)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_37)), DUP(op_AND_37))), CAST(32, MSB(DUP(op_AND_37)), DUP(op_AND_37))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_51), DUP(op_AND_51))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_60 = SHIFTL0(op_MUL_58, SN(32, 0)); + RzILOpPure *op_ADD_61 = ADD(op_LSHIFT_31, op_LSHIFT_60); + RzILOpPure *op_ADD_62 = ADD(READ_REG(pkt, Rxx_op, false), op_ADD_61); + RzILOpEffect *op_ASSIGN_ADD_63 = WRITE_REG(bundle, Rxx_op, op_ADD_62); + + RzILOpEffect *instruction_sequence = op_ASSIGN_ADD_63; + return instruction_sequence; +} + +// Rxx += 
vrmpywoh(Rss,Rtt):<<1 +RzILOpEffect *hex_il_op_m4_vrmpyoh_acc_s1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rxx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // Rxx = Rxx + (((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff)))), 0x0, 0x10) << 0x1) + (((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10) << 0x1); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rss, SN(32, 0x20)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_19 = SHIFTRA(Rtt, SN(32, 0x30)); + RzILOpPure *op_AND_22 = LOGAND(op_RSHIFT_19, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_29 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_7), DUP(op_AND_7))), CAST(32, MSB(DUP(op_AND_7)), DUP(op_AND_7)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_7)), DUP(op_AND_7))), CAST(32, MSB(DUP(op_AND_7)), DUP(op_AND_7))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_7)), DUP(op_AND_7))), CAST(32, MSB(DUP(op_AND_7)), DUP(op_AND_7)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_7)), DUP(op_AND_7))), CAST(32, MSB(DUP(op_AND_7)), DUP(op_AND_7))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_22), DUP(op_AND_22))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_31 = SHIFTL0(op_MUL_29, SN(32, 1)); + RzILOpPure *op_RSHIFT_35 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_37 = LOGAND(op_RSHIFT_35, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_48 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_51 = LOGAND(op_RSHIFT_48, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_58 = MUL(CAST(64, MSB(CAST(32, 
MSB(CAST(64, MSB(CAST(32, MSB(op_AND_37), DUP(op_AND_37))), CAST(32, MSB(DUP(op_AND_37)), DUP(op_AND_37)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_37)), DUP(op_AND_37))), CAST(32, MSB(DUP(op_AND_37)), DUP(op_AND_37))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_37)), DUP(op_AND_37))), CAST(32, MSB(DUP(op_AND_37)), DUP(op_AND_37)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_37)), DUP(op_AND_37))), CAST(32, MSB(DUP(op_AND_37)), DUP(op_AND_37))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_51), DUP(op_AND_51))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_60 = SHIFTL0(op_MUL_58, SN(32, 1)); + RzILOpPure *op_ADD_61 = ADD(op_LSHIFT_31, op_LSHIFT_60); + RzILOpPure *op_ADD_62 = ADD(READ_REG(pkt, Rxx_op, false), op_ADD_61); + RzILOpEffect *op_ASSIGN_ADD_63 = WRITE_REG(bundle, Rxx_op, op_ADD_62); + + RzILOpEffect *instruction_sequence = op_ASSIGN_ADD_63; + return instruction_sequence; +} + +// Rdd = vrmpywoh(Rss,Rtt) +RzILOpEffect *hex_il_op_m4_vrmpyoh_s0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // Rdd = (((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff)))), 0x0, 0x10) << 0x0) + (((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10) << 0x0); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rss, SN(32, 0x20)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_19 = SHIFTRA(Rtt, SN(32, 0x30)); + RzILOpPure *op_AND_22 = LOGAND(op_RSHIFT_19, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_29 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, 
MSB(op_AND_7), DUP(op_AND_7))), CAST(32, MSB(DUP(op_AND_7)), DUP(op_AND_7)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_7)), DUP(op_AND_7))), CAST(32, MSB(DUP(op_AND_7)), DUP(op_AND_7))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_7)), DUP(op_AND_7))), CAST(32, MSB(DUP(op_AND_7)), DUP(op_AND_7)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_7)), DUP(op_AND_7))), CAST(32, MSB(DUP(op_AND_7)), DUP(op_AND_7))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_22), DUP(op_AND_22))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_31 = SHIFTL0(op_MUL_29, SN(32, 0)); + RzILOpPure *op_RSHIFT_35 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_37 = LOGAND(op_RSHIFT_35, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_48 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_51 = LOGAND(op_RSHIFT_48, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_58 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_37), DUP(op_AND_37))), CAST(32, MSB(DUP(op_AND_37)), DUP(op_AND_37)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_37)), DUP(op_AND_37))), CAST(32, MSB(DUP(op_AND_37)), DUP(op_AND_37))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_37)), DUP(op_AND_37))), CAST(32, MSB(DUP(op_AND_37)), DUP(op_AND_37)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_37)), DUP(op_AND_37))), CAST(32, MSB(DUP(op_AND_37)), DUP(op_AND_37))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_51), DUP(op_AND_51))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_60 = SHIFTL0(op_MUL_58, SN(32, 0)); + RzILOpPure *op_ADD_61 = ADD(op_LSHIFT_31, op_LSHIFT_60); + RzILOpEffect *op_ASSIGN_62 = WRITE_REG(bundle, Rdd_op, op_ADD_61); + + RzILOpEffect *instruction_sequence = op_ASSIGN_62; + return instruction_sequence; +} + +// Rdd = vrmpywoh(Rss,Rtt):<<1 +RzILOpEffect *hex_il_op_m4_vrmpyoh_s1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', 
false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // Rdd = (((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff)))), 0x0, 0x10) << 0x1) + (((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * sextract64(((ut64) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff)))), 0x0, 0x10) << 0x1); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rss, SN(32, 0x20)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_19 = SHIFTRA(Rtt, SN(32, 0x30)); + RzILOpPure *op_AND_22 = LOGAND(op_RSHIFT_19, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_29 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_7), DUP(op_AND_7))), CAST(32, MSB(DUP(op_AND_7)), DUP(op_AND_7)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_7)), DUP(op_AND_7))), CAST(32, MSB(DUP(op_AND_7)), DUP(op_AND_7))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_7)), DUP(op_AND_7))), CAST(32, MSB(DUP(op_AND_7)), DUP(op_AND_7)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_7)), DUP(op_AND_7))), CAST(32, MSB(DUP(op_AND_7)), DUP(op_AND_7))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_22), DUP(op_AND_22))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_31 = SHIFTL0(op_MUL_29, SN(32, 1)); + RzILOpPure *op_RSHIFT_35 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_37 = LOGAND(op_RSHIFT_35, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_48 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_51 = LOGAND(op_RSHIFT_48, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_58 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_37), DUP(op_AND_37))), CAST(32, MSB(DUP(op_AND_37)), DUP(op_AND_37)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_37)), DUP(op_AND_37))), CAST(32, MSB(DUP(op_AND_37)), DUP(op_AND_37))))), CAST(32, MSB(CAST(64, MSB(CAST(32, 
MSB(DUP(op_AND_37)), DUP(op_AND_37))), CAST(32, MSB(DUP(op_AND_37)), DUP(op_AND_37)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_37)), DUP(op_AND_37))), CAST(32, MSB(DUP(op_AND_37)), DUP(op_AND_37))))), SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_51), DUP(op_AND_51))), SN(32, 0), SN(32, 16))); + RzILOpPure *op_LSHIFT_60 = SHIFTL0(op_MUL_58, SN(32, 1)); + RzILOpPure *op_ADD_61 = ADD(op_LSHIFT_31, op_LSHIFT_60); + RzILOpEffect *op_ASSIGN_62 = WRITE_REG(bundle, Rdd_op, op_ADD_61); + + RzILOpEffect *instruction_sequence = op_ASSIGN_62; + return instruction_sequence; +} + +// Rx ^= and(Rs,Rt) +RzILOpEffect *hex_il_op_m4_xor_and(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rx = (Rx ^ (Rs & Rt)); + RzILOpPure *op_AND_3 = LOGAND(Rs, Rt); + RzILOpPure *op_XOR_4 = LOGXOR(READ_REG(pkt, Rx_op, false), op_AND_3); + RzILOpEffect *op_ASSIGN_XOR_5 = WRITE_REG(bundle, Rx_op, op_XOR_4); + + RzILOpEffect *instruction_sequence = op_ASSIGN_XOR_5; + return instruction_sequence; +} + +// Rx ^= and(Rs,~Rt) +RzILOpEffect *hex_il_op_m4_xor_andn(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rx = (Rx ^ (Rs & (~Rt))); + RzILOpPure *op_NOT_3 = LOGNOT(Rt); + RzILOpPure *op_AND_4 = LOGAND(Rs, op_NOT_3); + RzILOpPure *op_XOR_5 = LOGXOR(READ_REG(pkt, Rx_op, false), op_AND_4); + RzILOpEffect *op_ASSIGN_XOR_6 = WRITE_REG(bundle, Rx_op, op_XOR_5); + + RzILOpEffect *instruction_sequence = 
op_ASSIGN_XOR_6; + return instruction_sequence; +} + +// Rx ^= or(Rs,Rt) +RzILOpEffect *hex_il_op_m4_xor_or(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rx = (Rx ^ (Rs | Rt)); + RzILOpPure *op_OR_3 = LOGOR(Rs, Rt); + RzILOpPure *op_XOR_4 = LOGXOR(READ_REG(pkt, Rx_op, false), op_OR_3); + RzILOpEffect *op_ASSIGN_XOR_5 = WRITE_REG(bundle, Rx_op, op_XOR_4); + + RzILOpEffect *instruction_sequence = op_ASSIGN_XOR_5; + return instruction_sequence; +} + +// Rxx ^= xor(Rss,Rtt) +RzILOpEffect *hex_il_op_m4_xor_xacc(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rxx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // Rxx = (Rxx ^ (Rss ^ Rtt)); + RzILOpPure *op_XOR_3 = LOGXOR(Rss, Rtt); + RzILOpPure *op_XOR_4 = LOGXOR(READ_REG(pkt, Rxx_op, false), op_XOR_3); + RzILOpEffect *op_ASSIGN_XOR_5 = WRITE_REG(bundle, Rxx_op, op_XOR_4); + + RzILOpEffect *instruction_sequence = op_ASSIGN_XOR_5; + return instruction_sequence; +} + +#include \ No newline at end of file diff --git a/librz/arch/isa/hexagon/il_ops/hexagon_il_M5_ops.c b/librz/arch/isa/hexagon/il_ops/hexagon_il_M5_ops.c new file mode 100644 index 00000000000..939a2ebe33c --- /dev/null +++ b/librz/arch/isa/hexagon/il_ops/hexagon_il_M5_ops.c @@ -0,0 +1,1344 @@ +// SPDX-FileCopyrightText: 2021 Rot127 +// SPDX-License-Identifier: LGPL-3.0-only + +// LLVM commit: b6f51787f6c8e77143f0aef6b58ddc7c55741d5c +// LLVM commit date: 2023-11-15 07:10:59 -0800 (ISO 8601 format) +// Date of 
code generation: 2024-03-16 06:22:39-05:00 +//======================================== +// The following code is generated. +// Do not edit. Repository of code generator: +// https://github.com/rizinorg/rz-hexagon + +#include +#include "../hexagon_il.h" +#include +#include + +// Rxx += vdmpybsu(Rss,Rtt):sat +RzILOpEffect *hex_il_op_m5_vdmacbsu(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rxx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_189 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((st16) ((Rxx >> 0x0) & ((st64) 0xffff))))) + ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x0) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x0) & ((st64) 0xff)))))) + ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x8) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x8) & ((st64) 0xff))))))), 0x0, 0x10) == ((st64) ((st32) ((st16) ((Rxx >> 0x0) & ((st64) 0xffff))))) + ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x0) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x0) & ((st64) 0xff)))))) + ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x8) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x8) & ((st64) 0xff)))))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((st16) ((Rxx >> 0x0) & ((st64) 0xffff))))) + ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x0) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x0) & ((st64) 0xff)))))) + ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x8) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x8) & ((st64) 0xff)))))) < ((st64) 0x0)) ? 
(-(0x1 << 0xf)) : (0x1 << 0xf) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_17 = LOGAND(op_RSHIFT_14, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_23 = SHIFTRA(Rss, SN(32, 0)); + RzILOpPure *op_AND_26 = LOGAND(op_RSHIFT_23, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_RSHIFT_34 = SHIFTRA(Rtt, SN(32, 0)); + RzILOpPure *op_AND_37 = LOGAND(op_RSHIFT_34, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_MUL_41 = MUL(CAST(32, MSB(CAST(16, MSB(CAST(8, MSB(op_AND_26), DUP(op_AND_26))), CAST(8, MSB(DUP(op_AND_26)), DUP(op_AND_26)))), CAST(16, MSB(CAST(8, MSB(DUP(op_AND_26)), DUP(op_AND_26))), CAST(8, MSB(DUP(op_AND_26)), DUP(op_AND_26)))), CAST(32, MSB(CAST(16, IL_FALSE, CAST(8, IL_FALSE, op_AND_37))), CAST(16, IL_FALSE, CAST(8, IL_FALSE, DUP(op_AND_37))))); + RzILOpPure *op_ADD_45 = ADD(CAST(64, MSB(CAST(32, MSB(CAST(16, MSB(op_AND_17), DUP(op_AND_17))), CAST(16, MSB(DUP(op_AND_17)), DUP(op_AND_17)))), CAST(32, MSB(CAST(16, MSB(DUP(op_AND_17)), DUP(op_AND_17))), CAST(16, MSB(DUP(op_AND_17)), DUP(op_AND_17)))), CAST(64, MSB(op_MUL_41), DUP(op_MUL_41))); + RzILOpPure *op_RSHIFT_49 = SHIFTRA(DUP(Rss), SN(32, 8)); + RzILOpPure *op_AND_52 = LOGAND(op_RSHIFT_49, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_RSHIFT_59 = SHIFTRA(DUP(Rtt), SN(32, 8)); + RzILOpPure *op_AND_62 = LOGAND(op_RSHIFT_59, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_MUL_66 = MUL(CAST(32, MSB(CAST(16, MSB(CAST(8, MSB(op_AND_52), DUP(op_AND_52))), CAST(8, MSB(DUP(op_AND_52)), DUP(op_AND_52)))), CAST(16, MSB(CAST(8, MSB(DUP(op_AND_52)), DUP(op_AND_52))), CAST(8, MSB(DUP(op_AND_52)), DUP(op_AND_52)))), CAST(32, MSB(CAST(16, IL_FALSE, CAST(8, IL_FALSE, op_AND_62))), CAST(16, IL_FALSE, CAST(8, IL_FALSE, DUP(op_AND_62))))); + RzILOpPure *op_ADD_68 = ADD(op_ADD_45, CAST(64, MSB(op_MUL_66), DUP(op_MUL_66))); + RzILOpPure *op_RSHIFT_77 = SHIFTRA(READ_REG(pkt, 
Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_80 = LOGAND(op_RSHIFT_77, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_85 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_88 = LOGAND(op_RSHIFT_85, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_RSHIFT_95 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_98 = LOGAND(op_RSHIFT_95, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_MUL_102 = MUL(CAST(32, MSB(CAST(16, MSB(CAST(8, MSB(op_AND_88), DUP(op_AND_88))), CAST(8, MSB(DUP(op_AND_88)), DUP(op_AND_88)))), CAST(16, MSB(CAST(8, MSB(DUP(op_AND_88)), DUP(op_AND_88))), CAST(8, MSB(DUP(op_AND_88)), DUP(op_AND_88)))), CAST(32, MSB(CAST(16, IL_FALSE, CAST(8, IL_FALSE, op_AND_98))), CAST(16, IL_FALSE, CAST(8, IL_FALSE, DUP(op_AND_98))))); + RzILOpPure *op_ADD_106 = ADD(CAST(64, MSB(CAST(32, MSB(CAST(16, MSB(op_AND_80), DUP(op_AND_80))), CAST(16, MSB(DUP(op_AND_80)), DUP(op_AND_80)))), CAST(32, MSB(CAST(16, MSB(DUP(op_AND_80)), DUP(op_AND_80))), CAST(16, MSB(DUP(op_AND_80)), DUP(op_AND_80)))), CAST(64, MSB(op_MUL_102), DUP(op_MUL_102))); + RzILOpPure *op_RSHIFT_110 = SHIFTRA(DUP(Rss), SN(32, 8)); + RzILOpPure *op_AND_113 = LOGAND(op_RSHIFT_110, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_RSHIFT_120 = SHIFTRA(DUP(Rtt), SN(32, 8)); + RzILOpPure *op_AND_123 = LOGAND(op_RSHIFT_120, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_MUL_127 = MUL(CAST(32, MSB(CAST(16, MSB(CAST(8, MSB(op_AND_113), DUP(op_AND_113))), CAST(8, MSB(DUP(op_AND_113)), DUP(op_AND_113)))), CAST(16, MSB(CAST(8, MSB(DUP(op_AND_113)), DUP(op_AND_113))), CAST(8, MSB(DUP(op_AND_113)), DUP(op_AND_113)))), CAST(32, MSB(CAST(16, IL_FALSE, CAST(8, IL_FALSE, op_AND_123))), CAST(16, IL_FALSE, CAST(8, IL_FALSE, DUP(op_AND_123))))); + RzILOpPure *op_ADD_129 = ADD(op_ADD_106, CAST(64, MSB(op_MUL_127), DUP(op_MUL_127))); + RzILOpPure *op_EQ_130 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_ADD_68), SN(32, 0), SN(32, 16)), op_ADD_129); + 
RzILOpPure *op_RSHIFT_193 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_196 = LOGAND(op_RSHIFT_193, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_201 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_204 = LOGAND(op_RSHIFT_201, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_RSHIFT_211 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_214 = LOGAND(op_RSHIFT_211, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_MUL_218 = MUL(CAST(32, MSB(CAST(16, MSB(CAST(8, MSB(op_AND_204), DUP(op_AND_204))), CAST(8, MSB(DUP(op_AND_204)), DUP(op_AND_204)))), CAST(16, MSB(CAST(8, MSB(DUP(op_AND_204)), DUP(op_AND_204))), CAST(8, MSB(DUP(op_AND_204)), DUP(op_AND_204)))), CAST(32, MSB(CAST(16, IL_FALSE, CAST(8, IL_FALSE, op_AND_214))), CAST(16, IL_FALSE, CAST(8, IL_FALSE, DUP(op_AND_214))))); + RzILOpPure *op_ADD_222 = ADD(CAST(64, MSB(CAST(32, MSB(CAST(16, MSB(op_AND_196), DUP(op_AND_196))), CAST(16, MSB(DUP(op_AND_196)), DUP(op_AND_196)))), CAST(32, MSB(CAST(16, MSB(DUP(op_AND_196)), DUP(op_AND_196))), CAST(16, MSB(DUP(op_AND_196)), DUP(op_AND_196)))), CAST(64, MSB(op_MUL_218), DUP(op_MUL_218))); + RzILOpPure *op_RSHIFT_226 = SHIFTRA(DUP(Rss), SN(32, 8)); + RzILOpPure *op_AND_229 = LOGAND(op_RSHIFT_226, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_RSHIFT_236 = SHIFTRA(DUP(Rtt), SN(32, 8)); + RzILOpPure *op_AND_239 = LOGAND(op_RSHIFT_236, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_MUL_243 = MUL(CAST(32, MSB(CAST(16, MSB(CAST(8, MSB(op_AND_229), DUP(op_AND_229))), CAST(8, MSB(DUP(op_AND_229)), DUP(op_AND_229)))), CAST(16, MSB(CAST(8, MSB(DUP(op_AND_229)), DUP(op_AND_229))), CAST(8, MSB(DUP(op_AND_229)), DUP(op_AND_229)))), CAST(32, MSB(CAST(16, IL_FALSE, CAST(8, IL_FALSE, op_AND_239))), CAST(16, IL_FALSE, CAST(8, IL_FALSE, DUP(op_AND_239))))); + RzILOpPure *op_ADD_245 = ADD(op_ADD_222, CAST(64, MSB(op_MUL_243), DUP(op_MUL_243))); + RzILOpPure *op_LT_248 = 
SLT(op_ADD_245, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_253 = SHIFTL0(SN(64, 1), SN(32, 15)); + RzILOpPure *op_NEG_254 = NEG(op_LSHIFT_253); + RzILOpPure *op_LSHIFT_259 = SHIFTL0(SN(64, 1), SN(32, 15)); + RzILOpPure *op_SUB_262 = SUB(op_LSHIFT_259, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_263 = ITE(op_LT_248, op_NEG_254, op_SUB_262); + RzILOpEffect *gcc_expr_264 = BRANCH(op_EQ_130, EMPTY(), set_usr_field_call_189); + + // h_tmp441 = HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((st16) ((Rxx >> 0x0) & ((st64) 0xffff))))) + ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x0) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x0) & ((st64) 0xff)))))) + ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x8) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x8) & ((st64) 0xff))))))), 0x0, 0x10) == ((st64) ((st32) ((st16) ((Rxx >> 0x0) & ((st64) 0xffff))))) + ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x0) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x0) & ((st64) 0xff)))))) + ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x8) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x8) & ((st64) 0xff)))))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((st16) ((Rxx >> 0x0) & ((st64) 0xffff))))) + ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x0) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x0) & ((st64) 0xff)))))) + ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x8) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x8) & ((st64) 0xff)))))) < ((st64) 0x0)) ? 
(-(0x1 << 0xf)) : (0x1 << 0xf) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_266 = SETL("h_tmp441", cond_263); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((st16) ...; + RzILOpEffect *seq_267 = SEQN(2, gcc_expr_264, op_ASSIGN_hybrid_tmp_266); + + // Rxx = ((st64) (((ut64) (Rxx & (~(0xffff << 0x0)))) | (((ut64) (((sextract64(((ut64) ((st64) ((st32) ((st16) ((Rxx >> 0x0) & ((st64) 0xffff))))) + ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x0) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x0) & ((st64) 0xff)))))) + ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x8) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x8) & ((st64) 0xff))))))), 0x0, 0x10) == ((st64) ((st32) ((st16) ((Rxx >> 0x0) & ((st64) 0xffff))))) + ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x0) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x0) & ((st64) 0xff)))))) + ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x8) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x8) & ((st64) 0xff))))))) ? 
((st64) ((st32) ((st16) ((Rxx >> 0x0) & ((st64) 0xffff))))) + ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x0) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x0) & ((st64) 0xff)))))) + ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x8) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x8) & ((st64) 0xff)))))) : h_tmp441) & ((st64) 0xffff))) << 0x0))); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(SN(64, 0xffff), SN(32, 0)); + RzILOpPure *op_NOT_6 = LOGNOT(op_LSHIFT_5); + RzILOpPure *op_AND_7 = LOGAND(READ_REG(pkt, Rxx_op, false), op_NOT_6); + RzILOpPure *op_RSHIFT_134 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_137 = LOGAND(op_RSHIFT_134, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_142 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_145 = LOGAND(op_RSHIFT_142, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_RSHIFT_152 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_155 = LOGAND(op_RSHIFT_152, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_MUL_159 = MUL(CAST(32, MSB(CAST(16, MSB(CAST(8, MSB(op_AND_145), DUP(op_AND_145))), CAST(8, MSB(DUP(op_AND_145)), DUP(op_AND_145)))), CAST(16, MSB(CAST(8, MSB(DUP(op_AND_145)), DUP(op_AND_145))), CAST(8, MSB(DUP(op_AND_145)), DUP(op_AND_145)))), CAST(32, MSB(CAST(16, IL_FALSE, CAST(8, IL_FALSE, op_AND_155))), CAST(16, IL_FALSE, CAST(8, IL_FALSE, DUP(op_AND_155))))); + RzILOpPure *op_ADD_163 = ADD(CAST(64, MSB(CAST(32, MSB(CAST(16, MSB(op_AND_137), DUP(op_AND_137))), CAST(16, MSB(DUP(op_AND_137)), DUP(op_AND_137)))), CAST(32, MSB(CAST(16, MSB(DUP(op_AND_137)), DUP(op_AND_137))), CAST(16, MSB(DUP(op_AND_137)), DUP(op_AND_137)))), CAST(64, MSB(op_MUL_159), DUP(op_MUL_159))); + RzILOpPure *op_RSHIFT_167 = SHIFTRA(DUP(Rss), SN(32, 8)); + RzILOpPure *op_AND_170 = LOGAND(op_RSHIFT_167, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_RSHIFT_177 = SHIFTRA(DUP(Rtt), SN(32, 8)); + RzILOpPure *op_AND_180 = LOGAND(op_RSHIFT_177, CAST(64, 
MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_MUL_184 = MUL(CAST(32, MSB(CAST(16, MSB(CAST(8, MSB(op_AND_170), DUP(op_AND_170))), CAST(8, MSB(DUP(op_AND_170)), DUP(op_AND_170)))), CAST(16, MSB(CAST(8, MSB(DUP(op_AND_170)), DUP(op_AND_170))), CAST(8, MSB(DUP(op_AND_170)), DUP(op_AND_170)))), CAST(32, MSB(CAST(16, IL_FALSE, CAST(8, IL_FALSE, op_AND_180))), CAST(16, IL_FALSE, CAST(8, IL_FALSE, DUP(op_AND_180))))); + RzILOpPure *op_ADD_186 = ADD(op_ADD_163, CAST(64, MSB(op_MUL_184), DUP(op_MUL_184))); + RzILOpPure *cond_268 = ITE(DUP(op_EQ_130), op_ADD_186, VARL("h_tmp441")); + RzILOpPure *op_AND_271 = LOGAND(cond_268, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_LSHIFT_276 = SHIFTL0(CAST(64, IL_FALSE, op_AND_271), SN(32, 0)); + RzILOpPure *op_OR_278 = LOGOR(CAST(64, IL_FALSE, op_AND_7), op_LSHIFT_276); + RzILOpEffect *op_ASSIGN_280 = WRITE_REG(bundle, Rxx_op, CAST(64, IL_FALSE, op_OR_278)); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((s ...; + RzILOpEffect *seq_281 = SEQN(2, seq_267, op_ASSIGN_280); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_469 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((st16) ((Rxx >> 0x10) & ((st64) 0xffff))))) + ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x10) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x10) & ((st64) 0xff)))))) + ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x18) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x18) & ((st64) 0xff))))))), 0x0, 0x10) == ((st64) ((st32) ((st16) ((Rxx >> 0x10) & ((st64) 0xffff))))) + ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x10) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x10) & ((st64) 0xff)))))) + ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x18) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x18) & ((st64) 0xff)))))))) {{}} else {set_usr_field(bundle, 
HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((st16) ((Rxx >> 0x10) & ((st64) 0xffff))))) + ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x10) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x10) & ((st64) 0xff)))))) + ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x18) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x18) & ((st64) 0xff)))))) < ((st64) 0x0)) ? (-(0x1 << 0xf)) : (0x1 << 0xf) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_296 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 16)); + RzILOpPure *op_AND_299 = LOGAND(op_RSHIFT_296, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_304 = SHIFTRA(DUP(Rss), SN(32, 16)); + RzILOpPure *op_AND_307 = LOGAND(op_RSHIFT_304, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_RSHIFT_314 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_317 = LOGAND(op_RSHIFT_314, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_MUL_321 = MUL(CAST(32, MSB(CAST(16, MSB(CAST(8, MSB(op_AND_307), DUP(op_AND_307))), CAST(8, MSB(DUP(op_AND_307)), DUP(op_AND_307)))), CAST(16, MSB(CAST(8, MSB(DUP(op_AND_307)), DUP(op_AND_307))), CAST(8, MSB(DUP(op_AND_307)), DUP(op_AND_307)))), CAST(32, MSB(CAST(16, IL_FALSE, CAST(8, IL_FALSE, op_AND_317))), CAST(16, IL_FALSE, CAST(8, IL_FALSE, DUP(op_AND_317))))); + RzILOpPure *op_ADD_325 = ADD(CAST(64, MSB(CAST(32, MSB(CAST(16, MSB(op_AND_299), DUP(op_AND_299))), CAST(16, MSB(DUP(op_AND_299)), DUP(op_AND_299)))), CAST(32, MSB(CAST(16, MSB(DUP(op_AND_299)), DUP(op_AND_299))), CAST(16, MSB(DUP(op_AND_299)), DUP(op_AND_299)))), CAST(64, MSB(op_MUL_321), DUP(op_MUL_321))); + RzILOpPure *op_RSHIFT_329 = SHIFTRA(DUP(Rss), SN(32, 24)); + RzILOpPure *op_AND_332 = LOGAND(op_RSHIFT_329, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_RSHIFT_339 = SHIFTRA(DUP(Rtt), SN(32, 24)); + RzILOpPure *op_AND_342 = LOGAND(op_RSHIFT_339, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_MUL_346 = MUL(CAST(32, MSB(CAST(16, MSB(CAST(8, 
MSB(op_AND_332), DUP(op_AND_332))), CAST(8, MSB(DUP(op_AND_332)), DUP(op_AND_332)))), CAST(16, MSB(CAST(8, MSB(DUP(op_AND_332)), DUP(op_AND_332))), CAST(8, MSB(DUP(op_AND_332)), DUP(op_AND_332)))), CAST(32, MSB(CAST(16, IL_FALSE, CAST(8, IL_FALSE, op_AND_342))), CAST(16, IL_FALSE, CAST(8, IL_FALSE, DUP(op_AND_342))))); + RzILOpPure *op_ADD_348 = ADD(op_ADD_325, CAST(64, MSB(op_MUL_346), DUP(op_MUL_346))); + RzILOpPure *op_RSHIFT_357 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 16)); + RzILOpPure *op_AND_360 = LOGAND(op_RSHIFT_357, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_365 = SHIFTRA(DUP(Rss), SN(32, 16)); + RzILOpPure *op_AND_368 = LOGAND(op_RSHIFT_365, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_RSHIFT_375 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_378 = LOGAND(op_RSHIFT_375, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_MUL_382 = MUL(CAST(32, MSB(CAST(16, MSB(CAST(8, MSB(op_AND_368), DUP(op_AND_368))), CAST(8, MSB(DUP(op_AND_368)), DUP(op_AND_368)))), CAST(16, MSB(CAST(8, MSB(DUP(op_AND_368)), DUP(op_AND_368))), CAST(8, MSB(DUP(op_AND_368)), DUP(op_AND_368)))), CAST(32, MSB(CAST(16, IL_FALSE, CAST(8, IL_FALSE, op_AND_378))), CAST(16, IL_FALSE, CAST(8, IL_FALSE, DUP(op_AND_378))))); + RzILOpPure *op_ADD_386 = ADD(CAST(64, MSB(CAST(32, MSB(CAST(16, MSB(op_AND_360), DUP(op_AND_360))), CAST(16, MSB(DUP(op_AND_360)), DUP(op_AND_360)))), CAST(32, MSB(CAST(16, MSB(DUP(op_AND_360)), DUP(op_AND_360))), CAST(16, MSB(DUP(op_AND_360)), DUP(op_AND_360)))), CAST(64, MSB(op_MUL_382), DUP(op_MUL_382))); + RzILOpPure *op_RSHIFT_390 = SHIFTRA(DUP(Rss), SN(32, 24)); + RzILOpPure *op_AND_393 = LOGAND(op_RSHIFT_390, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_RSHIFT_400 = SHIFTRA(DUP(Rtt), SN(32, 24)); + RzILOpPure *op_AND_403 = LOGAND(op_RSHIFT_400, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_MUL_407 = MUL(CAST(32, MSB(CAST(16, MSB(CAST(8, MSB(op_AND_393), 
DUP(op_AND_393))), CAST(8, MSB(DUP(op_AND_393)), DUP(op_AND_393)))), CAST(16, MSB(CAST(8, MSB(DUP(op_AND_393)), DUP(op_AND_393))), CAST(8, MSB(DUP(op_AND_393)), DUP(op_AND_393)))), CAST(32, MSB(CAST(16, IL_FALSE, CAST(8, IL_FALSE, op_AND_403))), CAST(16, IL_FALSE, CAST(8, IL_FALSE, DUP(op_AND_403))))); + RzILOpPure *op_ADD_409 = ADD(op_ADD_386, CAST(64, MSB(op_MUL_407), DUP(op_MUL_407))); + RzILOpPure *op_EQ_410 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_ADD_348), SN(32, 0), SN(32, 16)), op_ADD_409); + RzILOpPure *op_RSHIFT_473 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 16)); + RzILOpPure *op_AND_476 = LOGAND(op_RSHIFT_473, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_481 = SHIFTRA(DUP(Rss), SN(32, 16)); + RzILOpPure *op_AND_484 = LOGAND(op_RSHIFT_481, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_RSHIFT_491 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_494 = LOGAND(op_RSHIFT_491, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_MUL_498 = MUL(CAST(32, MSB(CAST(16, MSB(CAST(8, MSB(op_AND_484), DUP(op_AND_484))), CAST(8, MSB(DUP(op_AND_484)), DUP(op_AND_484)))), CAST(16, MSB(CAST(8, MSB(DUP(op_AND_484)), DUP(op_AND_484))), CAST(8, MSB(DUP(op_AND_484)), DUP(op_AND_484)))), CAST(32, MSB(CAST(16, IL_FALSE, CAST(8, IL_FALSE, op_AND_494))), CAST(16, IL_FALSE, CAST(8, IL_FALSE, DUP(op_AND_494))))); + RzILOpPure *op_ADD_502 = ADD(CAST(64, MSB(CAST(32, MSB(CAST(16, MSB(op_AND_476), DUP(op_AND_476))), CAST(16, MSB(DUP(op_AND_476)), DUP(op_AND_476)))), CAST(32, MSB(CAST(16, MSB(DUP(op_AND_476)), DUP(op_AND_476))), CAST(16, MSB(DUP(op_AND_476)), DUP(op_AND_476)))), CAST(64, MSB(op_MUL_498), DUP(op_MUL_498))); + RzILOpPure *op_RSHIFT_506 = SHIFTRA(DUP(Rss), SN(32, 24)); + RzILOpPure *op_AND_509 = LOGAND(op_RSHIFT_506, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_RSHIFT_516 = SHIFTRA(DUP(Rtt), SN(32, 24)); + RzILOpPure *op_AND_519 = LOGAND(op_RSHIFT_516, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + 
RzILOpPure *op_MUL_523 = MUL(CAST(32, MSB(CAST(16, MSB(CAST(8, MSB(op_AND_509), DUP(op_AND_509))), CAST(8, MSB(DUP(op_AND_509)), DUP(op_AND_509)))), CAST(16, MSB(CAST(8, MSB(DUP(op_AND_509)), DUP(op_AND_509))), CAST(8, MSB(DUP(op_AND_509)), DUP(op_AND_509)))), CAST(32, MSB(CAST(16, IL_FALSE, CAST(8, IL_FALSE, op_AND_519))), CAST(16, IL_FALSE, CAST(8, IL_FALSE, DUP(op_AND_519))))); + RzILOpPure *op_ADD_525 = ADD(op_ADD_502, CAST(64, MSB(op_MUL_523), DUP(op_MUL_523))); + RzILOpPure *op_LT_528 = SLT(op_ADD_525, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_533 = SHIFTL0(SN(64, 1), SN(32, 15)); + RzILOpPure *op_NEG_534 = NEG(op_LSHIFT_533); + RzILOpPure *op_LSHIFT_539 = SHIFTL0(SN(64, 1), SN(32, 15)); + RzILOpPure *op_SUB_542 = SUB(op_LSHIFT_539, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_543 = ITE(op_LT_528, op_NEG_534, op_SUB_542); + RzILOpEffect *gcc_expr_544 = BRANCH(op_EQ_410, EMPTY(), set_usr_field_call_469); + + // h_tmp442 = HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((st16) ((Rxx >> 0x10) & ((st64) 0xffff))))) + ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x10) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x10) & ((st64) 0xff)))))) + ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x18) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x18) & ((st64) 0xff))))))), 0x0, 0x10) == ((st64) ((st32) ((st16) ((Rxx >> 0x10) & ((st64) 0xffff))))) + ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x10) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x10) & ((st64) 0xff)))))) + ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x18) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x18) & ((st64) 0xff)))))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((st16) ((Rxx >> 0x10) & ((st64) 0xffff))))) + ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x10) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x10) & ((st64) 0xff)))))) + ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x18) & ((st64) 0xff))))) * 
((st32) ((st16) ((ut8) ((Rtt >> 0x18) & ((st64) 0xff)))))) < ((st64) 0x0)) ? (-(0x1 << 0xf)) : (0x1 << 0xf) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_546 = SETL("h_tmp442", cond_543); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((st16) ...; + RzILOpEffect *seq_547 = SEQN(2, gcc_expr_544, op_ASSIGN_hybrid_tmp_546); + + // Rxx = ((st64) (((ut64) (Rxx & (~(0xffff << 0x10)))) | (((ut64) (((sextract64(((ut64) ((st64) ((st32) ((st16) ((Rxx >> 0x10) & ((st64) 0xffff))))) + ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x10) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x10) & ((st64) 0xff)))))) + ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x18) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x18) & ((st64) 0xff))))))), 0x0, 0x10) == ((st64) ((st32) ((st16) ((Rxx >> 0x10) & ((st64) 0xffff))))) + ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x10) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x10) & ((st64) 0xff)))))) + ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x18) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x18) & ((st64) 0xff))))))) ? 
((st64) ((st32) ((st16) ((Rxx >> 0x10) & ((st64) 0xffff))))) + ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x10) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x10) & ((st64) 0xff)))))) + ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x18) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x18) & ((st64) 0xff)))))) : h_tmp442) & ((st64) 0xffff))) << 0x10))); + RzILOpPure *op_LSHIFT_287 = SHIFTL0(SN(64, 0xffff), SN(32, 16)); + RzILOpPure *op_NOT_288 = LOGNOT(op_LSHIFT_287); + RzILOpPure *op_AND_289 = LOGAND(READ_REG(pkt, Rxx_op, false), op_NOT_288); + RzILOpPure *op_RSHIFT_414 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 16)); + RzILOpPure *op_AND_417 = LOGAND(op_RSHIFT_414, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_422 = SHIFTRA(DUP(Rss), SN(32, 16)); + RzILOpPure *op_AND_425 = LOGAND(op_RSHIFT_422, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_RSHIFT_432 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_435 = LOGAND(op_RSHIFT_432, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_MUL_439 = MUL(CAST(32, MSB(CAST(16, MSB(CAST(8, MSB(op_AND_425), DUP(op_AND_425))), CAST(8, MSB(DUP(op_AND_425)), DUP(op_AND_425)))), CAST(16, MSB(CAST(8, MSB(DUP(op_AND_425)), DUP(op_AND_425))), CAST(8, MSB(DUP(op_AND_425)), DUP(op_AND_425)))), CAST(32, MSB(CAST(16, IL_FALSE, CAST(8, IL_FALSE, op_AND_435))), CAST(16, IL_FALSE, CAST(8, IL_FALSE, DUP(op_AND_435))))); + RzILOpPure *op_ADD_443 = ADD(CAST(64, MSB(CAST(32, MSB(CAST(16, MSB(op_AND_417), DUP(op_AND_417))), CAST(16, MSB(DUP(op_AND_417)), DUP(op_AND_417)))), CAST(32, MSB(CAST(16, MSB(DUP(op_AND_417)), DUP(op_AND_417))), CAST(16, MSB(DUP(op_AND_417)), DUP(op_AND_417)))), CAST(64, MSB(op_MUL_439), DUP(op_MUL_439))); + RzILOpPure *op_RSHIFT_447 = SHIFTRA(DUP(Rss), SN(32, 24)); + RzILOpPure *op_AND_450 = LOGAND(op_RSHIFT_447, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_RSHIFT_457 = SHIFTRA(DUP(Rtt), SN(32, 24)); + RzILOpPure *op_AND_460 = 
LOGAND(op_RSHIFT_457, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_MUL_464 = MUL(CAST(32, MSB(CAST(16, MSB(CAST(8, MSB(op_AND_450), DUP(op_AND_450))), CAST(8, MSB(DUP(op_AND_450)), DUP(op_AND_450)))), CAST(16, MSB(CAST(8, MSB(DUP(op_AND_450)), DUP(op_AND_450))), CAST(8, MSB(DUP(op_AND_450)), DUP(op_AND_450)))), CAST(32, MSB(CAST(16, IL_FALSE, CAST(8, IL_FALSE, op_AND_460))), CAST(16, IL_FALSE, CAST(8, IL_FALSE, DUP(op_AND_460))))); + RzILOpPure *op_ADD_466 = ADD(op_ADD_443, CAST(64, MSB(op_MUL_464), DUP(op_MUL_464))); + RzILOpPure *cond_548 = ITE(DUP(op_EQ_410), op_ADD_466, VARL("h_tmp442")); + RzILOpPure *op_AND_551 = LOGAND(cond_548, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_LSHIFT_556 = SHIFTL0(CAST(64, IL_FALSE, op_AND_551), SN(32, 16)); + RzILOpPure *op_OR_558 = LOGOR(CAST(64, IL_FALSE, op_AND_289), op_LSHIFT_556); + RzILOpEffect *op_ASSIGN_560 = WRITE_REG(bundle, Rxx_op, CAST(64, IL_FALSE, op_OR_558)); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((s ...; + RzILOpEffect *seq_561 = SEQN(2, seq_547, op_ASSIGN_560); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_749 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((st16) ((Rxx >> 0x20) & ((st64) 0xffff))))) + ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x20) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x20) & ((st64) 0xff)))))) + ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x28) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x28) & ((st64) 0xff))))))), 0x0, 0x10) == ((st64) ((st32) ((st16) ((Rxx >> 0x20) & ((st64) 0xffff))))) + ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x20) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x20) & ((st64) 0xff)))))) + ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x28) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x28) & ((st64) 0xff)))))))) {{}} else 
{set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((st16) ((Rxx >> 0x20) & ((st64) 0xffff))))) + ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x20) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x20) & ((st64) 0xff)))))) + ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x28) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x28) & ((st64) 0xff)))))) < ((st64) 0x0)) ? (-(0x1 << 0xf)) : (0x1 << 0xf) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_576 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_579 = LOGAND(op_RSHIFT_576, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_584 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_587 = LOGAND(op_RSHIFT_584, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_RSHIFT_594 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_597 = LOGAND(op_RSHIFT_594, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_MUL_601 = MUL(CAST(32, MSB(CAST(16, MSB(CAST(8, MSB(op_AND_587), DUP(op_AND_587))), CAST(8, MSB(DUP(op_AND_587)), DUP(op_AND_587)))), CAST(16, MSB(CAST(8, MSB(DUP(op_AND_587)), DUP(op_AND_587))), CAST(8, MSB(DUP(op_AND_587)), DUP(op_AND_587)))), CAST(32, MSB(CAST(16, IL_FALSE, CAST(8, IL_FALSE, op_AND_597))), CAST(16, IL_FALSE, CAST(8, IL_FALSE, DUP(op_AND_597))))); + RzILOpPure *op_ADD_605 = ADD(CAST(64, MSB(CAST(32, MSB(CAST(16, MSB(op_AND_579), DUP(op_AND_579))), CAST(16, MSB(DUP(op_AND_579)), DUP(op_AND_579)))), CAST(32, MSB(CAST(16, MSB(DUP(op_AND_579)), DUP(op_AND_579))), CAST(16, MSB(DUP(op_AND_579)), DUP(op_AND_579)))), CAST(64, MSB(op_MUL_601), DUP(op_MUL_601))); + RzILOpPure *op_RSHIFT_609 = SHIFTRA(DUP(Rss), SN(32, 0x28)); + RzILOpPure *op_AND_612 = LOGAND(op_RSHIFT_609, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_RSHIFT_619 = SHIFTRA(DUP(Rtt), SN(32, 0x28)); + RzILOpPure *op_AND_622 = LOGAND(op_RSHIFT_619, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_MUL_626 = MUL(CAST(32, 
MSB(CAST(16, MSB(CAST(8, MSB(op_AND_612), DUP(op_AND_612))), CAST(8, MSB(DUP(op_AND_612)), DUP(op_AND_612)))), CAST(16, MSB(CAST(8, MSB(DUP(op_AND_612)), DUP(op_AND_612))), CAST(8, MSB(DUP(op_AND_612)), DUP(op_AND_612)))), CAST(32, MSB(CAST(16, IL_FALSE, CAST(8, IL_FALSE, op_AND_622))), CAST(16, IL_FALSE, CAST(8, IL_FALSE, DUP(op_AND_622))))); + RzILOpPure *op_ADD_628 = ADD(op_ADD_605, CAST(64, MSB(op_MUL_626), DUP(op_MUL_626))); + RzILOpPure *op_RSHIFT_637 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_640 = LOGAND(op_RSHIFT_637, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_645 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_648 = LOGAND(op_RSHIFT_645, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_RSHIFT_655 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_658 = LOGAND(op_RSHIFT_655, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_MUL_662 = MUL(CAST(32, MSB(CAST(16, MSB(CAST(8, MSB(op_AND_648), DUP(op_AND_648))), CAST(8, MSB(DUP(op_AND_648)), DUP(op_AND_648)))), CAST(16, MSB(CAST(8, MSB(DUP(op_AND_648)), DUP(op_AND_648))), CAST(8, MSB(DUP(op_AND_648)), DUP(op_AND_648)))), CAST(32, MSB(CAST(16, IL_FALSE, CAST(8, IL_FALSE, op_AND_658))), CAST(16, IL_FALSE, CAST(8, IL_FALSE, DUP(op_AND_658))))); + RzILOpPure *op_ADD_666 = ADD(CAST(64, MSB(CAST(32, MSB(CAST(16, MSB(op_AND_640), DUP(op_AND_640))), CAST(16, MSB(DUP(op_AND_640)), DUP(op_AND_640)))), CAST(32, MSB(CAST(16, MSB(DUP(op_AND_640)), DUP(op_AND_640))), CAST(16, MSB(DUP(op_AND_640)), DUP(op_AND_640)))), CAST(64, MSB(op_MUL_662), DUP(op_MUL_662))); + RzILOpPure *op_RSHIFT_670 = SHIFTRA(DUP(Rss), SN(32, 0x28)); + RzILOpPure *op_AND_673 = LOGAND(op_RSHIFT_670, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_RSHIFT_680 = SHIFTRA(DUP(Rtt), SN(32, 0x28)); + RzILOpPure *op_AND_683 = LOGAND(op_RSHIFT_680, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_MUL_687 = MUL(CAST(32, MSB(CAST(16, 
MSB(CAST(8, MSB(op_AND_673), DUP(op_AND_673))), CAST(8, MSB(DUP(op_AND_673)), DUP(op_AND_673)))), CAST(16, MSB(CAST(8, MSB(DUP(op_AND_673)), DUP(op_AND_673))), CAST(8, MSB(DUP(op_AND_673)), DUP(op_AND_673)))), CAST(32, MSB(CAST(16, IL_FALSE, CAST(8, IL_FALSE, op_AND_683))), CAST(16, IL_FALSE, CAST(8, IL_FALSE, DUP(op_AND_683))))); + RzILOpPure *op_ADD_689 = ADD(op_ADD_666, CAST(64, MSB(op_MUL_687), DUP(op_MUL_687))); + RzILOpPure *op_EQ_690 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_ADD_628), SN(32, 0), SN(32, 16)), op_ADD_689); + RzILOpPure *op_RSHIFT_753 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_756 = LOGAND(op_RSHIFT_753, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_761 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_764 = LOGAND(op_RSHIFT_761, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_RSHIFT_771 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_774 = LOGAND(op_RSHIFT_771, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_MUL_778 = MUL(CAST(32, MSB(CAST(16, MSB(CAST(8, MSB(op_AND_764), DUP(op_AND_764))), CAST(8, MSB(DUP(op_AND_764)), DUP(op_AND_764)))), CAST(16, MSB(CAST(8, MSB(DUP(op_AND_764)), DUP(op_AND_764))), CAST(8, MSB(DUP(op_AND_764)), DUP(op_AND_764)))), CAST(32, MSB(CAST(16, IL_FALSE, CAST(8, IL_FALSE, op_AND_774))), CAST(16, IL_FALSE, CAST(8, IL_FALSE, DUP(op_AND_774))))); + RzILOpPure *op_ADD_782 = ADD(CAST(64, MSB(CAST(32, MSB(CAST(16, MSB(op_AND_756), DUP(op_AND_756))), CAST(16, MSB(DUP(op_AND_756)), DUP(op_AND_756)))), CAST(32, MSB(CAST(16, MSB(DUP(op_AND_756)), DUP(op_AND_756))), CAST(16, MSB(DUP(op_AND_756)), DUP(op_AND_756)))), CAST(64, MSB(op_MUL_778), DUP(op_MUL_778))); + RzILOpPure *op_RSHIFT_786 = SHIFTRA(DUP(Rss), SN(32, 0x28)); + RzILOpPure *op_AND_789 = LOGAND(op_RSHIFT_786, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_RSHIFT_796 = SHIFTRA(DUP(Rtt), SN(32, 0x28)); + RzILOpPure *op_AND_799 = LOGAND(op_RSHIFT_796, CAST(64, 
MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_MUL_803 = MUL(CAST(32, MSB(CAST(16, MSB(CAST(8, MSB(op_AND_789), DUP(op_AND_789))), CAST(8, MSB(DUP(op_AND_789)), DUP(op_AND_789)))), CAST(16, MSB(CAST(8, MSB(DUP(op_AND_789)), DUP(op_AND_789))), CAST(8, MSB(DUP(op_AND_789)), DUP(op_AND_789)))), CAST(32, MSB(CAST(16, IL_FALSE, CAST(8, IL_FALSE, op_AND_799))), CAST(16, IL_FALSE, CAST(8, IL_FALSE, DUP(op_AND_799))))); + RzILOpPure *op_ADD_805 = ADD(op_ADD_782, CAST(64, MSB(op_MUL_803), DUP(op_MUL_803))); + RzILOpPure *op_LT_808 = SLT(op_ADD_805, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_813 = SHIFTL0(SN(64, 1), SN(32, 15)); + RzILOpPure *op_NEG_814 = NEG(op_LSHIFT_813); + RzILOpPure *op_LSHIFT_819 = SHIFTL0(SN(64, 1), SN(32, 15)); + RzILOpPure *op_SUB_822 = SUB(op_LSHIFT_819, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_823 = ITE(op_LT_808, op_NEG_814, op_SUB_822); + RzILOpEffect *gcc_expr_824 = BRANCH(op_EQ_690, EMPTY(), set_usr_field_call_749); + + // h_tmp443 = HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((st16) ((Rxx >> 0x20) & ((st64) 0xffff))))) + ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x20) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x20) & ((st64) 0xff)))))) + ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x28) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x28) & ((st64) 0xff))))))), 0x0, 0x10) == ((st64) ((st32) ((st16) ((Rxx >> 0x20) & ((st64) 0xffff))))) + ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x20) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x20) & ((st64) 0xff)))))) + ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x28) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x28) & ((st64) 0xff)))))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((st16) ((Rxx >> 0x20) & ((st64) 0xffff))))) + ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x20) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x20) & ((st64) 0xff)))))) + ((st64) ((st32) ((st16) ((st8) 
((Rss >> 0x28) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x28) & ((st64) 0xff)))))) < ((st64) 0x0)) ? (-(0x1 << 0xf)) : (0x1 << 0xf) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_826 = SETL("h_tmp443", cond_823); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((st16) ...; + RzILOpEffect *seq_827 = SEQN(2, gcc_expr_824, op_ASSIGN_hybrid_tmp_826); + + // Rxx = ((st64) (((ut64) (Rxx & (~(0xffff << 0x20)))) | (((ut64) (((sextract64(((ut64) ((st64) ((st32) ((st16) ((Rxx >> 0x20) & ((st64) 0xffff))))) + ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x20) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x20) & ((st64) 0xff)))))) + ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x28) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x28) & ((st64) 0xff))))))), 0x0, 0x10) == ((st64) ((st32) ((st16) ((Rxx >> 0x20) & ((st64) 0xffff))))) + ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x20) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x20) & ((st64) 0xff)))))) + ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x28) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x28) & ((st64) 0xff))))))) ? 
((st64) ((st32) ((st16) ((Rxx >> 0x20) & ((st64) 0xffff))))) + ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x20) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x20) & ((st64) 0xff)))))) + ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x28) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x28) & ((st64) 0xff)))))) : h_tmp443) & ((st64) 0xffff))) << 0x20))); + RzILOpPure *op_LSHIFT_567 = SHIFTL0(SN(64, 0xffff), SN(32, 0x20)); + RzILOpPure *op_NOT_568 = LOGNOT(op_LSHIFT_567); + RzILOpPure *op_AND_569 = LOGAND(READ_REG(pkt, Rxx_op, false), op_NOT_568); + RzILOpPure *op_RSHIFT_694 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_697 = LOGAND(op_RSHIFT_694, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_702 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_705 = LOGAND(op_RSHIFT_702, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_RSHIFT_712 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_715 = LOGAND(op_RSHIFT_712, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_MUL_719 = MUL(CAST(32, MSB(CAST(16, MSB(CAST(8, MSB(op_AND_705), DUP(op_AND_705))), CAST(8, MSB(DUP(op_AND_705)), DUP(op_AND_705)))), CAST(16, MSB(CAST(8, MSB(DUP(op_AND_705)), DUP(op_AND_705))), CAST(8, MSB(DUP(op_AND_705)), DUP(op_AND_705)))), CAST(32, MSB(CAST(16, IL_FALSE, CAST(8, IL_FALSE, op_AND_715))), CAST(16, IL_FALSE, CAST(8, IL_FALSE, DUP(op_AND_715))))); + RzILOpPure *op_ADD_723 = ADD(CAST(64, MSB(CAST(32, MSB(CAST(16, MSB(op_AND_697), DUP(op_AND_697))), CAST(16, MSB(DUP(op_AND_697)), DUP(op_AND_697)))), CAST(32, MSB(CAST(16, MSB(DUP(op_AND_697)), DUP(op_AND_697))), CAST(16, MSB(DUP(op_AND_697)), DUP(op_AND_697)))), CAST(64, MSB(op_MUL_719), DUP(op_MUL_719))); + RzILOpPure *op_RSHIFT_727 = SHIFTRA(DUP(Rss), SN(32, 0x28)); + RzILOpPure *op_AND_730 = LOGAND(op_RSHIFT_727, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_RSHIFT_737 = SHIFTRA(DUP(Rtt), SN(32, 0x28)); + RzILOpPure *op_AND_740 = 
LOGAND(op_RSHIFT_737, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_MUL_744 = MUL(CAST(32, MSB(CAST(16, MSB(CAST(8, MSB(op_AND_730), DUP(op_AND_730))), CAST(8, MSB(DUP(op_AND_730)), DUP(op_AND_730)))), CAST(16, MSB(CAST(8, MSB(DUP(op_AND_730)), DUP(op_AND_730))), CAST(8, MSB(DUP(op_AND_730)), DUP(op_AND_730)))), CAST(32, MSB(CAST(16, IL_FALSE, CAST(8, IL_FALSE, op_AND_740))), CAST(16, IL_FALSE, CAST(8, IL_FALSE, DUP(op_AND_740))))); + RzILOpPure *op_ADD_746 = ADD(op_ADD_723, CAST(64, MSB(op_MUL_744), DUP(op_MUL_744))); + RzILOpPure *cond_828 = ITE(DUP(op_EQ_690), op_ADD_746, VARL("h_tmp443")); + RzILOpPure *op_AND_831 = LOGAND(cond_828, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_LSHIFT_836 = SHIFTL0(CAST(64, IL_FALSE, op_AND_831), SN(32, 0x20)); + RzILOpPure *op_OR_838 = LOGOR(CAST(64, IL_FALSE, op_AND_569), op_LSHIFT_836); + RzILOpEffect *op_ASSIGN_840 = WRITE_REG(bundle, Rxx_op, CAST(64, IL_FALSE, op_OR_838)); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((s ...; + RzILOpEffect *seq_841 = SEQN(2, seq_827, op_ASSIGN_840); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_1029 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((st16) ((Rxx >> 0x30) & ((st64) 0xffff))))) + ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x30) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x30) & ((st64) 0xff)))))) + ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x38) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x38) & ((st64) 0xff))))))), 0x0, 0x10) == ((st64) ((st32) ((st16) ((Rxx >> 0x30) & ((st64) 0xffff))))) + ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x30) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x30) & ((st64) 0xff)))))) + ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x38) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x38) & ((st64) 0xff)))))))) {{}} else 
{set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((st16) ((Rxx >> 0x30) & ((st64) 0xffff))))) + ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x30) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x30) & ((st64) 0xff)))))) + ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x38) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x38) & ((st64) 0xff)))))) < ((st64) 0x0)) ? (-(0x1 << 0xf)) : (0x1 << 0xf) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_856 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x30)); + RzILOpPure *op_AND_859 = LOGAND(op_RSHIFT_856, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_864 = SHIFTRA(DUP(Rss), SN(32, 0x30)); + RzILOpPure *op_AND_867 = LOGAND(op_RSHIFT_864, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_RSHIFT_874 = SHIFTRA(DUP(Rtt), SN(32, 0x30)); + RzILOpPure *op_AND_877 = LOGAND(op_RSHIFT_874, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_MUL_881 = MUL(CAST(32, MSB(CAST(16, MSB(CAST(8, MSB(op_AND_867), DUP(op_AND_867))), CAST(8, MSB(DUP(op_AND_867)), DUP(op_AND_867)))), CAST(16, MSB(CAST(8, MSB(DUP(op_AND_867)), DUP(op_AND_867))), CAST(8, MSB(DUP(op_AND_867)), DUP(op_AND_867)))), CAST(32, MSB(CAST(16, IL_FALSE, CAST(8, IL_FALSE, op_AND_877))), CAST(16, IL_FALSE, CAST(8, IL_FALSE, DUP(op_AND_877))))); + RzILOpPure *op_ADD_885 = ADD(CAST(64, MSB(CAST(32, MSB(CAST(16, MSB(op_AND_859), DUP(op_AND_859))), CAST(16, MSB(DUP(op_AND_859)), DUP(op_AND_859)))), CAST(32, MSB(CAST(16, MSB(DUP(op_AND_859)), DUP(op_AND_859))), CAST(16, MSB(DUP(op_AND_859)), DUP(op_AND_859)))), CAST(64, MSB(op_MUL_881), DUP(op_MUL_881))); + RzILOpPure *op_RSHIFT_889 = SHIFTRA(DUP(Rss), SN(32, 0x38)); + RzILOpPure *op_AND_892 = LOGAND(op_RSHIFT_889, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_RSHIFT_899 = SHIFTRA(DUP(Rtt), SN(32, 0x38)); + RzILOpPure *op_AND_902 = LOGAND(op_RSHIFT_899, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_MUL_906 = MUL(CAST(32, 
MSB(CAST(16, MSB(CAST(8, MSB(op_AND_892), DUP(op_AND_892))), CAST(8, MSB(DUP(op_AND_892)), DUP(op_AND_892)))), CAST(16, MSB(CAST(8, MSB(DUP(op_AND_892)), DUP(op_AND_892))), CAST(8, MSB(DUP(op_AND_892)), DUP(op_AND_892)))), CAST(32, MSB(CAST(16, IL_FALSE, CAST(8, IL_FALSE, op_AND_902))), CAST(16, IL_FALSE, CAST(8, IL_FALSE, DUP(op_AND_902))))); + RzILOpPure *op_ADD_908 = ADD(op_ADD_885, CAST(64, MSB(op_MUL_906), DUP(op_MUL_906))); + RzILOpPure *op_RSHIFT_917 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x30)); + RzILOpPure *op_AND_920 = LOGAND(op_RSHIFT_917, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_925 = SHIFTRA(DUP(Rss), SN(32, 0x30)); + RzILOpPure *op_AND_928 = LOGAND(op_RSHIFT_925, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_RSHIFT_935 = SHIFTRA(DUP(Rtt), SN(32, 0x30)); + RzILOpPure *op_AND_938 = LOGAND(op_RSHIFT_935, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_MUL_942 = MUL(CAST(32, MSB(CAST(16, MSB(CAST(8, MSB(op_AND_928), DUP(op_AND_928))), CAST(8, MSB(DUP(op_AND_928)), DUP(op_AND_928)))), CAST(16, MSB(CAST(8, MSB(DUP(op_AND_928)), DUP(op_AND_928))), CAST(8, MSB(DUP(op_AND_928)), DUP(op_AND_928)))), CAST(32, MSB(CAST(16, IL_FALSE, CAST(8, IL_FALSE, op_AND_938))), CAST(16, IL_FALSE, CAST(8, IL_FALSE, DUP(op_AND_938))))); + RzILOpPure *op_ADD_946 = ADD(CAST(64, MSB(CAST(32, MSB(CAST(16, MSB(op_AND_920), DUP(op_AND_920))), CAST(16, MSB(DUP(op_AND_920)), DUP(op_AND_920)))), CAST(32, MSB(CAST(16, MSB(DUP(op_AND_920)), DUP(op_AND_920))), CAST(16, MSB(DUP(op_AND_920)), DUP(op_AND_920)))), CAST(64, MSB(op_MUL_942), DUP(op_MUL_942))); + RzILOpPure *op_RSHIFT_950 = SHIFTRA(DUP(Rss), SN(32, 0x38)); + RzILOpPure *op_AND_953 = LOGAND(op_RSHIFT_950, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_RSHIFT_960 = SHIFTRA(DUP(Rtt), SN(32, 0x38)); + RzILOpPure *op_AND_963 = LOGAND(op_RSHIFT_960, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_MUL_967 = MUL(CAST(32, MSB(CAST(16, 
MSB(CAST(8, MSB(op_AND_953), DUP(op_AND_953))), CAST(8, MSB(DUP(op_AND_953)), DUP(op_AND_953)))), CAST(16, MSB(CAST(8, MSB(DUP(op_AND_953)), DUP(op_AND_953))), CAST(8, MSB(DUP(op_AND_953)), DUP(op_AND_953)))), CAST(32, MSB(CAST(16, IL_FALSE, CAST(8, IL_FALSE, op_AND_963))), CAST(16, IL_FALSE, CAST(8, IL_FALSE, DUP(op_AND_963))))); + RzILOpPure *op_ADD_969 = ADD(op_ADD_946, CAST(64, MSB(op_MUL_967), DUP(op_MUL_967))); + RzILOpPure *op_EQ_970 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_ADD_908), SN(32, 0), SN(32, 16)), op_ADD_969); + RzILOpPure *op_RSHIFT_1033 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x30)); + RzILOpPure *op_AND_1036 = LOGAND(op_RSHIFT_1033, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_1041 = SHIFTRA(DUP(Rss), SN(32, 0x30)); + RzILOpPure *op_AND_1044 = LOGAND(op_RSHIFT_1041, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_RSHIFT_1051 = SHIFTRA(DUP(Rtt), SN(32, 0x30)); + RzILOpPure *op_AND_1054 = LOGAND(op_RSHIFT_1051, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_MUL_1058 = MUL(CAST(32, MSB(CAST(16, MSB(CAST(8, MSB(op_AND_1044), DUP(op_AND_1044))), CAST(8, MSB(DUP(op_AND_1044)), DUP(op_AND_1044)))), CAST(16, MSB(CAST(8, MSB(DUP(op_AND_1044)), DUP(op_AND_1044))), CAST(8, MSB(DUP(op_AND_1044)), DUP(op_AND_1044)))), CAST(32, MSB(CAST(16, IL_FALSE, CAST(8, IL_FALSE, op_AND_1054))), CAST(16, IL_FALSE, CAST(8, IL_FALSE, DUP(op_AND_1054))))); + RzILOpPure *op_ADD_1062 = ADD(CAST(64, MSB(CAST(32, MSB(CAST(16, MSB(op_AND_1036), DUP(op_AND_1036))), CAST(16, MSB(DUP(op_AND_1036)), DUP(op_AND_1036)))), CAST(32, MSB(CAST(16, MSB(DUP(op_AND_1036)), DUP(op_AND_1036))), CAST(16, MSB(DUP(op_AND_1036)), DUP(op_AND_1036)))), CAST(64, MSB(op_MUL_1058), DUP(op_MUL_1058))); + RzILOpPure *op_RSHIFT_1066 = SHIFTRA(DUP(Rss), SN(32, 0x38)); + RzILOpPure *op_AND_1069 = LOGAND(op_RSHIFT_1066, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_RSHIFT_1076 = SHIFTRA(DUP(Rtt), SN(32, 0x38)); + RzILOpPure *op_AND_1079 
= LOGAND(op_RSHIFT_1076, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_MUL_1083 = MUL(CAST(32, MSB(CAST(16, MSB(CAST(8, MSB(op_AND_1069), DUP(op_AND_1069))), CAST(8, MSB(DUP(op_AND_1069)), DUP(op_AND_1069)))), CAST(16, MSB(CAST(8, MSB(DUP(op_AND_1069)), DUP(op_AND_1069))), CAST(8, MSB(DUP(op_AND_1069)), DUP(op_AND_1069)))), CAST(32, MSB(CAST(16, IL_FALSE, CAST(8, IL_FALSE, op_AND_1079))), CAST(16, IL_FALSE, CAST(8, IL_FALSE, DUP(op_AND_1079))))); + RzILOpPure *op_ADD_1085 = ADD(op_ADD_1062, CAST(64, MSB(op_MUL_1083), DUP(op_MUL_1083))); + RzILOpPure *op_LT_1088 = SLT(op_ADD_1085, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_1093 = SHIFTL0(SN(64, 1), SN(32, 15)); + RzILOpPure *op_NEG_1094 = NEG(op_LSHIFT_1093); + RzILOpPure *op_LSHIFT_1099 = SHIFTL0(SN(64, 1), SN(32, 15)); + RzILOpPure *op_SUB_1102 = SUB(op_LSHIFT_1099, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_1103 = ITE(op_LT_1088, op_NEG_1094, op_SUB_1102); + RzILOpEffect *gcc_expr_1104 = BRANCH(op_EQ_970, EMPTY(), set_usr_field_call_1029); + + // h_tmp444 = HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((st16) ((Rxx >> 0x30) & ((st64) 0xffff))))) + ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x30) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x30) & ((st64) 0xff)))))) + ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x38) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x38) & ((st64) 0xff))))))), 0x0, 0x10) == ((st64) ((st32) ((st16) ((Rxx >> 0x30) & ((st64) 0xffff))))) + ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x30) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x30) & ((st64) 0xff)))))) + ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x38) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x38) & ((st64) 0xff)))))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((st16) ((Rxx >> 0x30) & ((st64) 0xffff))))) + ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x30) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt 
>> 0x30) & ((st64) 0xff)))))) + ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x38) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x38) & ((st64) 0xff)))))) < ((st64) 0x0)) ? (-(0x1 << 0xf)) : (0x1 << 0xf) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_1106 = SETL("h_tmp444", cond_1103); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((st16) ...; + RzILOpEffect *seq_1107 = SEQN(2, gcc_expr_1104, op_ASSIGN_hybrid_tmp_1106); + + // Rxx = ((st64) (((ut64) (Rxx & (~(0xffff << 0x30)))) | (((ut64) (((sextract64(((ut64) ((st64) ((st32) ((st16) ((Rxx >> 0x30) & ((st64) 0xffff))))) + ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x30) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x30) & ((st64) 0xff)))))) + ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x38) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x38) & ((st64) 0xff))))))), 0x0, 0x10) == ((st64) ((st32) ((st16) ((Rxx >> 0x30) & ((st64) 0xffff))))) + ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x30) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x30) & ((st64) 0xff)))))) + ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x38) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x38) & ((st64) 0xff))))))) ? 
((st64) ((st32) ((st16) ((Rxx >> 0x30) & ((st64) 0xffff))))) + ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x30) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x30) & ((st64) 0xff)))))) + ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x38) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x38) & ((st64) 0xff)))))) : h_tmp444) & ((st64) 0xffff))) << 0x30))); + RzILOpPure *op_LSHIFT_847 = SHIFTL0(SN(64, 0xffff), SN(32, 0x30)); + RzILOpPure *op_NOT_848 = LOGNOT(op_LSHIFT_847); + RzILOpPure *op_AND_849 = LOGAND(READ_REG(pkt, Rxx_op, false), op_NOT_848); + RzILOpPure *op_RSHIFT_974 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x30)); + RzILOpPure *op_AND_977 = LOGAND(op_RSHIFT_974, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_982 = SHIFTRA(DUP(Rss), SN(32, 0x30)); + RzILOpPure *op_AND_985 = LOGAND(op_RSHIFT_982, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_RSHIFT_992 = SHIFTRA(DUP(Rtt), SN(32, 0x30)); + RzILOpPure *op_AND_995 = LOGAND(op_RSHIFT_992, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_MUL_999 = MUL(CAST(32, MSB(CAST(16, MSB(CAST(8, MSB(op_AND_985), DUP(op_AND_985))), CAST(8, MSB(DUP(op_AND_985)), DUP(op_AND_985)))), CAST(16, MSB(CAST(8, MSB(DUP(op_AND_985)), DUP(op_AND_985))), CAST(8, MSB(DUP(op_AND_985)), DUP(op_AND_985)))), CAST(32, MSB(CAST(16, IL_FALSE, CAST(8, IL_FALSE, op_AND_995))), CAST(16, IL_FALSE, CAST(8, IL_FALSE, DUP(op_AND_995))))); + RzILOpPure *op_ADD_1003 = ADD(CAST(64, MSB(CAST(32, MSB(CAST(16, MSB(op_AND_977), DUP(op_AND_977))), CAST(16, MSB(DUP(op_AND_977)), DUP(op_AND_977)))), CAST(32, MSB(CAST(16, MSB(DUP(op_AND_977)), DUP(op_AND_977))), CAST(16, MSB(DUP(op_AND_977)), DUP(op_AND_977)))), CAST(64, MSB(op_MUL_999), DUP(op_MUL_999))); + RzILOpPure *op_RSHIFT_1007 = SHIFTRA(DUP(Rss), SN(32, 0x38)); + RzILOpPure *op_AND_1010 = LOGAND(op_RSHIFT_1007, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_RSHIFT_1017 = SHIFTRA(DUP(Rtt), SN(32, 0x38)); + RzILOpPure *op_AND_1020 
= LOGAND(op_RSHIFT_1017, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_MUL_1024 = MUL(CAST(32, MSB(CAST(16, MSB(CAST(8, MSB(op_AND_1010), DUP(op_AND_1010))), CAST(8, MSB(DUP(op_AND_1010)), DUP(op_AND_1010)))), CAST(16, MSB(CAST(8, MSB(DUP(op_AND_1010)), DUP(op_AND_1010))), CAST(8, MSB(DUP(op_AND_1010)), DUP(op_AND_1010)))), CAST(32, MSB(CAST(16, IL_FALSE, CAST(8, IL_FALSE, op_AND_1020))), CAST(16, IL_FALSE, CAST(8, IL_FALSE, DUP(op_AND_1020))))); + RzILOpPure *op_ADD_1026 = ADD(op_ADD_1003, CAST(64, MSB(op_MUL_1024), DUP(op_MUL_1024))); + RzILOpPure *cond_1108 = ITE(DUP(op_EQ_970), op_ADD_1026, VARL("h_tmp444")); + RzILOpPure *op_AND_1111 = LOGAND(cond_1108, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_LSHIFT_1116 = SHIFTL0(CAST(64, IL_FALSE, op_AND_1111), SN(32, 0x30)); + RzILOpPure *op_OR_1118 = LOGOR(CAST(64, IL_FALSE, op_AND_849), op_LSHIFT_1116); + RzILOpEffect *op_ASSIGN_1120 = WRITE_REG(bundle, Rxx_op, CAST(64, IL_FALSE, op_OR_1118)); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((s ...; + RzILOpEffect *seq_1121 = SEQN(2, seq_1107, op_ASSIGN_1120); + + RzILOpEffect *instruction_sequence = SEQN(4, seq_281, seq_561, seq_841, seq_1121); + return instruction_sequence; +} + +// Rdd = vdmpybsu(Rss,Rtt):sat +RzILOpEffect *hex_il_op_m5_vdmpybsu(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_156 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x0) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) 
((Rtt >> 0x0) & ((st64) 0xff)))))) + ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x8) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x8) & ((st64) 0xff))))))), 0x0, 0x10) == ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x0) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x0) & ((st64) 0xff)))))) + ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x8) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x8) & ((st64) 0xff)))))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((st16) ((st8) ((Rss >> 0x0) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x0) & ((st64) 0xff)))))) + ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x8) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x8) & ((st64) 0xff)))))) < ((st64) 0x0)) ? (-(0x1 << 0xf)) : (0x1 << 0xf) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_15 = SHIFTRA(Rss, SN(32, 0)); + RzILOpPure *op_AND_18 = LOGAND(op_RSHIFT_15, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_RSHIFT_26 = SHIFTRA(Rtt, SN(32, 0)); + RzILOpPure *op_AND_29 = LOGAND(op_RSHIFT_26, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_MUL_33 = MUL(CAST(32, MSB(CAST(16, MSB(CAST(8, MSB(op_AND_18), DUP(op_AND_18))), CAST(8, MSB(DUP(op_AND_18)), DUP(op_AND_18)))), CAST(16, MSB(CAST(8, MSB(DUP(op_AND_18)), DUP(op_AND_18))), CAST(8, MSB(DUP(op_AND_18)), DUP(op_AND_18)))), CAST(32, MSB(CAST(16, IL_FALSE, CAST(8, IL_FALSE, op_AND_29))), CAST(16, IL_FALSE, CAST(8, IL_FALSE, DUP(op_AND_29))))); + RzILOpPure *op_RSHIFT_38 = SHIFTRA(DUP(Rss), SN(32, 8)); + RzILOpPure *op_AND_41 = LOGAND(op_RSHIFT_38, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_RSHIFT_48 = SHIFTRA(DUP(Rtt), SN(32, 8)); + RzILOpPure *op_AND_51 = LOGAND(op_RSHIFT_48, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_MUL_55 = MUL(CAST(32, MSB(CAST(16, MSB(CAST(8, MSB(op_AND_41), DUP(op_AND_41))), CAST(8, MSB(DUP(op_AND_41)), DUP(op_AND_41)))), CAST(16, MSB(CAST(8, MSB(DUP(op_AND_41)), DUP(op_AND_41))), 
CAST(8, MSB(DUP(op_AND_41)), DUP(op_AND_41)))), CAST(32, MSB(CAST(16, IL_FALSE, CAST(8, IL_FALSE, op_AND_51))), CAST(16, IL_FALSE, CAST(8, IL_FALSE, DUP(op_AND_51))))); + RzILOpPure *op_ADD_57 = ADD(CAST(64, MSB(op_MUL_33), DUP(op_MUL_33)), CAST(64, MSB(op_MUL_55), DUP(op_MUL_55))); + RzILOpPure *op_RSHIFT_66 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_69 = LOGAND(op_RSHIFT_66, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_RSHIFT_76 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_79 = LOGAND(op_RSHIFT_76, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_MUL_83 = MUL(CAST(32, MSB(CAST(16, MSB(CAST(8, MSB(op_AND_69), DUP(op_AND_69))), CAST(8, MSB(DUP(op_AND_69)), DUP(op_AND_69)))), CAST(16, MSB(CAST(8, MSB(DUP(op_AND_69)), DUP(op_AND_69))), CAST(8, MSB(DUP(op_AND_69)), DUP(op_AND_69)))), CAST(32, MSB(CAST(16, IL_FALSE, CAST(8, IL_FALSE, op_AND_79))), CAST(16, IL_FALSE, CAST(8, IL_FALSE, DUP(op_AND_79))))); + RzILOpPure *op_RSHIFT_88 = SHIFTRA(DUP(Rss), SN(32, 8)); + RzILOpPure *op_AND_91 = LOGAND(op_RSHIFT_88, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_RSHIFT_98 = SHIFTRA(DUP(Rtt), SN(32, 8)); + RzILOpPure *op_AND_101 = LOGAND(op_RSHIFT_98, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_MUL_105 = MUL(CAST(32, MSB(CAST(16, MSB(CAST(8, MSB(op_AND_91), DUP(op_AND_91))), CAST(8, MSB(DUP(op_AND_91)), DUP(op_AND_91)))), CAST(16, MSB(CAST(8, MSB(DUP(op_AND_91)), DUP(op_AND_91))), CAST(8, MSB(DUP(op_AND_91)), DUP(op_AND_91)))), CAST(32, MSB(CAST(16, IL_FALSE, CAST(8, IL_FALSE, op_AND_101))), CAST(16, IL_FALSE, CAST(8, IL_FALSE, DUP(op_AND_101))))); + RzILOpPure *op_ADD_107 = ADD(CAST(64, MSB(op_MUL_83), DUP(op_MUL_83)), CAST(64, MSB(op_MUL_105), DUP(op_MUL_105))); + RzILOpPure *op_EQ_108 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_ADD_57), SN(32, 0), SN(32, 16)), op_ADD_107); + RzILOpPure *op_RSHIFT_160 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_163 = LOGAND(op_RSHIFT_160, CAST(64, MSB(SN(32, 
0xff)), SN(32, 0xff))); + RzILOpPure *op_RSHIFT_170 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_173 = LOGAND(op_RSHIFT_170, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_MUL_177 = MUL(CAST(32, MSB(CAST(16, MSB(CAST(8, MSB(op_AND_163), DUP(op_AND_163))), CAST(8, MSB(DUP(op_AND_163)), DUP(op_AND_163)))), CAST(16, MSB(CAST(8, MSB(DUP(op_AND_163)), DUP(op_AND_163))), CAST(8, MSB(DUP(op_AND_163)), DUP(op_AND_163)))), CAST(32, MSB(CAST(16, IL_FALSE, CAST(8, IL_FALSE, op_AND_173))), CAST(16, IL_FALSE, CAST(8, IL_FALSE, DUP(op_AND_173))))); + RzILOpPure *op_RSHIFT_182 = SHIFTRA(DUP(Rss), SN(32, 8)); + RzILOpPure *op_AND_185 = LOGAND(op_RSHIFT_182, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_RSHIFT_192 = SHIFTRA(DUP(Rtt), SN(32, 8)); + RzILOpPure *op_AND_195 = LOGAND(op_RSHIFT_192, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_MUL_199 = MUL(CAST(32, MSB(CAST(16, MSB(CAST(8, MSB(op_AND_185), DUP(op_AND_185))), CAST(8, MSB(DUP(op_AND_185)), DUP(op_AND_185)))), CAST(16, MSB(CAST(8, MSB(DUP(op_AND_185)), DUP(op_AND_185))), CAST(8, MSB(DUP(op_AND_185)), DUP(op_AND_185)))), CAST(32, MSB(CAST(16, IL_FALSE, CAST(8, IL_FALSE, op_AND_195))), CAST(16, IL_FALSE, CAST(8, IL_FALSE, DUP(op_AND_195))))); + RzILOpPure *op_ADD_201 = ADD(CAST(64, MSB(op_MUL_177), DUP(op_MUL_177)), CAST(64, MSB(op_MUL_199), DUP(op_MUL_199))); + RzILOpPure *op_LT_204 = SLT(op_ADD_201, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_209 = SHIFTL0(SN(64, 1), SN(32, 15)); + RzILOpPure *op_NEG_210 = NEG(op_LSHIFT_209); + RzILOpPure *op_LSHIFT_215 = SHIFTL0(SN(64, 1), SN(32, 15)); + RzILOpPure *op_SUB_218 = SUB(op_LSHIFT_215, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_219 = ITE(op_LT_204, op_NEG_210, op_SUB_218); + RzILOpEffect *gcc_expr_220 = BRANCH(op_EQ_108, EMPTY(), set_usr_field_call_156); + + // h_tmp445 = HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x0) & ((st64) 0xff))))) * ((st32) ((st16) 
((ut8) ((Rtt >> 0x0) & ((st64) 0xff)))))) + ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x8) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x8) & ((st64) 0xff))))))), 0x0, 0x10) == ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x0) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x0) & ((st64) 0xff)))))) + ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x8) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x8) & ((st64) 0xff)))))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((st16) ((st8) ((Rss >> 0x0) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x0) & ((st64) 0xff)))))) + ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x8) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x8) & ((st64) 0xff)))))) < ((st64) 0x0)) ? (-(0x1 << 0xf)) : (0x1 << 0xf) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_222 = SETL("h_tmp445", cond_219); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((st16) ...; + RzILOpEffect *seq_223 = SEQN(2, gcc_expr_220, op_ASSIGN_hybrid_tmp_222); + + // Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << 0x0)))) | (((ut64) (((sextract64(((ut64) ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x0) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x0) & ((st64) 0xff)))))) + ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x8) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x8) & ((st64) 0xff))))))), 0x0, 0x10) == ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x0) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x0) & ((st64) 0xff)))))) + ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x8) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x8) & ((st64) 0xff))))))) ? 
((st64) ((st32) ((st16) ((st8) ((Rss >> 0x0) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x0) & ((st64) 0xff)))))) + ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x8) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x8) & ((st64) 0xff)))))) : h_tmp445) & ((st64) 0xffff))) << 0x0))); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(SN(64, 0xffff), SN(32, 0)); + RzILOpPure *op_NOT_6 = LOGNOT(op_LSHIFT_5); + RzILOpPure *op_AND_7 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_6); + RzILOpPure *op_RSHIFT_112 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_115 = LOGAND(op_RSHIFT_112, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_RSHIFT_122 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_125 = LOGAND(op_RSHIFT_122, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_MUL_129 = MUL(CAST(32, MSB(CAST(16, MSB(CAST(8, MSB(op_AND_115), DUP(op_AND_115))), CAST(8, MSB(DUP(op_AND_115)), DUP(op_AND_115)))), CAST(16, MSB(CAST(8, MSB(DUP(op_AND_115)), DUP(op_AND_115))), CAST(8, MSB(DUP(op_AND_115)), DUP(op_AND_115)))), CAST(32, MSB(CAST(16, IL_FALSE, CAST(8, IL_FALSE, op_AND_125))), CAST(16, IL_FALSE, CAST(8, IL_FALSE, DUP(op_AND_125))))); + RzILOpPure *op_RSHIFT_134 = SHIFTRA(DUP(Rss), SN(32, 8)); + RzILOpPure *op_AND_137 = LOGAND(op_RSHIFT_134, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_RSHIFT_144 = SHIFTRA(DUP(Rtt), SN(32, 8)); + RzILOpPure *op_AND_147 = LOGAND(op_RSHIFT_144, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_MUL_151 = MUL(CAST(32, MSB(CAST(16, MSB(CAST(8, MSB(op_AND_137), DUP(op_AND_137))), CAST(8, MSB(DUP(op_AND_137)), DUP(op_AND_137)))), CAST(16, MSB(CAST(8, MSB(DUP(op_AND_137)), DUP(op_AND_137))), CAST(8, MSB(DUP(op_AND_137)), DUP(op_AND_137)))), CAST(32, MSB(CAST(16, IL_FALSE, CAST(8, IL_FALSE, op_AND_147))), CAST(16, IL_FALSE, CAST(8, IL_FALSE, DUP(op_AND_147))))); + RzILOpPure *op_ADD_153 = ADD(CAST(64, MSB(op_MUL_129), DUP(op_MUL_129)), CAST(64, MSB(op_MUL_151), DUP(op_MUL_151))); + RzILOpPure 
*cond_224 = ITE(DUP(op_EQ_108), op_ADD_153, VARL("h_tmp445")); + RzILOpPure *op_AND_227 = LOGAND(cond_224, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_LSHIFT_232 = SHIFTL0(CAST(64, IL_FALSE, op_AND_227), SN(32, 0)); + RzILOpPure *op_OR_234 = LOGOR(CAST(64, IL_FALSE, op_AND_7), op_LSHIFT_232); + RzILOpEffect *op_ASSIGN_236 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_234)); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((s ...; + RzILOpEffect *seq_237 = SEQN(2, seq_223, op_ASSIGN_236); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_392 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x10) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x10) & ((st64) 0xff)))))) + ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x18) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x18) & ((st64) 0xff))))))), 0x0, 0x10) == ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x10) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x10) & ((st64) 0xff)))))) + ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x18) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x18) & ((st64) 0xff)))))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((st16) ((st8) ((Rss >> 0x10) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x10) & ((st64) 0xff)))))) + ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x18) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x18) & ((st64) 0xff)))))) < ((st64) 0x0)) ? 
(-(0x1 << 0xf)) : (0x1 << 0xf) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_252 = SHIFTRA(DUP(Rss), SN(32, 16)); + RzILOpPure *op_AND_255 = LOGAND(op_RSHIFT_252, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_RSHIFT_262 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_265 = LOGAND(op_RSHIFT_262, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_MUL_269 = MUL(CAST(32, MSB(CAST(16, MSB(CAST(8, MSB(op_AND_255), DUP(op_AND_255))), CAST(8, MSB(DUP(op_AND_255)), DUP(op_AND_255)))), CAST(16, MSB(CAST(8, MSB(DUP(op_AND_255)), DUP(op_AND_255))), CAST(8, MSB(DUP(op_AND_255)), DUP(op_AND_255)))), CAST(32, MSB(CAST(16, IL_FALSE, CAST(8, IL_FALSE, op_AND_265))), CAST(16, IL_FALSE, CAST(8, IL_FALSE, DUP(op_AND_265))))); + RzILOpPure *op_RSHIFT_274 = SHIFTRA(DUP(Rss), SN(32, 24)); + RzILOpPure *op_AND_277 = LOGAND(op_RSHIFT_274, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_RSHIFT_284 = SHIFTRA(DUP(Rtt), SN(32, 24)); + RzILOpPure *op_AND_287 = LOGAND(op_RSHIFT_284, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_MUL_291 = MUL(CAST(32, MSB(CAST(16, MSB(CAST(8, MSB(op_AND_277), DUP(op_AND_277))), CAST(8, MSB(DUP(op_AND_277)), DUP(op_AND_277)))), CAST(16, MSB(CAST(8, MSB(DUP(op_AND_277)), DUP(op_AND_277))), CAST(8, MSB(DUP(op_AND_277)), DUP(op_AND_277)))), CAST(32, MSB(CAST(16, IL_FALSE, CAST(8, IL_FALSE, op_AND_287))), CAST(16, IL_FALSE, CAST(8, IL_FALSE, DUP(op_AND_287))))); + RzILOpPure *op_ADD_293 = ADD(CAST(64, MSB(op_MUL_269), DUP(op_MUL_269)), CAST(64, MSB(op_MUL_291), DUP(op_MUL_291))); + RzILOpPure *op_RSHIFT_302 = SHIFTRA(DUP(Rss), SN(32, 16)); + RzILOpPure *op_AND_305 = LOGAND(op_RSHIFT_302, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_RSHIFT_312 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_315 = LOGAND(op_RSHIFT_312, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_MUL_319 = MUL(CAST(32, MSB(CAST(16, MSB(CAST(8, MSB(op_AND_305), DUP(op_AND_305))), CAST(8, MSB(DUP(op_AND_305)), 
DUP(op_AND_305)))), CAST(16, MSB(CAST(8, MSB(DUP(op_AND_305)), DUP(op_AND_305))), CAST(8, MSB(DUP(op_AND_305)), DUP(op_AND_305)))), CAST(32, MSB(CAST(16, IL_FALSE, CAST(8, IL_FALSE, op_AND_315))), CAST(16, IL_FALSE, CAST(8, IL_FALSE, DUP(op_AND_315))))); + RzILOpPure *op_RSHIFT_324 = SHIFTRA(DUP(Rss), SN(32, 24)); + RzILOpPure *op_AND_327 = LOGAND(op_RSHIFT_324, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_RSHIFT_334 = SHIFTRA(DUP(Rtt), SN(32, 24)); + RzILOpPure *op_AND_337 = LOGAND(op_RSHIFT_334, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_MUL_341 = MUL(CAST(32, MSB(CAST(16, MSB(CAST(8, MSB(op_AND_327), DUP(op_AND_327))), CAST(8, MSB(DUP(op_AND_327)), DUP(op_AND_327)))), CAST(16, MSB(CAST(8, MSB(DUP(op_AND_327)), DUP(op_AND_327))), CAST(8, MSB(DUP(op_AND_327)), DUP(op_AND_327)))), CAST(32, MSB(CAST(16, IL_FALSE, CAST(8, IL_FALSE, op_AND_337))), CAST(16, IL_FALSE, CAST(8, IL_FALSE, DUP(op_AND_337))))); + RzILOpPure *op_ADD_343 = ADD(CAST(64, MSB(op_MUL_319), DUP(op_MUL_319)), CAST(64, MSB(op_MUL_341), DUP(op_MUL_341))); + RzILOpPure *op_EQ_344 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_ADD_293), SN(32, 0), SN(32, 16)), op_ADD_343); + RzILOpPure *op_RSHIFT_396 = SHIFTRA(DUP(Rss), SN(32, 16)); + RzILOpPure *op_AND_399 = LOGAND(op_RSHIFT_396, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_RSHIFT_406 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_409 = LOGAND(op_RSHIFT_406, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_MUL_413 = MUL(CAST(32, MSB(CAST(16, MSB(CAST(8, MSB(op_AND_399), DUP(op_AND_399))), CAST(8, MSB(DUP(op_AND_399)), DUP(op_AND_399)))), CAST(16, MSB(CAST(8, MSB(DUP(op_AND_399)), DUP(op_AND_399))), CAST(8, MSB(DUP(op_AND_399)), DUP(op_AND_399)))), CAST(32, MSB(CAST(16, IL_FALSE, CAST(8, IL_FALSE, op_AND_409))), CAST(16, IL_FALSE, CAST(8, IL_FALSE, DUP(op_AND_409))))); + RzILOpPure *op_RSHIFT_418 = SHIFTRA(DUP(Rss), SN(32, 24)); + RzILOpPure *op_AND_421 = LOGAND(op_RSHIFT_418, CAST(64, 
MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_RSHIFT_428 = SHIFTRA(DUP(Rtt), SN(32, 24)); + RzILOpPure *op_AND_431 = LOGAND(op_RSHIFT_428, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_MUL_435 = MUL(CAST(32, MSB(CAST(16, MSB(CAST(8, MSB(op_AND_421), DUP(op_AND_421))), CAST(8, MSB(DUP(op_AND_421)), DUP(op_AND_421)))), CAST(16, MSB(CAST(8, MSB(DUP(op_AND_421)), DUP(op_AND_421))), CAST(8, MSB(DUP(op_AND_421)), DUP(op_AND_421)))), CAST(32, MSB(CAST(16, IL_FALSE, CAST(8, IL_FALSE, op_AND_431))), CAST(16, IL_FALSE, CAST(8, IL_FALSE, DUP(op_AND_431))))); + RzILOpPure *op_ADD_437 = ADD(CAST(64, MSB(op_MUL_413), DUP(op_MUL_413)), CAST(64, MSB(op_MUL_435), DUP(op_MUL_435))); + RzILOpPure *op_LT_440 = SLT(op_ADD_437, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_445 = SHIFTL0(SN(64, 1), SN(32, 15)); + RzILOpPure *op_NEG_446 = NEG(op_LSHIFT_445); + RzILOpPure *op_LSHIFT_451 = SHIFTL0(SN(64, 1), SN(32, 15)); + RzILOpPure *op_SUB_454 = SUB(op_LSHIFT_451, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_455 = ITE(op_LT_440, op_NEG_446, op_SUB_454); + RzILOpEffect *gcc_expr_456 = BRANCH(op_EQ_344, EMPTY(), set_usr_field_call_392); + + // h_tmp446 = HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x10) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x10) & ((st64) 0xff)))))) + ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x18) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x18) & ((st64) 0xff))))))), 0x0, 0x10) == ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x10) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x10) & ((st64) 0xff)))))) + ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x18) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x18) & ((st64) 0xff)))))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((st16) ((st8) ((Rss >> 0x10) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x10) & ((st64) 0xff)))))) + ((st64) ((st32) ((st16) ((st8) 
((Rss >> 0x18) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x18) & ((st64) 0xff)))))) < ((st64) 0x0)) ? (-(0x1 << 0xf)) : (0x1 << 0xf) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_458 = SETL("h_tmp446", cond_455); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((st16) ...; + RzILOpEffect *seq_459 = SEQN(2, gcc_expr_456, op_ASSIGN_hybrid_tmp_458); + + // Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << 0x10)))) | (((ut64) (((sextract64(((ut64) ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x10) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x10) & ((st64) 0xff)))))) + ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x18) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x18) & ((st64) 0xff))))))), 0x0, 0x10) == ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x10) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x10) & ((st64) 0xff)))))) + ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x18) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x18) & ((st64) 0xff))))))) ? 
((st64) ((st32) ((st16) ((st8) ((Rss >> 0x10) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x10) & ((st64) 0xff)))))) + ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x18) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x18) & ((st64) 0xff)))))) : h_tmp446) & ((st64) 0xffff))) << 0x10))); + RzILOpPure *op_LSHIFT_243 = SHIFTL0(SN(64, 0xffff), SN(32, 16)); + RzILOpPure *op_NOT_244 = LOGNOT(op_LSHIFT_243); + RzILOpPure *op_AND_245 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_244); + RzILOpPure *op_RSHIFT_348 = SHIFTRA(DUP(Rss), SN(32, 16)); + RzILOpPure *op_AND_351 = LOGAND(op_RSHIFT_348, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_RSHIFT_358 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_361 = LOGAND(op_RSHIFT_358, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_MUL_365 = MUL(CAST(32, MSB(CAST(16, MSB(CAST(8, MSB(op_AND_351), DUP(op_AND_351))), CAST(8, MSB(DUP(op_AND_351)), DUP(op_AND_351)))), CAST(16, MSB(CAST(8, MSB(DUP(op_AND_351)), DUP(op_AND_351))), CAST(8, MSB(DUP(op_AND_351)), DUP(op_AND_351)))), CAST(32, MSB(CAST(16, IL_FALSE, CAST(8, IL_FALSE, op_AND_361))), CAST(16, IL_FALSE, CAST(8, IL_FALSE, DUP(op_AND_361))))); + RzILOpPure *op_RSHIFT_370 = SHIFTRA(DUP(Rss), SN(32, 24)); + RzILOpPure *op_AND_373 = LOGAND(op_RSHIFT_370, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_RSHIFT_380 = SHIFTRA(DUP(Rtt), SN(32, 24)); + RzILOpPure *op_AND_383 = LOGAND(op_RSHIFT_380, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_MUL_387 = MUL(CAST(32, MSB(CAST(16, MSB(CAST(8, MSB(op_AND_373), DUP(op_AND_373))), CAST(8, MSB(DUP(op_AND_373)), DUP(op_AND_373)))), CAST(16, MSB(CAST(8, MSB(DUP(op_AND_373)), DUP(op_AND_373))), CAST(8, MSB(DUP(op_AND_373)), DUP(op_AND_373)))), CAST(32, MSB(CAST(16, IL_FALSE, CAST(8, IL_FALSE, op_AND_383))), CAST(16, IL_FALSE, CAST(8, IL_FALSE, DUP(op_AND_383))))); + RzILOpPure *op_ADD_389 = ADD(CAST(64, MSB(op_MUL_365), DUP(op_MUL_365)), CAST(64, MSB(op_MUL_387), 
DUP(op_MUL_387))); + RzILOpPure *cond_460 = ITE(DUP(op_EQ_344), op_ADD_389, VARL("h_tmp446")); + RzILOpPure *op_AND_463 = LOGAND(cond_460, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_LSHIFT_468 = SHIFTL0(CAST(64, IL_FALSE, op_AND_463), SN(32, 16)); + RzILOpPure *op_OR_470 = LOGOR(CAST(64, IL_FALSE, op_AND_245), op_LSHIFT_468); + RzILOpEffect *op_ASSIGN_472 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_470)); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((s ...; + RzILOpEffect *seq_473 = SEQN(2, seq_459, op_ASSIGN_472); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_628 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x20) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x20) & ((st64) 0xff)))))) + ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x28) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x28) & ((st64) 0xff))))))), 0x0, 0x10) == ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x20) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x20) & ((st64) 0xff)))))) + ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x28) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x28) & ((st64) 0xff)))))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((st16) ((st8) ((Rss >> 0x20) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x20) & ((st64) 0xff)))))) + ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x28) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x28) & ((st64) 0xff)))))) < ((st64) 0x0)) ? 
(-(0x1 << 0xf)) : (0x1 << 0xf) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_488 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_491 = LOGAND(op_RSHIFT_488, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_RSHIFT_498 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_501 = LOGAND(op_RSHIFT_498, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_MUL_505 = MUL(CAST(32, MSB(CAST(16, MSB(CAST(8, MSB(op_AND_491), DUP(op_AND_491))), CAST(8, MSB(DUP(op_AND_491)), DUP(op_AND_491)))), CAST(16, MSB(CAST(8, MSB(DUP(op_AND_491)), DUP(op_AND_491))), CAST(8, MSB(DUP(op_AND_491)), DUP(op_AND_491)))), CAST(32, MSB(CAST(16, IL_FALSE, CAST(8, IL_FALSE, op_AND_501))), CAST(16, IL_FALSE, CAST(8, IL_FALSE, DUP(op_AND_501))))); + RzILOpPure *op_RSHIFT_510 = SHIFTRA(DUP(Rss), SN(32, 0x28)); + RzILOpPure *op_AND_513 = LOGAND(op_RSHIFT_510, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_RSHIFT_520 = SHIFTRA(DUP(Rtt), SN(32, 0x28)); + RzILOpPure *op_AND_523 = LOGAND(op_RSHIFT_520, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_MUL_527 = MUL(CAST(32, MSB(CAST(16, MSB(CAST(8, MSB(op_AND_513), DUP(op_AND_513))), CAST(8, MSB(DUP(op_AND_513)), DUP(op_AND_513)))), CAST(16, MSB(CAST(8, MSB(DUP(op_AND_513)), DUP(op_AND_513))), CAST(8, MSB(DUP(op_AND_513)), DUP(op_AND_513)))), CAST(32, MSB(CAST(16, IL_FALSE, CAST(8, IL_FALSE, op_AND_523))), CAST(16, IL_FALSE, CAST(8, IL_FALSE, DUP(op_AND_523))))); + RzILOpPure *op_ADD_529 = ADD(CAST(64, MSB(op_MUL_505), DUP(op_MUL_505)), CAST(64, MSB(op_MUL_527), DUP(op_MUL_527))); + RzILOpPure *op_RSHIFT_538 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_541 = LOGAND(op_RSHIFT_538, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_RSHIFT_548 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_551 = LOGAND(op_RSHIFT_548, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_MUL_555 = MUL(CAST(32, MSB(CAST(16, MSB(CAST(8, MSB(op_AND_541), DUP(op_AND_541))), CAST(8, 
MSB(DUP(op_AND_541)), DUP(op_AND_541)))), CAST(16, MSB(CAST(8, MSB(DUP(op_AND_541)), DUP(op_AND_541))), CAST(8, MSB(DUP(op_AND_541)), DUP(op_AND_541)))), CAST(32, MSB(CAST(16, IL_FALSE, CAST(8, IL_FALSE, op_AND_551))), CAST(16, IL_FALSE, CAST(8, IL_FALSE, DUP(op_AND_551))))); + RzILOpPure *op_RSHIFT_560 = SHIFTRA(DUP(Rss), SN(32, 0x28)); + RzILOpPure *op_AND_563 = LOGAND(op_RSHIFT_560, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_RSHIFT_570 = SHIFTRA(DUP(Rtt), SN(32, 0x28)); + RzILOpPure *op_AND_573 = LOGAND(op_RSHIFT_570, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_MUL_577 = MUL(CAST(32, MSB(CAST(16, MSB(CAST(8, MSB(op_AND_563), DUP(op_AND_563))), CAST(8, MSB(DUP(op_AND_563)), DUP(op_AND_563)))), CAST(16, MSB(CAST(8, MSB(DUP(op_AND_563)), DUP(op_AND_563))), CAST(8, MSB(DUP(op_AND_563)), DUP(op_AND_563)))), CAST(32, MSB(CAST(16, IL_FALSE, CAST(8, IL_FALSE, op_AND_573))), CAST(16, IL_FALSE, CAST(8, IL_FALSE, DUP(op_AND_573))))); + RzILOpPure *op_ADD_579 = ADD(CAST(64, MSB(op_MUL_555), DUP(op_MUL_555)), CAST(64, MSB(op_MUL_577), DUP(op_MUL_577))); + RzILOpPure *op_EQ_580 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_ADD_529), SN(32, 0), SN(32, 16)), op_ADD_579); + RzILOpPure *op_RSHIFT_632 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_635 = LOGAND(op_RSHIFT_632, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_RSHIFT_642 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_645 = LOGAND(op_RSHIFT_642, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_MUL_649 = MUL(CAST(32, MSB(CAST(16, MSB(CAST(8, MSB(op_AND_635), DUP(op_AND_635))), CAST(8, MSB(DUP(op_AND_635)), DUP(op_AND_635)))), CAST(16, MSB(CAST(8, MSB(DUP(op_AND_635)), DUP(op_AND_635))), CAST(8, MSB(DUP(op_AND_635)), DUP(op_AND_635)))), CAST(32, MSB(CAST(16, IL_FALSE, CAST(8, IL_FALSE, op_AND_645))), CAST(16, IL_FALSE, CAST(8, IL_FALSE, DUP(op_AND_645))))); + RzILOpPure *op_RSHIFT_654 = SHIFTRA(DUP(Rss), SN(32, 0x28)); + RzILOpPure *op_AND_657 = 
LOGAND(op_RSHIFT_654, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_RSHIFT_664 = SHIFTRA(DUP(Rtt), SN(32, 0x28)); + RzILOpPure *op_AND_667 = LOGAND(op_RSHIFT_664, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_MUL_671 = MUL(CAST(32, MSB(CAST(16, MSB(CAST(8, MSB(op_AND_657), DUP(op_AND_657))), CAST(8, MSB(DUP(op_AND_657)), DUP(op_AND_657)))), CAST(16, MSB(CAST(8, MSB(DUP(op_AND_657)), DUP(op_AND_657))), CAST(8, MSB(DUP(op_AND_657)), DUP(op_AND_657)))), CAST(32, MSB(CAST(16, IL_FALSE, CAST(8, IL_FALSE, op_AND_667))), CAST(16, IL_FALSE, CAST(8, IL_FALSE, DUP(op_AND_667))))); + RzILOpPure *op_ADD_673 = ADD(CAST(64, MSB(op_MUL_649), DUP(op_MUL_649)), CAST(64, MSB(op_MUL_671), DUP(op_MUL_671))); + RzILOpPure *op_LT_676 = SLT(op_ADD_673, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_681 = SHIFTL0(SN(64, 1), SN(32, 15)); + RzILOpPure *op_NEG_682 = NEG(op_LSHIFT_681); + RzILOpPure *op_LSHIFT_687 = SHIFTL0(SN(64, 1), SN(32, 15)); + RzILOpPure *op_SUB_690 = SUB(op_LSHIFT_687, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_691 = ITE(op_LT_676, op_NEG_682, op_SUB_690); + RzILOpEffect *gcc_expr_692 = BRANCH(op_EQ_580, EMPTY(), set_usr_field_call_628); + + // h_tmp447 = HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x20) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x20) & ((st64) 0xff)))))) + ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x28) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x28) & ((st64) 0xff))))))), 0x0, 0x10) == ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x20) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x20) & ((st64) 0xff)))))) + ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x28) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x28) & ((st64) 0xff)))))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((st16) ((st8) ((Rss >> 0x20) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x20) & ((st64) 0xff)))))) + 
((st64) ((st32) ((st16) ((st8) ((Rss >> 0x28) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x28) & ((st64) 0xff)))))) < ((st64) 0x0)) ? (-(0x1 << 0xf)) : (0x1 << 0xf) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_694 = SETL("h_tmp447", cond_691); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((st16) ...; + RzILOpEffect *seq_695 = SEQN(2, gcc_expr_692, op_ASSIGN_hybrid_tmp_694); + + // Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << 0x20)))) | (((ut64) (((sextract64(((ut64) ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x20) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x20) & ((st64) 0xff)))))) + ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x28) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x28) & ((st64) 0xff))))))), 0x0, 0x10) == ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x20) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x20) & ((st64) 0xff)))))) + ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x28) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x28) & ((st64) 0xff))))))) ? 
((st64) ((st32) ((st16) ((st8) ((Rss >> 0x20) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x20) & ((st64) 0xff)))))) + ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x28) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x28) & ((st64) 0xff)))))) : h_tmp447) & ((st64) 0xffff))) << 0x20))); + RzILOpPure *op_LSHIFT_479 = SHIFTL0(SN(64, 0xffff), SN(32, 0x20)); + RzILOpPure *op_NOT_480 = LOGNOT(op_LSHIFT_479); + RzILOpPure *op_AND_481 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_480); + RzILOpPure *op_RSHIFT_584 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_587 = LOGAND(op_RSHIFT_584, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_RSHIFT_594 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_597 = LOGAND(op_RSHIFT_594, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_MUL_601 = MUL(CAST(32, MSB(CAST(16, MSB(CAST(8, MSB(op_AND_587), DUP(op_AND_587))), CAST(8, MSB(DUP(op_AND_587)), DUP(op_AND_587)))), CAST(16, MSB(CAST(8, MSB(DUP(op_AND_587)), DUP(op_AND_587))), CAST(8, MSB(DUP(op_AND_587)), DUP(op_AND_587)))), CAST(32, MSB(CAST(16, IL_FALSE, CAST(8, IL_FALSE, op_AND_597))), CAST(16, IL_FALSE, CAST(8, IL_FALSE, DUP(op_AND_597))))); + RzILOpPure *op_RSHIFT_606 = SHIFTRA(DUP(Rss), SN(32, 0x28)); + RzILOpPure *op_AND_609 = LOGAND(op_RSHIFT_606, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_RSHIFT_616 = SHIFTRA(DUP(Rtt), SN(32, 0x28)); + RzILOpPure *op_AND_619 = LOGAND(op_RSHIFT_616, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_MUL_623 = MUL(CAST(32, MSB(CAST(16, MSB(CAST(8, MSB(op_AND_609), DUP(op_AND_609))), CAST(8, MSB(DUP(op_AND_609)), DUP(op_AND_609)))), CAST(16, MSB(CAST(8, MSB(DUP(op_AND_609)), DUP(op_AND_609))), CAST(8, MSB(DUP(op_AND_609)), DUP(op_AND_609)))), CAST(32, MSB(CAST(16, IL_FALSE, CAST(8, IL_FALSE, op_AND_619))), CAST(16, IL_FALSE, CAST(8, IL_FALSE, DUP(op_AND_619))))); + RzILOpPure *op_ADD_625 = ADD(CAST(64, MSB(op_MUL_601), DUP(op_MUL_601)), CAST(64, MSB(op_MUL_623), 
DUP(op_MUL_623))); + RzILOpPure *cond_696 = ITE(DUP(op_EQ_580), op_ADD_625, VARL("h_tmp447")); + RzILOpPure *op_AND_699 = LOGAND(cond_696, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_LSHIFT_704 = SHIFTL0(CAST(64, IL_FALSE, op_AND_699), SN(32, 0x20)); + RzILOpPure *op_OR_706 = LOGOR(CAST(64, IL_FALSE, op_AND_481), op_LSHIFT_704); + RzILOpEffect *op_ASSIGN_708 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_706)); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((s ...; + RzILOpEffect *seq_709 = SEQN(2, seq_695, op_ASSIGN_708); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_864 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x30) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x30) & ((st64) 0xff)))))) + ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x38) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x38) & ((st64) 0xff))))))), 0x0, 0x10) == ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x30) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x30) & ((st64) 0xff)))))) + ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x38) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x38) & ((st64) 0xff)))))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((st16) ((st8) ((Rss >> 0x30) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x30) & ((st64) 0xff)))))) + ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x38) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x38) & ((st64) 0xff)))))) < ((st64) 0x0)) ? 
(-(0x1 << 0xf)) : (0x1 << 0xf) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_724 = SHIFTRA(DUP(Rss), SN(32, 0x30)); + RzILOpPure *op_AND_727 = LOGAND(op_RSHIFT_724, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_RSHIFT_734 = SHIFTRA(DUP(Rtt), SN(32, 0x30)); + RzILOpPure *op_AND_737 = LOGAND(op_RSHIFT_734, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_MUL_741 = MUL(CAST(32, MSB(CAST(16, MSB(CAST(8, MSB(op_AND_727), DUP(op_AND_727))), CAST(8, MSB(DUP(op_AND_727)), DUP(op_AND_727)))), CAST(16, MSB(CAST(8, MSB(DUP(op_AND_727)), DUP(op_AND_727))), CAST(8, MSB(DUP(op_AND_727)), DUP(op_AND_727)))), CAST(32, MSB(CAST(16, IL_FALSE, CAST(8, IL_FALSE, op_AND_737))), CAST(16, IL_FALSE, CAST(8, IL_FALSE, DUP(op_AND_737))))); + RzILOpPure *op_RSHIFT_746 = SHIFTRA(DUP(Rss), SN(32, 0x38)); + RzILOpPure *op_AND_749 = LOGAND(op_RSHIFT_746, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_RSHIFT_756 = SHIFTRA(DUP(Rtt), SN(32, 0x38)); + RzILOpPure *op_AND_759 = LOGAND(op_RSHIFT_756, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_MUL_763 = MUL(CAST(32, MSB(CAST(16, MSB(CAST(8, MSB(op_AND_749), DUP(op_AND_749))), CAST(8, MSB(DUP(op_AND_749)), DUP(op_AND_749)))), CAST(16, MSB(CAST(8, MSB(DUP(op_AND_749)), DUP(op_AND_749))), CAST(8, MSB(DUP(op_AND_749)), DUP(op_AND_749)))), CAST(32, MSB(CAST(16, IL_FALSE, CAST(8, IL_FALSE, op_AND_759))), CAST(16, IL_FALSE, CAST(8, IL_FALSE, DUP(op_AND_759))))); + RzILOpPure *op_ADD_765 = ADD(CAST(64, MSB(op_MUL_741), DUP(op_MUL_741)), CAST(64, MSB(op_MUL_763), DUP(op_MUL_763))); + RzILOpPure *op_RSHIFT_774 = SHIFTRA(DUP(Rss), SN(32, 0x30)); + RzILOpPure *op_AND_777 = LOGAND(op_RSHIFT_774, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_RSHIFT_784 = SHIFTRA(DUP(Rtt), SN(32, 0x30)); + RzILOpPure *op_AND_787 = LOGAND(op_RSHIFT_784, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_MUL_791 = MUL(CAST(32, MSB(CAST(16, MSB(CAST(8, MSB(op_AND_777), DUP(op_AND_777))), CAST(8, 
MSB(DUP(op_AND_777)), DUP(op_AND_777)))), CAST(16, MSB(CAST(8, MSB(DUP(op_AND_777)), DUP(op_AND_777))), CAST(8, MSB(DUP(op_AND_777)), DUP(op_AND_777)))), CAST(32, MSB(CAST(16, IL_FALSE, CAST(8, IL_FALSE, op_AND_787))), CAST(16, IL_FALSE, CAST(8, IL_FALSE, DUP(op_AND_787))))); + RzILOpPure *op_RSHIFT_796 = SHIFTRA(DUP(Rss), SN(32, 0x38)); + RzILOpPure *op_AND_799 = LOGAND(op_RSHIFT_796, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_RSHIFT_806 = SHIFTRA(DUP(Rtt), SN(32, 0x38)); + RzILOpPure *op_AND_809 = LOGAND(op_RSHIFT_806, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_MUL_813 = MUL(CAST(32, MSB(CAST(16, MSB(CAST(8, MSB(op_AND_799), DUP(op_AND_799))), CAST(8, MSB(DUP(op_AND_799)), DUP(op_AND_799)))), CAST(16, MSB(CAST(8, MSB(DUP(op_AND_799)), DUP(op_AND_799))), CAST(8, MSB(DUP(op_AND_799)), DUP(op_AND_799)))), CAST(32, MSB(CAST(16, IL_FALSE, CAST(8, IL_FALSE, op_AND_809))), CAST(16, IL_FALSE, CAST(8, IL_FALSE, DUP(op_AND_809))))); + RzILOpPure *op_ADD_815 = ADD(CAST(64, MSB(op_MUL_791), DUP(op_MUL_791)), CAST(64, MSB(op_MUL_813), DUP(op_MUL_813))); + RzILOpPure *op_EQ_816 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_ADD_765), SN(32, 0), SN(32, 16)), op_ADD_815); + RzILOpPure *op_RSHIFT_868 = SHIFTRA(DUP(Rss), SN(32, 0x30)); + RzILOpPure *op_AND_871 = LOGAND(op_RSHIFT_868, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_RSHIFT_878 = SHIFTRA(DUP(Rtt), SN(32, 0x30)); + RzILOpPure *op_AND_881 = LOGAND(op_RSHIFT_878, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_MUL_885 = MUL(CAST(32, MSB(CAST(16, MSB(CAST(8, MSB(op_AND_871), DUP(op_AND_871))), CAST(8, MSB(DUP(op_AND_871)), DUP(op_AND_871)))), CAST(16, MSB(CAST(8, MSB(DUP(op_AND_871)), DUP(op_AND_871))), CAST(8, MSB(DUP(op_AND_871)), DUP(op_AND_871)))), CAST(32, MSB(CAST(16, IL_FALSE, CAST(8, IL_FALSE, op_AND_881))), CAST(16, IL_FALSE, CAST(8, IL_FALSE, DUP(op_AND_881))))); + RzILOpPure *op_RSHIFT_890 = SHIFTRA(DUP(Rss), SN(32, 0x38)); + RzILOpPure *op_AND_893 = 
LOGAND(op_RSHIFT_890, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_RSHIFT_900 = SHIFTRA(DUP(Rtt), SN(32, 0x38)); + RzILOpPure *op_AND_903 = LOGAND(op_RSHIFT_900, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_MUL_907 = MUL(CAST(32, MSB(CAST(16, MSB(CAST(8, MSB(op_AND_893), DUP(op_AND_893))), CAST(8, MSB(DUP(op_AND_893)), DUP(op_AND_893)))), CAST(16, MSB(CAST(8, MSB(DUP(op_AND_893)), DUP(op_AND_893))), CAST(8, MSB(DUP(op_AND_893)), DUP(op_AND_893)))), CAST(32, MSB(CAST(16, IL_FALSE, CAST(8, IL_FALSE, op_AND_903))), CAST(16, IL_FALSE, CAST(8, IL_FALSE, DUP(op_AND_903))))); + RzILOpPure *op_ADD_909 = ADD(CAST(64, MSB(op_MUL_885), DUP(op_MUL_885)), CAST(64, MSB(op_MUL_907), DUP(op_MUL_907))); + RzILOpPure *op_LT_912 = SLT(op_ADD_909, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_917 = SHIFTL0(SN(64, 1), SN(32, 15)); + RzILOpPure *op_NEG_918 = NEG(op_LSHIFT_917); + RzILOpPure *op_LSHIFT_923 = SHIFTL0(SN(64, 1), SN(32, 15)); + RzILOpPure *op_SUB_926 = SUB(op_LSHIFT_923, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_927 = ITE(op_LT_912, op_NEG_918, op_SUB_926); + RzILOpEffect *gcc_expr_928 = BRANCH(op_EQ_816, EMPTY(), set_usr_field_call_864); + + // h_tmp448 = HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x30) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x30) & ((st64) 0xff)))))) + ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x38) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x38) & ((st64) 0xff))))))), 0x0, 0x10) == ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x30) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x30) & ((st64) 0xff)))))) + ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x38) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x38) & ((st64) 0xff)))))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((st16) ((st8) ((Rss >> 0x30) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x30) & ((st64) 0xff)))))) + 
((st64) ((st32) ((st16) ((st8) ((Rss >> 0x38) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x38) & ((st64) 0xff)))))) < ((st64) 0x0)) ? (-(0x1 << 0xf)) : (0x1 << 0xf) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_930 = SETL("h_tmp448", cond_927); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((st16) ...; + RzILOpEffect *seq_931 = SEQN(2, gcc_expr_928, op_ASSIGN_hybrid_tmp_930); + + // Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << 0x30)))) | (((ut64) (((sextract64(((ut64) ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x30) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x30) & ((st64) 0xff)))))) + ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x38) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x38) & ((st64) 0xff))))))), 0x0, 0x10) == ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x30) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x30) & ((st64) 0xff)))))) + ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x38) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x38) & ((st64) 0xff))))))) ? 
((st64) ((st32) ((st16) ((st8) ((Rss >> 0x30) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x30) & ((st64) 0xff)))))) + ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x38) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x38) & ((st64) 0xff)))))) : h_tmp448) & ((st64) 0xffff))) << 0x30))); + RzILOpPure *op_LSHIFT_715 = SHIFTL0(SN(64, 0xffff), SN(32, 0x30)); + RzILOpPure *op_NOT_716 = LOGNOT(op_LSHIFT_715); + RzILOpPure *op_AND_717 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_716); + RzILOpPure *op_RSHIFT_820 = SHIFTRA(DUP(Rss), SN(32, 0x30)); + RzILOpPure *op_AND_823 = LOGAND(op_RSHIFT_820, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_RSHIFT_830 = SHIFTRA(DUP(Rtt), SN(32, 0x30)); + RzILOpPure *op_AND_833 = LOGAND(op_RSHIFT_830, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_MUL_837 = MUL(CAST(32, MSB(CAST(16, MSB(CAST(8, MSB(op_AND_823), DUP(op_AND_823))), CAST(8, MSB(DUP(op_AND_823)), DUP(op_AND_823)))), CAST(16, MSB(CAST(8, MSB(DUP(op_AND_823)), DUP(op_AND_823))), CAST(8, MSB(DUP(op_AND_823)), DUP(op_AND_823)))), CAST(32, MSB(CAST(16, IL_FALSE, CAST(8, IL_FALSE, op_AND_833))), CAST(16, IL_FALSE, CAST(8, IL_FALSE, DUP(op_AND_833))))); + RzILOpPure *op_RSHIFT_842 = SHIFTRA(DUP(Rss), SN(32, 0x38)); + RzILOpPure *op_AND_845 = LOGAND(op_RSHIFT_842, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_RSHIFT_852 = SHIFTRA(DUP(Rtt), SN(32, 0x38)); + RzILOpPure *op_AND_855 = LOGAND(op_RSHIFT_852, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_MUL_859 = MUL(CAST(32, MSB(CAST(16, MSB(CAST(8, MSB(op_AND_845), DUP(op_AND_845))), CAST(8, MSB(DUP(op_AND_845)), DUP(op_AND_845)))), CAST(16, MSB(CAST(8, MSB(DUP(op_AND_845)), DUP(op_AND_845))), CAST(8, MSB(DUP(op_AND_845)), DUP(op_AND_845)))), CAST(32, MSB(CAST(16, IL_FALSE, CAST(8, IL_FALSE, op_AND_855))), CAST(16, IL_FALSE, CAST(8, IL_FALSE, DUP(op_AND_855))))); + RzILOpPure *op_ADD_861 = ADD(CAST(64, MSB(op_MUL_837), DUP(op_MUL_837)), CAST(64, MSB(op_MUL_859), 
DUP(op_MUL_859))); + RzILOpPure *cond_932 = ITE(DUP(op_EQ_816), op_ADD_861, VARL("h_tmp448")); + RzILOpPure *op_AND_935 = LOGAND(cond_932, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_LSHIFT_940 = SHIFTL0(CAST(64, IL_FALSE, op_AND_935), SN(32, 0x30)); + RzILOpPure *op_OR_942 = LOGOR(CAST(64, IL_FALSE, op_AND_717), op_LSHIFT_940); + RzILOpEffect *op_ASSIGN_944 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_942)); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((s ...; + RzILOpEffect *seq_945 = SEQN(2, seq_931, op_ASSIGN_944); + + RzILOpEffect *instruction_sequence = SEQN(4, seq_237, seq_473, seq_709, seq_945); + return instruction_sequence; +} + +// Rxx += vmpybsu(Rs,Rt) +RzILOpEffect *hex_il_op_m5_vmacbsu(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rxx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rxx = ((st64) (((ut64) (Rxx & (~(0xffff << 0x0)))) | (((ut64) (((st64) ((st32) ((st16) ((Rxx >> 0x0) & ((st64) 0xffff))))) + ((st64) ((st32) ((st16) ((st8) ((Rs >> 0x0) & 0xff)))) * ((st32) ((st16) ((ut8) ((Rt >> 0x0) & 0xff))))) & ((st64) 0xffff))) << 0x0))); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(SN(64, 0xffff), SN(32, 0)); + RzILOpPure *op_NOT_6 = LOGNOT(op_LSHIFT_5); + RzILOpPure *op_AND_7 = LOGAND(READ_REG(pkt, Rxx_op, false), op_NOT_6); + RzILOpPure *op_RSHIFT_11 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_14 = LOGAND(op_RSHIFT_11, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_20 = SHIFTRA(Rs, SN(32, 0)); + RzILOpPure *op_AND_22 = LOGAND(op_RSHIFT_20, SN(32, 0xff)); + RzILOpPure *op_RSHIFT_30 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_32 = LOGAND(op_RSHIFT_30, SN(32, 0xff)); + RzILOpPure *op_MUL_36 = 
MUL(CAST(32, MSB(CAST(16, MSB(CAST(8, MSB(op_AND_22), DUP(op_AND_22))), CAST(8, MSB(DUP(op_AND_22)), DUP(op_AND_22)))), CAST(16, MSB(CAST(8, MSB(DUP(op_AND_22)), DUP(op_AND_22))), CAST(8, MSB(DUP(op_AND_22)), DUP(op_AND_22)))), CAST(32, MSB(CAST(16, IL_FALSE, CAST(8, IL_FALSE, op_AND_32))), CAST(16, IL_FALSE, CAST(8, IL_FALSE, DUP(op_AND_32))))); + RzILOpPure *op_ADD_40 = ADD(CAST(64, MSB(CAST(32, MSB(CAST(16, MSB(op_AND_14), DUP(op_AND_14))), CAST(16, MSB(DUP(op_AND_14)), DUP(op_AND_14)))), CAST(32, MSB(CAST(16, MSB(DUP(op_AND_14)), DUP(op_AND_14))), CAST(16, MSB(DUP(op_AND_14)), DUP(op_AND_14)))), CAST(64, MSB(op_MUL_36), DUP(op_MUL_36))); + RzILOpPure *op_AND_43 = LOGAND(op_ADD_40, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_LSHIFT_48 = SHIFTL0(CAST(64, IL_FALSE, op_AND_43), SN(32, 0)); + RzILOpPure *op_OR_50 = LOGOR(CAST(64, IL_FALSE, op_AND_7), op_LSHIFT_48); + RzILOpEffect *op_ASSIGN_52 = WRITE_REG(bundle, Rxx_op, CAST(64, IL_FALSE, op_OR_50)); + + // Rxx = ((st64) (((ut64) (Rxx & (~(0xffff << 0x10)))) | (((ut64) (((st64) ((st32) ((st16) ((Rxx >> 0x10) & ((st64) 0xffff))))) + ((st64) ((st32) ((st16) ((st8) ((Rs >> 0x8) & 0xff)))) * ((st32) ((st16) ((ut8) ((Rt >> 0x8) & 0xff))))) & ((st64) 0xffff))) << 0x10))); + RzILOpPure *op_LSHIFT_58 = SHIFTL0(SN(64, 0xffff), SN(32, 16)); + RzILOpPure *op_NOT_59 = LOGNOT(op_LSHIFT_58); + RzILOpPure *op_AND_60 = LOGAND(READ_REG(pkt, Rxx_op, false), op_NOT_59); + RzILOpPure *op_RSHIFT_64 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 16)); + RzILOpPure *op_AND_67 = LOGAND(op_RSHIFT_64, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_72 = SHIFTRA(DUP(Rs), SN(32, 8)); + RzILOpPure *op_AND_74 = LOGAND(op_RSHIFT_72, SN(32, 0xff)); + RzILOpPure *op_RSHIFT_81 = SHIFTRA(DUP(Rt), SN(32, 8)); + RzILOpPure *op_AND_83 = LOGAND(op_RSHIFT_81, SN(32, 0xff)); + RzILOpPure *op_MUL_87 = MUL(CAST(32, MSB(CAST(16, MSB(CAST(8, MSB(op_AND_74), DUP(op_AND_74))), CAST(8, MSB(DUP(op_AND_74)), 
DUP(op_AND_74)))), CAST(16, MSB(CAST(8, MSB(DUP(op_AND_74)), DUP(op_AND_74))), CAST(8, MSB(DUP(op_AND_74)), DUP(op_AND_74)))), CAST(32, MSB(CAST(16, IL_FALSE, CAST(8, IL_FALSE, op_AND_83))), CAST(16, IL_FALSE, CAST(8, IL_FALSE, DUP(op_AND_83))))); + RzILOpPure *op_ADD_91 = ADD(CAST(64, MSB(CAST(32, MSB(CAST(16, MSB(op_AND_67), DUP(op_AND_67))), CAST(16, MSB(DUP(op_AND_67)), DUP(op_AND_67)))), CAST(32, MSB(CAST(16, MSB(DUP(op_AND_67)), DUP(op_AND_67))), CAST(16, MSB(DUP(op_AND_67)), DUP(op_AND_67)))), CAST(64, MSB(op_MUL_87), DUP(op_MUL_87))); + RzILOpPure *op_AND_94 = LOGAND(op_ADD_91, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_LSHIFT_99 = SHIFTL0(CAST(64, IL_FALSE, op_AND_94), SN(32, 16)); + RzILOpPure *op_OR_101 = LOGOR(CAST(64, IL_FALSE, op_AND_60), op_LSHIFT_99); + RzILOpEffect *op_ASSIGN_103 = WRITE_REG(bundle, Rxx_op, CAST(64, IL_FALSE, op_OR_101)); + + // Rxx = ((st64) (((ut64) (Rxx & (~(0xffff << 0x20)))) | (((ut64) (((st64) ((st32) ((st16) ((Rxx >> 0x20) & ((st64) 0xffff))))) + ((st64) ((st32) ((st16) ((st8) ((Rs >> 0x10) & 0xff)))) * ((st32) ((st16) ((ut8) ((Rt >> 0x10) & 0xff))))) & ((st64) 0xffff))) << 0x20))); + RzILOpPure *op_LSHIFT_109 = SHIFTL0(SN(64, 0xffff), SN(32, 0x20)); + RzILOpPure *op_NOT_110 = LOGNOT(op_LSHIFT_109); + RzILOpPure *op_AND_111 = LOGAND(READ_REG(pkt, Rxx_op, false), op_NOT_110); + RzILOpPure *op_RSHIFT_115 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_118 = LOGAND(op_RSHIFT_115, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_123 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_125 = LOGAND(op_RSHIFT_123, SN(32, 0xff)); + RzILOpPure *op_RSHIFT_132 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_134 = LOGAND(op_RSHIFT_132, SN(32, 0xff)); + RzILOpPure *op_MUL_138 = MUL(CAST(32, MSB(CAST(16, MSB(CAST(8, MSB(op_AND_125), DUP(op_AND_125))), CAST(8, MSB(DUP(op_AND_125)), DUP(op_AND_125)))), CAST(16, MSB(CAST(8, MSB(DUP(op_AND_125)), DUP(op_AND_125))), 
CAST(8, MSB(DUP(op_AND_125)), DUP(op_AND_125)))), CAST(32, MSB(CAST(16, IL_FALSE, CAST(8, IL_FALSE, op_AND_134))), CAST(16, IL_FALSE, CAST(8, IL_FALSE, DUP(op_AND_134))))); + RzILOpPure *op_ADD_142 = ADD(CAST(64, MSB(CAST(32, MSB(CAST(16, MSB(op_AND_118), DUP(op_AND_118))), CAST(16, MSB(DUP(op_AND_118)), DUP(op_AND_118)))), CAST(32, MSB(CAST(16, MSB(DUP(op_AND_118)), DUP(op_AND_118))), CAST(16, MSB(DUP(op_AND_118)), DUP(op_AND_118)))), CAST(64, MSB(op_MUL_138), DUP(op_MUL_138))); + RzILOpPure *op_AND_145 = LOGAND(op_ADD_142, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_LSHIFT_150 = SHIFTL0(CAST(64, IL_FALSE, op_AND_145), SN(32, 0x20)); + RzILOpPure *op_OR_152 = LOGOR(CAST(64, IL_FALSE, op_AND_111), op_LSHIFT_150); + RzILOpEffect *op_ASSIGN_154 = WRITE_REG(bundle, Rxx_op, CAST(64, IL_FALSE, op_OR_152)); + + // Rxx = ((st64) (((ut64) (Rxx & (~(0xffff << 0x30)))) | (((ut64) (((st64) ((st32) ((st16) ((Rxx >> 0x30) & ((st64) 0xffff))))) + ((st64) ((st32) ((st16) ((st8) ((Rs >> 0x18) & 0xff)))) * ((st32) ((st16) ((ut8) ((Rt >> 0x18) & 0xff))))) & ((st64) 0xffff))) << 0x30))); + RzILOpPure *op_LSHIFT_160 = SHIFTL0(SN(64, 0xffff), SN(32, 0x30)); + RzILOpPure *op_NOT_161 = LOGNOT(op_LSHIFT_160); + RzILOpPure *op_AND_162 = LOGAND(READ_REG(pkt, Rxx_op, false), op_NOT_161); + RzILOpPure *op_RSHIFT_166 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x30)); + RzILOpPure *op_AND_169 = LOGAND(op_RSHIFT_166, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_174 = SHIFTRA(DUP(Rs), SN(32, 24)); + RzILOpPure *op_AND_176 = LOGAND(op_RSHIFT_174, SN(32, 0xff)); + RzILOpPure *op_RSHIFT_183 = SHIFTRA(DUP(Rt), SN(32, 24)); + RzILOpPure *op_AND_185 = LOGAND(op_RSHIFT_183, SN(32, 0xff)); + RzILOpPure *op_MUL_189 = MUL(CAST(32, MSB(CAST(16, MSB(CAST(8, MSB(op_AND_176), DUP(op_AND_176))), CAST(8, MSB(DUP(op_AND_176)), DUP(op_AND_176)))), CAST(16, MSB(CAST(8, MSB(DUP(op_AND_176)), DUP(op_AND_176))), CAST(8, MSB(DUP(op_AND_176)), DUP(op_AND_176)))), 
CAST(32, MSB(CAST(16, IL_FALSE, CAST(8, IL_FALSE, op_AND_185))), CAST(16, IL_FALSE, CAST(8, IL_FALSE, DUP(op_AND_185))))); + RzILOpPure *op_ADD_193 = ADD(CAST(64, MSB(CAST(32, MSB(CAST(16, MSB(op_AND_169), DUP(op_AND_169))), CAST(16, MSB(DUP(op_AND_169)), DUP(op_AND_169)))), CAST(32, MSB(CAST(16, MSB(DUP(op_AND_169)), DUP(op_AND_169))), CAST(16, MSB(DUP(op_AND_169)), DUP(op_AND_169)))), CAST(64, MSB(op_MUL_189), DUP(op_MUL_189))); + RzILOpPure *op_AND_196 = LOGAND(op_ADD_193, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_LSHIFT_201 = SHIFTL0(CAST(64, IL_FALSE, op_AND_196), SN(32, 0x30)); + RzILOpPure *op_OR_203 = LOGOR(CAST(64, IL_FALSE, op_AND_162), op_LSHIFT_201); + RzILOpEffect *op_ASSIGN_205 = WRITE_REG(bundle, Rxx_op, CAST(64, IL_FALSE, op_OR_203)); + + RzILOpEffect *instruction_sequence = SEQN(4, op_ASSIGN_52, op_ASSIGN_103, op_ASSIGN_154, op_ASSIGN_205); + return instruction_sequence; +} + +// Rxx += vmpybu(Rs,Rt) +RzILOpEffect *hex_il_op_m5_vmacbuu(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rxx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rxx = ((st64) (((ut64) (Rxx & (~(0xffff << 0x0)))) | (((ut64) (((st64) ((st32) ((st16) ((Rxx >> 0x0) & ((st64) 0xffff))))) + ((st64) ((st32) ((st16) ((ut8) ((Rs >> 0x0) & 0xff)))) * ((st32) ((st16) ((ut8) ((Rt >> 0x0) & 0xff))))) & ((st64) 0xffff))) << 0x0))); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(SN(64, 0xffff), SN(32, 0)); + RzILOpPure *op_NOT_6 = LOGNOT(op_LSHIFT_5); + RzILOpPure *op_AND_7 = LOGAND(READ_REG(pkt, Rxx_op, false), op_NOT_6); + RzILOpPure *op_RSHIFT_11 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_14 = LOGAND(op_RSHIFT_11, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_20 = 
SHIFTRA(Rs, SN(32, 0)); + RzILOpPure *op_AND_22 = LOGAND(op_RSHIFT_20, SN(32, 0xff)); + RzILOpPure *op_RSHIFT_30 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_32 = LOGAND(op_RSHIFT_30, SN(32, 0xff)); + RzILOpPure *op_MUL_36 = MUL(CAST(32, MSB(CAST(16, IL_FALSE, CAST(8, IL_FALSE, op_AND_22))), CAST(16, IL_FALSE, CAST(8, IL_FALSE, DUP(op_AND_22)))), CAST(32, MSB(CAST(16, IL_FALSE, CAST(8, IL_FALSE, op_AND_32))), CAST(16, IL_FALSE, CAST(8, IL_FALSE, DUP(op_AND_32))))); + RzILOpPure *op_ADD_40 = ADD(CAST(64, MSB(CAST(32, MSB(CAST(16, MSB(op_AND_14), DUP(op_AND_14))), CAST(16, MSB(DUP(op_AND_14)), DUP(op_AND_14)))), CAST(32, MSB(CAST(16, MSB(DUP(op_AND_14)), DUP(op_AND_14))), CAST(16, MSB(DUP(op_AND_14)), DUP(op_AND_14)))), CAST(64, MSB(op_MUL_36), DUP(op_MUL_36))); + RzILOpPure *op_AND_43 = LOGAND(op_ADD_40, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_LSHIFT_48 = SHIFTL0(CAST(64, IL_FALSE, op_AND_43), SN(32, 0)); + RzILOpPure *op_OR_50 = LOGOR(CAST(64, IL_FALSE, op_AND_7), op_LSHIFT_48); + RzILOpEffect *op_ASSIGN_52 = WRITE_REG(bundle, Rxx_op, CAST(64, IL_FALSE, op_OR_50)); + + // Rxx = ((st64) (((ut64) (Rxx & (~(0xffff << 0x10)))) | (((ut64) (((st64) ((st32) ((st16) ((Rxx >> 0x10) & ((st64) 0xffff))))) + ((st64) ((st32) ((st16) ((ut8) ((Rs >> 0x8) & 0xff)))) * ((st32) ((st16) ((ut8) ((Rt >> 0x8) & 0xff))))) & ((st64) 0xffff))) << 0x10))); + RzILOpPure *op_LSHIFT_58 = SHIFTL0(SN(64, 0xffff), SN(32, 16)); + RzILOpPure *op_NOT_59 = LOGNOT(op_LSHIFT_58); + RzILOpPure *op_AND_60 = LOGAND(READ_REG(pkt, Rxx_op, false), op_NOT_59); + RzILOpPure *op_RSHIFT_64 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 16)); + RzILOpPure *op_AND_67 = LOGAND(op_RSHIFT_64, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_72 = SHIFTRA(DUP(Rs), SN(32, 8)); + RzILOpPure *op_AND_74 = LOGAND(op_RSHIFT_72, SN(32, 0xff)); + RzILOpPure *op_RSHIFT_81 = SHIFTRA(DUP(Rt), SN(32, 8)); + RzILOpPure *op_AND_83 = LOGAND(op_RSHIFT_81, SN(32, 0xff)); + RzILOpPure 
*op_MUL_87 = MUL(CAST(32, MSB(CAST(16, IL_FALSE, CAST(8, IL_FALSE, op_AND_74))), CAST(16, IL_FALSE, CAST(8, IL_FALSE, DUP(op_AND_74)))), CAST(32, MSB(CAST(16, IL_FALSE, CAST(8, IL_FALSE, op_AND_83))), CAST(16, IL_FALSE, CAST(8, IL_FALSE, DUP(op_AND_83))))); + RzILOpPure *op_ADD_91 = ADD(CAST(64, MSB(CAST(32, MSB(CAST(16, MSB(op_AND_67), DUP(op_AND_67))), CAST(16, MSB(DUP(op_AND_67)), DUP(op_AND_67)))), CAST(32, MSB(CAST(16, MSB(DUP(op_AND_67)), DUP(op_AND_67))), CAST(16, MSB(DUP(op_AND_67)), DUP(op_AND_67)))), CAST(64, MSB(op_MUL_87), DUP(op_MUL_87))); + RzILOpPure *op_AND_94 = LOGAND(op_ADD_91, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_LSHIFT_99 = SHIFTL0(CAST(64, IL_FALSE, op_AND_94), SN(32, 16)); + RzILOpPure *op_OR_101 = LOGOR(CAST(64, IL_FALSE, op_AND_60), op_LSHIFT_99); + RzILOpEffect *op_ASSIGN_103 = WRITE_REG(bundle, Rxx_op, CAST(64, IL_FALSE, op_OR_101)); + + // Rxx = ((st64) (((ut64) (Rxx & (~(0xffff << 0x20)))) | (((ut64) (((st64) ((st32) ((st16) ((Rxx >> 0x20) & ((st64) 0xffff))))) + ((st64) ((st32) ((st16) ((ut8) ((Rs >> 0x10) & 0xff)))) * ((st32) ((st16) ((ut8) ((Rt >> 0x10) & 0xff))))) & ((st64) 0xffff))) << 0x20))); + RzILOpPure *op_LSHIFT_109 = SHIFTL0(SN(64, 0xffff), SN(32, 0x20)); + RzILOpPure *op_NOT_110 = LOGNOT(op_LSHIFT_109); + RzILOpPure *op_AND_111 = LOGAND(READ_REG(pkt, Rxx_op, false), op_NOT_110); + RzILOpPure *op_RSHIFT_115 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_118 = LOGAND(op_RSHIFT_115, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_123 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_125 = LOGAND(op_RSHIFT_123, SN(32, 0xff)); + RzILOpPure *op_RSHIFT_132 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_134 = LOGAND(op_RSHIFT_132, SN(32, 0xff)); + RzILOpPure *op_MUL_138 = MUL(CAST(32, MSB(CAST(16, IL_FALSE, CAST(8, IL_FALSE, op_AND_125))), CAST(16, IL_FALSE, CAST(8, IL_FALSE, DUP(op_AND_125)))), CAST(32, MSB(CAST(16, IL_FALSE, CAST(8, IL_FALSE, 
op_AND_134))), CAST(16, IL_FALSE, CAST(8, IL_FALSE, DUP(op_AND_134))))); + RzILOpPure *op_ADD_142 = ADD(CAST(64, MSB(CAST(32, MSB(CAST(16, MSB(op_AND_118), DUP(op_AND_118))), CAST(16, MSB(DUP(op_AND_118)), DUP(op_AND_118)))), CAST(32, MSB(CAST(16, MSB(DUP(op_AND_118)), DUP(op_AND_118))), CAST(16, MSB(DUP(op_AND_118)), DUP(op_AND_118)))), CAST(64, MSB(op_MUL_138), DUP(op_MUL_138))); + RzILOpPure *op_AND_145 = LOGAND(op_ADD_142, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_LSHIFT_150 = SHIFTL0(CAST(64, IL_FALSE, op_AND_145), SN(32, 0x20)); + RzILOpPure *op_OR_152 = LOGOR(CAST(64, IL_FALSE, op_AND_111), op_LSHIFT_150); + RzILOpEffect *op_ASSIGN_154 = WRITE_REG(bundle, Rxx_op, CAST(64, IL_FALSE, op_OR_152)); + + // Rxx = ((st64) (((ut64) (Rxx & (~(0xffff << 0x30)))) | (((ut64) (((st64) ((st32) ((st16) ((Rxx >> 0x30) & ((st64) 0xffff))))) + ((st64) ((st32) ((st16) ((ut8) ((Rs >> 0x18) & 0xff)))) * ((st32) ((st16) ((ut8) ((Rt >> 0x18) & 0xff))))) & ((st64) 0xffff))) << 0x30))); + RzILOpPure *op_LSHIFT_160 = SHIFTL0(SN(64, 0xffff), SN(32, 0x30)); + RzILOpPure *op_NOT_161 = LOGNOT(op_LSHIFT_160); + RzILOpPure *op_AND_162 = LOGAND(READ_REG(pkt, Rxx_op, false), op_NOT_161); + RzILOpPure *op_RSHIFT_166 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x30)); + RzILOpPure *op_AND_169 = LOGAND(op_RSHIFT_166, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_174 = SHIFTRA(DUP(Rs), SN(32, 24)); + RzILOpPure *op_AND_176 = LOGAND(op_RSHIFT_174, SN(32, 0xff)); + RzILOpPure *op_RSHIFT_183 = SHIFTRA(DUP(Rt), SN(32, 24)); + RzILOpPure *op_AND_185 = LOGAND(op_RSHIFT_183, SN(32, 0xff)); + RzILOpPure *op_MUL_189 = MUL(CAST(32, MSB(CAST(16, IL_FALSE, CAST(8, IL_FALSE, op_AND_176))), CAST(16, IL_FALSE, CAST(8, IL_FALSE, DUP(op_AND_176)))), CAST(32, MSB(CAST(16, IL_FALSE, CAST(8, IL_FALSE, op_AND_185))), CAST(16, IL_FALSE, CAST(8, IL_FALSE, DUP(op_AND_185))))); + RzILOpPure *op_ADD_193 = ADD(CAST(64, MSB(CAST(32, MSB(CAST(16, MSB(op_AND_169), 
DUP(op_AND_169))), CAST(16, MSB(DUP(op_AND_169)), DUP(op_AND_169)))), CAST(32, MSB(CAST(16, MSB(DUP(op_AND_169)), DUP(op_AND_169))), CAST(16, MSB(DUP(op_AND_169)), DUP(op_AND_169)))), CAST(64, MSB(op_MUL_189), DUP(op_MUL_189))); + RzILOpPure *op_AND_196 = LOGAND(op_ADD_193, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_LSHIFT_201 = SHIFTL0(CAST(64, IL_FALSE, op_AND_196), SN(32, 0x30)); + RzILOpPure *op_OR_203 = LOGOR(CAST(64, IL_FALSE, op_AND_162), op_LSHIFT_201); + RzILOpEffect *op_ASSIGN_205 = WRITE_REG(bundle, Rxx_op, CAST(64, IL_FALSE, op_OR_203)); + + RzILOpEffect *instruction_sequence = SEQN(4, op_ASSIGN_52, op_ASSIGN_103, op_ASSIGN_154, op_ASSIGN_205); + return instruction_sequence; +} + +// Rdd = vmpybsu(Rs,Rt) +RzILOpEffect *hex_il_op_m5_vmpybsu(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << 0x0)))) | (((ut64) (((st64) ((st32) ((st16) ((st8) ((Rs >> 0x0) & 0xff)))) * ((st32) ((st16) ((ut8) ((Rt >> 0x0) & 0xff))))) & ((st64) 0xffff))) << 0x0))); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(SN(64, 0xffff), SN(32, 0)); + RzILOpPure *op_NOT_6 = LOGNOT(op_LSHIFT_5); + RzILOpPure *op_AND_7 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_6); + RzILOpPure *op_RSHIFT_12 = SHIFTRA(Rs, SN(32, 0)); + RzILOpPure *op_AND_14 = LOGAND(op_RSHIFT_12, SN(32, 0xff)); + RzILOpPure *op_RSHIFT_22 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_24 = LOGAND(op_RSHIFT_22, SN(32, 0xff)); + RzILOpPure *op_MUL_28 = MUL(CAST(32, MSB(CAST(16, MSB(CAST(8, MSB(op_AND_14), DUP(op_AND_14))), CAST(8, MSB(DUP(op_AND_14)), DUP(op_AND_14)))), CAST(16, MSB(CAST(8, MSB(DUP(op_AND_14)), DUP(op_AND_14))), CAST(8, MSB(DUP(op_AND_14)), DUP(op_AND_14)))), 
CAST(32, MSB(CAST(16, IL_FALSE, CAST(8, IL_FALSE, op_AND_24))), CAST(16, IL_FALSE, CAST(8, IL_FALSE, DUP(op_AND_24))))); + RzILOpPure *op_AND_32 = LOGAND(CAST(64, MSB(op_MUL_28), DUP(op_MUL_28)), CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_LSHIFT_37 = SHIFTL0(CAST(64, IL_FALSE, op_AND_32), SN(32, 0)); + RzILOpPure *op_OR_39 = LOGOR(CAST(64, IL_FALSE, op_AND_7), op_LSHIFT_37); + RzILOpEffect *op_ASSIGN_41 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_39)); + + // Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << 0x10)))) | (((ut64) (((st64) ((st32) ((st16) ((st8) ((Rs >> 0x8) & 0xff)))) * ((st32) ((st16) ((ut8) ((Rt >> 0x8) & 0xff))))) & ((st64) 0xffff))) << 0x10))); + RzILOpPure *op_LSHIFT_47 = SHIFTL0(SN(64, 0xffff), SN(32, 16)); + RzILOpPure *op_NOT_48 = LOGNOT(op_LSHIFT_47); + RzILOpPure *op_AND_49 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_48); + RzILOpPure *op_RSHIFT_53 = SHIFTRA(DUP(Rs), SN(32, 8)); + RzILOpPure *op_AND_55 = LOGAND(op_RSHIFT_53, SN(32, 0xff)); + RzILOpPure *op_RSHIFT_62 = SHIFTRA(DUP(Rt), SN(32, 8)); + RzILOpPure *op_AND_64 = LOGAND(op_RSHIFT_62, SN(32, 0xff)); + RzILOpPure *op_MUL_68 = MUL(CAST(32, MSB(CAST(16, MSB(CAST(8, MSB(op_AND_55), DUP(op_AND_55))), CAST(8, MSB(DUP(op_AND_55)), DUP(op_AND_55)))), CAST(16, MSB(CAST(8, MSB(DUP(op_AND_55)), DUP(op_AND_55))), CAST(8, MSB(DUP(op_AND_55)), DUP(op_AND_55)))), CAST(32, MSB(CAST(16, IL_FALSE, CAST(8, IL_FALSE, op_AND_64))), CAST(16, IL_FALSE, CAST(8, IL_FALSE, DUP(op_AND_64))))); + RzILOpPure *op_AND_72 = LOGAND(CAST(64, MSB(op_MUL_68), DUP(op_MUL_68)), CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_LSHIFT_77 = SHIFTL0(CAST(64, IL_FALSE, op_AND_72), SN(32, 16)); + RzILOpPure *op_OR_79 = LOGOR(CAST(64, IL_FALSE, op_AND_49), op_LSHIFT_77); + RzILOpEffect *op_ASSIGN_81 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_79)); + + // Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << 0x20)))) | (((ut64) (((st64) ((st32) ((st16) ((st8) ((Rs >> 0x10) & 0xff)))) * 
((st32) ((st16) ((ut8) ((Rt >> 0x10) & 0xff))))) & ((st64) 0xffff))) << 0x20))); + RzILOpPure *op_LSHIFT_87 = SHIFTL0(SN(64, 0xffff), SN(32, 0x20)); + RzILOpPure *op_NOT_88 = LOGNOT(op_LSHIFT_87); + RzILOpPure *op_AND_89 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_88); + RzILOpPure *op_RSHIFT_93 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_95 = LOGAND(op_RSHIFT_93, SN(32, 0xff)); + RzILOpPure *op_RSHIFT_102 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_104 = LOGAND(op_RSHIFT_102, SN(32, 0xff)); + RzILOpPure *op_MUL_108 = MUL(CAST(32, MSB(CAST(16, MSB(CAST(8, MSB(op_AND_95), DUP(op_AND_95))), CAST(8, MSB(DUP(op_AND_95)), DUP(op_AND_95)))), CAST(16, MSB(CAST(8, MSB(DUP(op_AND_95)), DUP(op_AND_95))), CAST(8, MSB(DUP(op_AND_95)), DUP(op_AND_95)))), CAST(32, MSB(CAST(16, IL_FALSE, CAST(8, IL_FALSE, op_AND_104))), CAST(16, IL_FALSE, CAST(8, IL_FALSE, DUP(op_AND_104))))); + RzILOpPure *op_AND_112 = LOGAND(CAST(64, MSB(op_MUL_108), DUP(op_MUL_108)), CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_LSHIFT_117 = SHIFTL0(CAST(64, IL_FALSE, op_AND_112), SN(32, 0x20)); + RzILOpPure *op_OR_119 = LOGOR(CAST(64, IL_FALSE, op_AND_89), op_LSHIFT_117); + RzILOpEffect *op_ASSIGN_121 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_119)); + + // Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << 0x30)))) | (((ut64) (((st64) ((st32) ((st16) ((st8) ((Rs >> 0x18) & 0xff)))) * ((st32) ((st16) ((ut8) ((Rt >> 0x18) & 0xff))))) & ((st64) 0xffff))) << 0x30))); + RzILOpPure *op_LSHIFT_127 = SHIFTL0(SN(64, 0xffff), SN(32, 0x30)); + RzILOpPure *op_NOT_128 = LOGNOT(op_LSHIFT_127); + RzILOpPure *op_AND_129 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_128); + RzILOpPure *op_RSHIFT_133 = SHIFTRA(DUP(Rs), SN(32, 24)); + RzILOpPure *op_AND_135 = LOGAND(op_RSHIFT_133, SN(32, 0xff)); + RzILOpPure *op_RSHIFT_142 = SHIFTRA(DUP(Rt), SN(32, 24)); + RzILOpPure *op_AND_144 = LOGAND(op_RSHIFT_142, SN(32, 0xff)); + RzILOpPure *op_MUL_148 = MUL(CAST(32, MSB(CAST(16, MSB(CAST(8, 
MSB(op_AND_135), DUP(op_AND_135))), CAST(8, MSB(DUP(op_AND_135)), DUP(op_AND_135)))), CAST(16, MSB(CAST(8, MSB(DUP(op_AND_135)), DUP(op_AND_135))), CAST(8, MSB(DUP(op_AND_135)), DUP(op_AND_135)))), CAST(32, MSB(CAST(16, IL_FALSE, CAST(8, IL_FALSE, op_AND_144))), CAST(16, IL_FALSE, CAST(8, IL_FALSE, DUP(op_AND_144))))); + RzILOpPure *op_AND_152 = LOGAND(CAST(64, MSB(op_MUL_148), DUP(op_MUL_148)), CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_LSHIFT_157 = SHIFTL0(CAST(64, IL_FALSE, op_AND_152), SN(32, 0x30)); + RzILOpPure *op_OR_159 = LOGOR(CAST(64, IL_FALSE, op_AND_129), op_LSHIFT_157); + RzILOpEffect *op_ASSIGN_161 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_159)); + + RzILOpEffect *instruction_sequence = SEQN(4, op_ASSIGN_41, op_ASSIGN_81, op_ASSIGN_121, op_ASSIGN_161); + return instruction_sequence; +} + +// Rdd = vmpybu(Rs,Rt) +RzILOpEffect *hex_il_op_m5_vmpybuu(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << 0x0)))) | (((ut64) (((st64) ((st32) ((st16) ((ut8) ((Rs >> 0x0) & 0xff)))) * ((st32) ((st16) ((ut8) ((Rt >> 0x0) & 0xff))))) & ((st64) 0xffff))) << 0x0))); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(SN(64, 0xffff), SN(32, 0)); + RzILOpPure *op_NOT_6 = LOGNOT(op_LSHIFT_5); + RzILOpPure *op_AND_7 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_6); + RzILOpPure *op_RSHIFT_12 = SHIFTRA(Rs, SN(32, 0)); + RzILOpPure *op_AND_14 = LOGAND(op_RSHIFT_12, SN(32, 0xff)); + RzILOpPure *op_RSHIFT_22 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_24 = LOGAND(op_RSHIFT_22, SN(32, 0xff)); + RzILOpPure *op_MUL_28 = MUL(CAST(32, MSB(CAST(16, IL_FALSE, CAST(8, IL_FALSE, op_AND_14))), CAST(16, IL_FALSE, CAST(8, IL_FALSE, 
DUP(op_AND_14)))), CAST(32, MSB(CAST(16, IL_FALSE, CAST(8, IL_FALSE, op_AND_24))), CAST(16, IL_FALSE, CAST(8, IL_FALSE, DUP(op_AND_24))))); + RzILOpPure *op_AND_32 = LOGAND(CAST(64, MSB(op_MUL_28), DUP(op_MUL_28)), CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_LSHIFT_37 = SHIFTL0(CAST(64, IL_FALSE, op_AND_32), SN(32, 0)); + RzILOpPure *op_OR_39 = LOGOR(CAST(64, IL_FALSE, op_AND_7), op_LSHIFT_37); + RzILOpEffect *op_ASSIGN_41 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_39)); + + // Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << 0x10)))) | (((ut64) (((st64) ((st32) ((st16) ((ut8) ((Rs >> 0x8) & 0xff)))) * ((st32) ((st16) ((ut8) ((Rt >> 0x8) & 0xff))))) & ((st64) 0xffff))) << 0x10))); + RzILOpPure *op_LSHIFT_47 = SHIFTL0(SN(64, 0xffff), SN(32, 16)); + RzILOpPure *op_NOT_48 = LOGNOT(op_LSHIFT_47); + RzILOpPure *op_AND_49 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_48); + RzILOpPure *op_RSHIFT_53 = SHIFTRA(DUP(Rs), SN(32, 8)); + RzILOpPure *op_AND_55 = LOGAND(op_RSHIFT_53, SN(32, 0xff)); + RzILOpPure *op_RSHIFT_62 = SHIFTRA(DUP(Rt), SN(32, 8)); + RzILOpPure *op_AND_64 = LOGAND(op_RSHIFT_62, SN(32, 0xff)); + RzILOpPure *op_MUL_68 = MUL(CAST(32, MSB(CAST(16, IL_FALSE, CAST(8, IL_FALSE, op_AND_55))), CAST(16, IL_FALSE, CAST(8, IL_FALSE, DUP(op_AND_55)))), CAST(32, MSB(CAST(16, IL_FALSE, CAST(8, IL_FALSE, op_AND_64))), CAST(16, IL_FALSE, CAST(8, IL_FALSE, DUP(op_AND_64))))); + RzILOpPure *op_AND_72 = LOGAND(CAST(64, MSB(op_MUL_68), DUP(op_MUL_68)), CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_LSHIFT_77 = SHIFTL0(CAST(64, IL_FALSE, op_AND_72), SN(32, 16)); + RzILOpPure *op_OR_79 = LOGOR(CAST(64, IL_FALSE, op_AND_49), op_LSHIFT_77); + RzILOpEffect *op_ASSIGN_81 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_79)); + + // Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << 0x20)))) | (((ut64) (((st64) ((st32) ((st16) ((ut8) ((Rs >> 0x10) & 0xff)))) * ((st32) ((st16) ((ut8) ((Rt >> 0x10) & 0xff))))) & ((st64) 0xffff))) << 0x20))); + 
RzILOpPure *op_LSHIFT_87 = SHIFTL0(SN(64, 0xffff), SN(32, 0x20)); + RzILOpPure *op_NOT_88 = LOGNOT(op_LSHIFT_87); + RzILOpPure *op_AND_89 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_88); + RzILOpPure *op_RSHIFT_93 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_95 = LOGAND(op_RSHIFT_93, SN(32, 0xff)); + RzILOpPure *op_RSHIFT_102 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_104 = LOGAND(op_RSHIFT_102, SN(32, 0xff)); + RzILOpPure *op_MUL_108 = MUL(CAST(32, MSB(CAST(16, IL_FALSE, CAST(8, IL_FALSE, op_AND_95))), CAST(16, IL_FALSE, CAST(8, IL_FALSE, DUP(op_AND_95)))), CAST(32, MSB(CAST(16, IL_FALSE, CAST(8, IL_FALSE, op_AND_104))), CAST(16, IL_FALSE, CAST(8, IL_FALSE, DUP(op_AND_104))))); + RzILOpPure *op_AND_112 = LOGAND(CAST(64, MSB(op_MUL_108), DUP(op_MUL_108)), CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_LSHIFT_117 = SHIFTL0(CAST(64, IL_FALSE, op_AND_112), SN(32, 0x20)); + RzILOpPure *op_OR_119 = LOGOR(CAST(64, IL_FALSE, op_AND_89), op_LSHIFT_117); + RzILOpEffect *op_ASSIGN_121 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_119)); + + // Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << 0x30)))) | (((ut64) (((st64) ((st32) ((st16) ((ut8) ((Rs >> 0x18) & 0xff)))) * ((st32) ((st16) ((ut8) ((Rt >> 0x18) & 0xff))))) & ((st64) 0xffff))) << 0x30))); + RzILOpPure *op_LSHIFT_127 = SHIFTL0(SN(64, 0xffff), SN(32, 0x30)); + RzILOpPure *op_NOT_128 = LOGNOT(op_LSHIFT_127); + RzILOpPure *op_AND_129 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_128); + RzILOpPure *op_RSHIFT_133 = SHIFTRA(DUP(Rs), SN(32, 24)); + RzILOpPure *op_AND_135 = LOGAND(op_RSHIFT_133, SN(32, 0xff)); + RzILOpPure *op_RSHIFT_142 = SHIFTRA(DUP(Rt), SN(32, 24)); + RzILOpPure *op_AND_144 = LOGAND(op_RSHIFT_142, SN(32, 0xff)); + RzILOpPure *op_MUL_148 = MUL(CAST(32, MSB(CAST(16, IL_FALSE, CAST(8, IL_FALSE, op_AND_135))), CAST(16, IL_FALSE, CAST(8, IL_FALSE, DUP(op_AND_135)))), CAST(32, MSB(CAST(16, IL_FALSE, CAST(8, IL_FALSE, op_AND_144))), CAST(16, IL_FALSE, CAST(8, IL_FALSE, 
DUP(op_AND_144))))); + RzILOpPure *op_AND_152 = LOGAND(CAST(64, MSB(op_MUL_148), DUP(op_MUL_148)), CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_LSHIFT_157 = SHIFTL0(CAST(64, IL_FALSE, op_AND_152), SN(32, 0x30)); + RzILOpPure *op_OR_159 = LOGOR(CAST(64, IL_FALSE, op_AND_129), op_LSHIFT_157); + RzILOpEffect *op_ASSIGN_161 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_159)); + + RzILOpEffect *instruction_sequence = SEQN(4, op_ASSIGN_41, op_ASSIGN_81, op_ASSIGN_121, op_ASSIGN_161); + return instruction_sequence; +} + +// Rxx += vrmpybsu(Rss,Rtt) +RzILOpEffect *hex_il_op_m5_vrmacbsu(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rxx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // Rxx = ((Rxx & (~(0xffffffff << 0x0))) | ((((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x0) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x0) & ((st64) 0xff)))))) + ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x8) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x8) & ((st64) 0xff)))))) + ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x10) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x10) & ((st64) 0xff)))))) + ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x18) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x18) & ((st64) 0xff)))))) & 0xffffffff) << 0x0)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0)); + RzILOpPure *op_NOT_6 = LOGNOT(op_LSHIFT_5); + RzILOpPure *op_AND_7 = LOGAND(READ_REG(pkt, Rxx_op, false), op_NOT_6); + RzILOpPure *op_RSHIFT_11 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_13 = LOGAND(op_RSHIFT_11, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_20 = SHIFTRA(Rss, SN(32, 0)); + RzILOpPure 
*op_AND_23 = LOGAND(op_RSHIFT_20, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_RSHIFT_31 = SHIFTRA(Rtt, SN(32, 0)); + RzILOpPure *op_AND_34 = LOGAND(op_RSHIFT_31, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_MUL_38 = MUL(CAST(32, MSB(CAST(16, MSB(CAST(8, MSB(op_AND_23), DUP(op_AND_23))), CAST(8, MSB(DUP(op_AND_23)), DUP(op_AND_23)))), CAST(16, MSB(CAST(8, MSB(DUP(op_AND_23)), DUP(op_AND_23))), CAST(8, MSB(DUP(op_AND_23)), DUP(op_AND_23)))), CAST(32, MSB(CAST(16, IL_FALSE, CAST(8, IL_FALSE, op_AND_34))), CAST(16, IL_FALSE, CAST(8, IL_FALSE, DUP(op_AND_34))))); + RzILOpPure *op_ADD_40 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_13), DUP(op_AND_13))), CAST(32, MSB(DUP(op_AND_13)), DUP(op_AND_13))), CAST(64, MSB(op_MUL_38), DUP(op_MUL_38))); + RzILOpPure *op_RSHIFT_44 = SHIFTRA(DUP(Rss), SN(32, 8)); + RzILOpPure *op_AND_47 = LOGAND(op_RSHIFT_44, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_RSHIFT_54 = SHIFTRA(DUP(Rtt), SN(32, 8)); + RzILOpPure *op_AND_57 = LOGAND(op_RSHIFT_54, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_MUL_61 = MUL(CAST(32, MSB(CAST(16, MSB(CAST(8, MSB(op_AND_47), DUP(op_AND_47))), CAST(8, MSB(DUP(op_AND_47)), DUP(op_AND_47)))), CAST(16, MSB(CAST(8, MSB(DUP(op_AND_47)), DUP(op_AND_47))), CAST(8, MSB(DUP(op_AND_47)), DUP(op_AND_47)))), CAST(32, MSB(CAST(16, IL_FALSE, CAST(8, IL_FALSE, op_AND_57))), CAST(16, IL_FALSE, CAST(8, IL_FALSE, DUP(op_AND_57))))); + RzILOpPure *op_ADD_63 = ADD(op_ADD_40, CAST(64, MSB(op_MUL_61), DUP(op_MUL_61))); + RzILOpPure *op_RSHIFT_67 = SHIFTRA(DUP(Rss), SN(32, 16)); + RzILOpPure *op_AND_70 = LOGAND(op_RSHIFT_67, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_RSHIFT_77 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_80 = LOGAND(op_RSHIFT_77, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_MUL_84 = MUL(CAST(32, MSB(CAST(16, MSB(CAST(8, MSB(op_AND_70), DUP(op_AND_70))), CAST(8, MSB(DUP(op_AND_70)), DUP(op_AND_70)))), CAST(16, 
MSB(CAST(8, MSB(DUP(op_AND_70)), DUP(op_AND_70))), CAST(8, MSB(DUP(op_AND_70)), DUP(op_AND_70)))), CAST(32, MSB(CAST(16, IL_FALSE, CAST(8, IL_FALSE, op_AND_80))), CAST(16, IL_FALSE, CAST(8, IL_FALSE, DUP(op_AND_80))))); + RzILOpPure *op_ADD_86 = ADD(op_ADD_63, CAST(64, MSB(op_MUL_84), DUP(op_MUL_84))); + RzILOpPure *op_RSHIFT_90 = SHIFTRA(DUP(Rss), SN(32, 24)); + RzILOpPure *op_AND_93 = LOGAND(op_RSHIFT_90, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_RSHIFT_100 = SHIFTRA(DUP(Rtt), SN(32, 24)); + RzILOpPure *op_AND_103 = LOGAND(op_RSHIFT_100, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_MUL_107 = MUL(CAST(32, MSB(CAST(16, MSB(CAST(8, MSB(op_AND_93), DUP(op_AND_93))), CAST(8, MSB(DUP(op_AND_93)), DUP(op_AND_93)))), CAST(16, MSB(CAST(8, MSB(DUP(op_AND_93)), DUP(op_AND_93))), CAST(8, MSB(DUP(op_AND_93)), DUP(op_AND_93)))), CAST(32, MSB(CAST(16, IL_FALSE, CAST(8, IL_FALSE, op_AND_103))), CAST(16, IL_FALSE, CAST(8, IL_FALSE, DUP(op_AND_103))))); + RzILOpPure *op_ADD_109 = ADD(op_ADD_86, CAST(64, MSB(op_MUL_107), DUP(op_MUL_107))); + RzILOpPure *op_AND_111 = LOGAND(op_ADD_109, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_115 = SHIFTL0(op_AND_111, SN(32, 0)); + RzILOpPure *op_OR_116 = LOGOR(op_AND_7, op_LSHIFT_115); + RzILOpEffect *op_ASSIGN_117 = WRITE_REG(bundle, Rxx_op, op_OR_116); + + // Rxx = ((Rxx & (~(0xffffffff << 0x20))) | ((((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x20) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x20) & ((st64) 0xff)))))) + ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x28) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x28) & ((st64) 0xff)))))) + ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x30) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x30) & ((st64) 0xff)))))) + ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x38) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x38) & ((st64) 0xff)))))) & 0xffffffff) << 0x20)); + RzILOpPure *op_LSHIFT_123 = 
SHIFTL0(SN(64, 0xffffffff), SN(32, 0x20)); + RzILOpPure *op_NOT_124 = LOGNOT(op_LSHIFT_123); + RzILOpPure *op_AND_125 = LOGAND(READ_REG(pkt, Rxx_op, false), op_NOT_124); + RzILOpPure *op_RSHIFT_129 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_131 = LOGAND(op_RSHIFT_129, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_137 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_140 = LOGAND(op_RSHIFT_137, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_RSHIFT_147 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_150 = LOGAND(op_RSHIFT_147, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_MUL_154 = MUL(CAST(32, MSB(CAST(16, MSB(CAST(8, MSB(op_AND_140), DUP(op_AND_140))), CAST(8, MSB(DUP(op_AND_140)), DUP(op_AND_140)))), CAST(16, MSB(CAST(8, MSB(DUP(op_AND_140)), DUP(op_AND_140))), CAST(8, MSB(DUP(op_AND_140)), DUP(op_AND_140)))), CAST(32, MSB(CAST(16, IL_FALSE, CAST(8, IL_FALSE, op_AND_150))), CAST(16, IL_FALSE, CAST(8, IL_FALSE, DUP(op_AND_150))))); + RzILOpPure *op_ADD_156 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_131), DUP(op_AND_131))), CAST(32, MSB(DUP(op_AND_131)), DUP(op_AND_131))), CAST(64, MSB(op_MUL_154), DUP(op_MUL_154))); + RzILOpPure *op_RSHIFT_160 = SHIFTRA(DUP(Rss), SN(32, 0x28)); + RzILOpPure *op_AND_163 = LOGAND(op_RSHIFT_160, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_RSHIFT_170 = SHIFTRA(DUP(Rtt), SN(32, 0x28)); + RzILOpPure *op_AND_173 = LOGAND(op_RSHIFT_170, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_MUL_177 = MUL(CAST(32, MSB(CAST(16, MSB(CAST(8, MSB(op_AND_163), DUP(op_AND_163))), CAST(8, MSB(DUP(op_AND_163)), DUP(op_AND_163)))), CAST(16, MSB(CAST(8, MSB(DUP(op_AND_163)), DUP(op_AND_163))), CAST(8, MSB(DUP(op_AND_163)), DUP(op_AND_163)))), CAST(32, MSB(CAST(16, IL_FALSE, CAST(8, IL_FALSE, op_AND_173))), CAST(16, IL_FALSE, CAST(8, IL_FALSE, DUP(op_AND_173))))); + RzILOpPure *op_ADD_179 = ADD(op_ADD_156, CAST(64, MSB(op_MUL_177), DUP(op_MUL_177))); + 
RzILOpPure *op_RSHIFT_183 = SHIFTRA(DUP(Rss), SN(32, 0x30)); + RzILOpPure *op_AND_186 = LOGAND(op_RSHIFT_183, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_RSHIFT_193 = SHIFTRA(DUP(Rtt), SN(32, 0x30)); + RzILOpPure *op_AND_196 = LOGAND(op_RSHIFT_193, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_MUL_200 = MUL(CAST(32, MSB(CAST(16, MSB(CAST(8, MSB(op_AND_186), DUP(op_AND_186))), CAST(8, MSB(DUP(op_AND_186)), DUP(op_AND_186)))), CAST(16, MSB(CAST(8, MSB(DUP(op_AND_186)), DUP(op_AND_186))), CAST(8, MSB(DUP(op_AND_186)), DUP(op_AND_186)))), CAST(32, MSB(CAST(16, IL_FALSE, CAST(8, IL_FALSE, op_AND_196))), CAST(16, IL_FALSE, CAST(8, IL_FALSE, DUP(op_AND_196))))); + RzILOpPure *op_ADD_202 = ADD(op_ADD_179, CAST(64, MSB(op_MUL_200), DUP(op_MUL_200))); + RzILOpPure *op_RSHIFT_206 = SHIFTRA(DUP(Rss), SN(32, 0x38)); + RzILOpPure *op_AND_209 = LOGAND(op_RSHIFT_206, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_RSHIFT_216 = SHIFTRA(DUP(Rtt), SN(32, 0x38)); + RzILOpPure *op_AND_219 = LOGAND(op_RSHIFT_216, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_MUL_223 = MUL(CAST(32, MSB(CAST(16, MSB(CAST(8, MSB(op_AND_209), DUP(op_AND_209))), CAST(8, MSB(DUP(op_AND_209)), DUP(op_AND_209)))), CAST(16, MSB(CAST(8, MSB(DUP(op_AND_209)), DUP(op_AND_209))), CAST(8, MSB(DUP(op_AND_209)), DUP(op_AND_209)))), CAST(32, MSB(CAST(16, IL_FALSE, CAST(8, IL_FALSE, op_AND_219))), CAST(16, IL_FALSE, CAST(8, IL_FALSE, DUP(op_AND_219))))); + RzILOpPure *op_ADD_225 = ADD(op_ADD_202, CAST(64, MSB(op_MUL_223), DUP(op_MUL_223))); + RzILOpPure *op_AND_227 = LOGAND(op_ADD_225, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_231 = SHIFTL0(op_AND_227, SN(32, 0x20)); + RzILOpPure *op_OR_232 = LOGOR(op_AND_125, op_LSHIFT_231); + RzILOpEffect *op_ASSIGN_233 = WRITE_REG(bundle, Rxx_op, op_OR_232); + + RzILOpEffect *instruction_sequence = SEQN(2, op_ASSIGN_117, op_ASSIGN_233); + return instruction_sequence; +} + +// Rxx += vrmpybu(Rss,Rtt) +RzILOpEffect 
*hex_il_op_m5_vrmacbuu(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rxx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // Rxx = ((Rxx & (~(0xffffffff << 0x0))) | ((((st64) ((st32) ((Rxx >> 0x0) & 0xffffffff))) + ((st64) ((st32) ((st16) ((ut8) ((Rss >> 0x0) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x0) & ((st64) 0xff)))))) + ((st64) ((st32) ((st16) ((ut8) ((Rss >> 0x8) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x8) & ((st64) 0xff)))))) + ((st64) ((st32) ((st16) ((ut8) ((Rss >> 0x10) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x10) & ((st64) 0xff)))))) + ((st64) ((st32) ((st16) ((ut8) ((Rss >> 0x18) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x18) & ((st64) 0xff)))))) & 0xffffffff) << 0x0)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0)); + RzILOpPure *op_NOT_6 = LOGNOT(op_LSHIFT_5); + RzILOpPure *op_AND_7 = LOGAND(READ_REG(pkt, Rxx_op, false), op_NOT_6); + RzILOpPure *op_RSHIFT_11 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0)); + RzILOpPure *op_AND_13 = LOGAND(op_RSHIFT_11, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_20 = SHIFTRA(Rss, SN(32, 0)); + RzILOpPure *op_AND_23 = LOGAND(op_RSHIFT_20, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_RSHIFT_31 = SHIFTRA(Rtt, SN(32, 0)); + RzILOpPure *op_AND_34 = LOGAND(op_RSHIFT_31, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_MUL_38 = MUL(CAST(32, MSB(CAST(16, IL_FALSE, CAST(8, IL_FALSE, op_AND_23))), CAST(16, IL_FALSE, CAST(8, IL_FALSE, DUP(op_AND_23)))), CAST(32, MSB(CAST(16, IL_FALSE, CAST(8, IL_FALSE, op_AND_34))), CAST(16, IL_FALSE, CAST(8, IL_FALSE, DUP(op_AND_34))))); + RzILOpPure *op_ADD_40 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_13), DUP(op_AND_13))), CAST(32, 
MSB(DUP(op_AND_13)), DUP(op_AND_13))), CAST(64, MSB(op_MUL_38), DUP(op_MUL_38))); + RzILOpPure *op_RSHIFT_44 = SHIFTRA(DUP(Rss), SN(32, 8)); + RzILOpPure *op_AND_47 = LOGAND(op_RSHIFT_44, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_RSHIFT_54 = SHIFTRA(DUP(Rtt), SN(32, 8)); + RzILOpPure *op_AND_57 = LOGAND(op_RSHIFT_54, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_MUL_61 = MUL(CAST(32, MSB(CAST(16, IL_FALSE, CAST(8, IL_FALSE, op_AND_47))), CAST(16, IL_FALSE, CAST(8, IL_FALSE, DUP(op_AND_47)))), CAST(32, MSB(CAST(16, IL_FALSE, CAST(8, IL_FALSE, op_AND_57))), CAST(16, IL_FALSE, CAST(8, IL_FALSE, DUP(op_AND_57))))); + RzILOpPure *op_ADD_63 = ADD(op_ADD_40, CAST(64, MSB(op_MUL_61), DUP(op_MUL_61))); + RzILOpPure *op_RSHIFT_67 = SHIFTRA(DUP(Rss), SN(32, 16)); + RzILOpPure *op_AND_70 = LOGAND(op_RSHIFT_67, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_RSHIFT_77 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_80 = LOGAND(op_RSHIFT_77, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_MUL_84 = MUL(CAST(32, MSB(CAST(16, IL_FALSE, CAST(8, IL_FALSE, op_AND_70))), CAST(16, IL_FALSE, CAST(8, IL_FALSE, DUP(op_AND_70)))), CAST(32, MSB(CAST(16, IL_FALSE, CAST(8, IL_FALSE, op_AND_80))), CAST(16, IL_FALSE, CAST(8, IL_FALSE, DUP(op_AND_80))))); + RzILOpPure *op_ADD_86 = ADD(op_ADD_63, CAST(64, MSB(op_MUL_84), DUP(op_MUL_84))); + RzILOpPure *op_RSHIFT_90 = SHIFTRA(DUP(Rss), SN(32, 24)); + RzILOpPure *op_AND_93 = LOGAND(op_RSHIFT_90, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_RSHIFT_100 = SHIFTRA(DUP(Rtt), SN(32, 24)); + RzILOpPure *op_AND_103 = LOGAND(op_RSHIFT_100, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_MUL_107 = MUL(CAST(32, MSB(CAST(16, IL_FALSE, CAST(8, IL_FALSE, op_AND_93))), CAST(16, IL_FALSE, CAST(8, IL_FALSE, DUP(op_AND_93)))), CAST(32, MSB(CAST(16, IL_FALSE, CAST(8, IL_FALSE, op_AND_103))), CAST(16, IL_FALSE, CAST(8, IL_FALSE, DUP(op_AND_103))))); + RzILOpPure *op_ADD_109 = 
ADD(op_ADD_86, CAST(64, MSB(op_MUL_107), DUP(op_MUL_107))); + RzILOpPure *op_AND_111 = LOGAND(op_ADD_109, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_115 = SHIFTL0(op_AND_111, SN(32, 0)); + RzILOpPure *op_OR_116 = LOGOR(op_AND_7, op_LSHIFT_115); + RzILOpEffect *op_ASSIGN_117 = WRITE_REG(bundle, Rxx_op, op_OR_116); + + // Rxx = ((Rxx & (~(0xffffffff << 0x20))) | ((((st64) ((st32) ((Rxx >> 0x20) & 0xffffffff))) + ((st64) ((st32) ((st16) ((ut8) ((Rss >> 0x20) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x20) & ((st64) 0xff)))))) + ((st64) ((st32) ((st16) ((ut8) ((Rss >> 0x28) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x28) & ((st64) 0xff)))))) + ((st64) ((st32) ((st16) ((ut8) ((Rss >> 0x30) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x30) & ((st64) 0xff)))))) + ((st64) ((st32) ((st16) ((ut8) ((Rss >> 0x38) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x38) & ((st64) 0xff)))))) & 0xffffffff) << 0x20)); + RzILOpPure *op_LSHIFT_123 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0x20)); + RzILOpPure *op_NOT_124 = LOGNOT(op_LSHIFT_123); + RzILOpPure *op_AND_125 = LOGAND(READ_REG(pkt, Rxx_op, false), op_NOT_124); + RzILOpPure *op_RSHIFT_129 = SHIFTRA(READ_REG(pkt, Rxx_op, false), SN(32, 0x20)); + RzILOpPure *op_AND_131 = LOGAND(op_RSHIFT_129, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_137 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_140 = LOGAND(op_RSHIFT_137, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_RSHIFT_147 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_150 = LOGAND(op_RSHIFT_147, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_MUL_154 = MUL(CAST(32, MSB(CAST(16, IL_FALSE, CAST(8, IL_FALSE, op_AND_140))), CAST(16, IL_FALSE, CAST(8, IL_FALSE, DUP(op_AND_140)))), CAST(32, MSB(CAST(16, IL_FALSE, CAST(8, IL_FALSE, op_AND_150))), CAST(16, IL_FALSE, CAST(8, IL_FALSE, DUP(op_AND_150))))); + RzILOpPure *op_ADD_156 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_131), DUP(op_AND_131))), CAST(32, 
MSB(DUP(op_AND_131)), DUP(op_AND_131))), CAST(64, MSB(op_MUL_154), DUP(op_MUL_154))); + RzILOpPure *op_RSHIFT_160 = SHIFTRA(DUP(Rss), SN(32, 0x28)); + RzILOpPure *op_AND_163 = LOGAND(op_RSHIFT_160, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_RSHIFT_170 = SHIFTRA(DUP(Rtt), SN(32, 0x28)); + RzILOpPure *op_AND_173 = LOGAND(op_RSHIFT_170, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_MUL_177 = MUL(CAST(32, MSB(CAST(16, IL_FALSE, CAST(8, IL_FALSE, op_AND_163))), CAST(16, IL_FALSE, CAST(8, IL_FALSE, DUP(op_AND_163)))), CAST(32, MSB(CAST(16, IL_FALSE, CAST(8, IL_FALSE, op_AND_173))), CAST(16, IL_FALSE, CAST(8, IL_FALSE, DUP(op_AND_173))))); + RzILOpPure *op_ADD_179 = ADD(op_ADD_156, CAST(64, MSB(op_MUL_177), DUP(op_MUL_177))); + RzILOpPure *op_RSHIFT_183 = SHIFTRA(DUP(Rss), SN(32, 0x30)); + RzILOpPure *op_AND_186 = LOGAND(op_RSHIFT_183, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_RSHIFT_193 = SHIFTRA(DUP(Rtt), SN(32, 0x30)); + RzILOpPure *op_AND_196 = LOGAND(op_RSHIFT_193, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_MUL_200 = MUL(CAST(32, MSB(CAST(16, IL_FALSE, CAST(8, IL_FALSE, op_AND_186))), CAST(16, IL_FALSE, CAST(8, IL_FALSE, DUP(op_AND_186)))), CAST(32, MSB(CAST(16, IL_FALSE, CAST(8, IL_FALSE, op_AND_196))), CAST(16, IL_FALSE, CAST(8, IL_FALSE, DUP(op_AND_196))))); + RzILOpPure *op_ADD_202 = ADD(op_ADD_179, CAST(64, MSB(op_MUL_200), DUP(op_MUL_200))); + RzILOpPure *op_RSHIFT_206 = SHIFTRA(DUP(Rss), SN(32, 0x38)); + RzILOpPure *op_AND_209 = LOGAND(op_RSHIFT_206, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_RSHIFT_216 = SHIFTRA(DUP(Rtt), SN(32, 0x38)); + RzILOpPure *op_AND_219 = LOGAND(op_RSHIFT_216, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_MUL_223 = MUL(CAST(32, MSB(CAST(16, IL_FALSE, CAST(8, IL_FALSE, op_AND_209))), CAST(16, IL_FALSE, CAST(8, IL_FALSE, DUP(op_AND_209)))), CAST(32, MSB(CAST(16, IL_FALSE, CAST(8, IL_FALSE, op_AND_219))), CAST(16, IL_FALSE, CAST(8, 
IL_FALSE, DUP(op_AND_219))))); + RzILOpPure *op_ADD_225 = ADD(op_ADD_202, CAST(64, MSB(op_MUL_223), DUP(op_MUL_223))); + RzILOpPure *op_AND_227 = LOGAND(op_ADD_225, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_231 = SHIFTL0(op_AND_227, SN(32, 0x20)); + RzILOpPure *op_OR_232 = LOGOR(op_AND_125, op_LSHIFT_231); + RzILOpEffect *op_ASSIGN_233 = WRITE_REG(bundle, Rxx_op, op_OR_232); + + RzILOpEffect *instruction_sequence = SEQN(2, op_ASSIGN_117, op_ASSIGN_233); + return instruction_sequence; +} + +// Rdd = vrmpybsu(Rss,Rtt) +RzILOpEffect *hex_il_op_m5_vrmpybsu(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // Rdd = ((Rdd & (~(0xffffffff << 0x0))) | ((((st64) ((st32) ((st16) ((st8) ((Rss >> 0x0) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x0) & ((st64) 0xff)))))) + ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x8) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x8) & ((st64) 0xff)))))) + ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x10) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x10) & ((st64) 0xff)))))) + ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x18) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x18) & ((st64) 0xff)))))) & 0xffffffff) << 0x0)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0)); + RzILOpPure *op_NOT_6 = LOGNOT(op_LSHIFT_5); + RzILOpPure *op_AND_7 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_6); + RzILOpPure *op_RSHIFT_12 = SHIFTRA(Rss, SN(32, 0)); + RzILOpPure *op_AND_15 = LOGAND(op_RSHIFT_12, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_RSHIFT_23 = SHIFTRA(Rtt, SN(32, 0)); + RzILOpPure *op_AND_26 = LOGAND(op_RSHIFT_23, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_MUL_30 = 
MUL(CAST(32, MSB(CAST(16, MSB(CAST(8, MSB(op_AND_15), DUP(op_AND_15))), CAST(8, MSB(DUP(op_AND_15)), DUP(op_AND_15)))), CAST(16, MSB(CAST(8, MSB(DUP(op_AND_15)), DUP(op_AND_15))), CAST(8, MSB(DUP(op_AND_15)), DUP(op_AND_15)))), CAST(32, MSB(CAST(16, IL_FALSE, CAST(8, IL_FALSE, op_AND_26))), CAST(16, IL_FALSE, CAST(8, IL_FALSE, DUP(op_AND_26))))); + RzILOpPure *op_RSHIFT_35 = SHIFTRA(DUP(Rss), SN(32, 8)); + RzILOpPure *op_AND_38 = LOGAND(op_RSHIFT_35, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_RSHIFT_45 = SHIFTRA(DUP(Rtt), SN(32, 8)); + RzILOpPure *op_AND_48 = LOGAND(op_RSHIFT_45, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_MUL_52 = MUL(CAST(32, MSB(CAST(16, MSB(CAST(8, MSB(op_AND_38), DUP(op_AND_38))), CAST(8, MSB(DUP(op_AND_38)), DUP(op_AND_38)))), CAST(16, MSB(CAST(8, MSB(DUP(op_AND_38)), DUP(op_AND_38))), CAST(8, MSB(DUP(op_AND_38)), DUP(op_AND_38)))), CAST(32, MSB(CAST(16, IL_FALSE, CAST(8, IL_FALSE, op_AND_48))), CAST(16, IL_FALSE, CAST(8, IL_FALSE, DUP(op_AND_48))))); + RzILOpPure *op_ADD_54 = ADD(CAST(64, MSB(op_MUL_30), DUP(op_MUL_30)), CAST(64, MSB(op_MUL_52), DUP(op_MUL_52))); + RzILOpPure *op_RSHIFT_58 = SHIFTRA(DUP(Rss), SN(32, 16)); + RzILOpPure *op_AND_61 = LOGAND(op_RSHIFT_58, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_RSHIFT_68 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_71 = LOGAND(op_RSHIFT_68, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_MUL_75 = MUL(CAST(32, MSB(CAST(16, MSB(CAST(8, MSB(op_AND_61), DUP(op_AND_61))), CAST(8, MSB(DUP(op_AND_61)), DUP(op_AND_61)))), CAST(16, MSB(CAST(8, MSB(DUP(op_AND_61)), DUP(op_AND_61))), CAST(8, MSB(DUP(op_AND_61)), DUP(op_AND_61)))), CAST(32, MSB(CAST(16, IL_FALSE, CAST(8, IL_FALSE, op_AND_71))), CAST(16, IL_FALSE, CAST(8, IL_FALSE, DUP(op_AND_71))))); + RzILOpPure *op_ADD_77 = ADD(op_ADD_54, CAST(64, MSB(op_MUL_75), DUP(op_MUL_75))); + RzILOpPure *op_RSHIFT_81 = SHIFTRA(DUP(Rss), SN(32, 24)); + RzILOpPure *op_AND_84 = 
LOGAND(op_RSHIFT_81, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_RSHIFT_91 = SHIFTRA(DUP(Rtt), SN(32, 24)); + RzILOpPure *op_AND_94 = LOGAND(op_RSHIFT_91, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_MUL_98 = MUL(CAST(32, MSB(CAST(16, MSB(CAST(8, MSB(op_AND_84), DUP(op_AND_84))), CAST(8, MSB(DUP(op_AND_84)), DUP(op_AND_84)))), CAST(16, MSB(CAST(8, MSB(DUP(op_AND_84)), DUP(op_AND_84))), CAST(8, MSB(DUP(op_AND_84)), DUP(op_AND_84)))), CAST(32, MSB(CAST(16, IL_FALSE, CAST(8, IL_FALSE, op_AND_94))), CAST(16, IL_FALSE, CAST(8, IL_FALSE, DUP(op_AND_94))))); + RzILOpPure *op_ADD_100 = ADD(op_ADD_77, CAST(64, MSB(op_MUL_98), DUP(op_MUL_98))); + RzILOpPure *op_AND_102 = LOGAND(op_ADD_100, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_106 = SHIFTL0(op_AND_102, SN(32, 0)); + RzILOpPure *op_OR_107 = LOGOR(op_AND_7, op_LSHIFT_106); + RzILOpEffect *op_ASSIGN_108 = WRITE_REG(bundle, Rdd_op, op_OR_107); + + // Rdd = ((Rdd & (~(0xffffffff << 0x20))) | ((((st64) ((st32) ((st16) ((st8) ((Rss >> 0x20) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x20) & ((st64) 0xff)))))) + ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x28) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x28) & ((st64) 0xff)))))) + ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x30) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x30) & ((st64) 0xff)))))) + ((st64) ((st32) ((st16) ((st8) ((Rss >> 0x38) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x38) & ((st64) 0xff)))))) & 0xffffffff) << 0x20)); + RzILOpPure *op_LSHIFT_114 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0x20)); + RzILOpPure *op_NOT_115 = LOGNOT(op_LSHIFT_114); + RzILOpPure *op_AND_116 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_115); + RzILOpPure *op_RSHIFT_120 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_123 = LOGAND(op_RSHIFT_120, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_RSHIFT_130 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_133 = LOGAND(op_RSHIFT_130, CAST(64, 
MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_MUL_137 = MUL(CAST(32, MSB(CAST(16, MSB(CAST(8, MSB(op_AND_123), DUP(op_AND_123))), CAST(8, MSB(DUP(op_AND_123)), DUP(op_AND_123)))), CAST(16, MSB(CAST(8, MSB(DUP(op_AND_123)), DUP(op_AND_123))), CAST(8, MSB(DUP(op_AND_123)), DUP(op_AND_123)))), CAST(32, MSB(CAST(16, IL_FALSE, CAST(8, IL_FALSE, op_AND_133))), CAST(16, IL_FALSE, CAST(8, IL_FALSE, DUP(op_AND_133))))); + RzILOpPure *op_RSHIFT_142 = SHIFTRA(DUP(Rss), SN(32, 0x28)); + RzILOpPure *op_AND_145 = LOGAND(op_RSHIFT_142, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_RSHIFT_152 = SHIFTRA(DUP(Rtt), SN(32, 0x28)); + RzILOpPure *op_AND_155 = LOGAND(op_RSHIFT_152, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_MUL_159 = MUL(CAST(32, MSB(CAST(16, MSB(CAST(8, MSB(op_AND_145), DUP(op_AND_145))), CAST(8, MSB(DUP(op_AND_145)), DUP(op_AND_145)))), CAST(16, MSB(CAST(8, MSB(DUP(op_AND_145)), DUP(op_AND_145))), CAST(8, MSB(DUP(op_AND_145)), DUP(op_AND_145)))), CAST(32, MSB(CAST(16, IL_FALSE, CAST(8, IL_FALSE, op_AND_155))), CAST(16, IL_FALSE, CAST(8, IL_FALSE, DUP(op_AND_155))))); + RzILOpPure *op_ADD_161 = ADD(CAST(64, MSB(op_MUL_137), DUP(op_MUL_137)), CAST(64, MSB(op_MUL_159), DUP(op_MUL_159))); + RzILOpPure *op_RSHIFT_165 = SHIFTRA(DUP(Rss), SN(32, 0x30)); + RzILOpPure *op_AND_168 = LOGAND(op_RSHIFT_165, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_RSHIFT_175 = SHIFTRA(DUP(Rtt), SN(32, 0x30)); + RzILOpPure *op_AND_178 = LOGAND(op_RSHIFT_175, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_MUL_182 = MUL(CAST(32, MSB(CAST(16, MSB(CAST(8, MSB(op_AND_168), DUP(op_AND_168))), CAST(8, MSB(DUP(op_AND_168)), DUP(op_AND_168)))), CAST(16, MSB(CAST(8, MSB(DUP(op_AND_168)), DUP(op_AND_168))), CAST(8, MSB(DUP(op_AND_168)), DUP(op_AND_168)))), CAST(32, MSB(CAST(16, IL_FALSE, CAST(8, IL_FALSE, op_AND_178))), CAST(16, IL_FALSE, CAST(8, IL_FALSE, DUP(op_AND_178))))); + RzILOpPure *op_ADD_184 = ADD(op_ADD_161, CAST(64, 
MSB(op_MUL_182), DUP(op_MUL_182))); + RzILOpPure *op_RSHIFT_188 = SHIFTRA(DUP(Rss), SN(32, 0x38)); + RzILOpPure *op_AND_191 = LOGAND(op_RSHIFT_188, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_RSHIFT_198 = SHIFTRA(DUP(Rtt), SN(32, 0x38)); + RzILOpPure *op_AND_201 = LOGAND(op_RSHIFT_198, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_MUL_205 = MUL(CAST(32, MSB(CAST(16, MSB(CAST(8, MSB(op_AND_191), DUP(op_AND_191))), CAST(8, MSB(DUP(op_AND_191)), DUP(op_AND_191)))), CAST(16, MSB(CAST(8, MSB(DUP(op_AND_191)), DUP(op_AND_191))), CAST(8, MSB(DUP(op_AND_191)), DUP(op_AND_191)))), CAST(32, MSB(CAST(16, IL_FALSE, CAST(8, IL_FALSE, op_AND_201))), CAST(16, IL_FALSE, CAST(8, IL_FALSE, DUP(op_AND_201))))); + RzILOpPure *op_ADD_207 = ADD(op_ADD_184, CAST(64, MSB(op_MUL_205), DUP(op_MUL_205))); + RzILOpPure *op_AND_209 = LOGAND(op_ADD_207, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_213 = SHIFTL0(op_AND_209, SN(32, 0x20)); + RzILOpPure *op_OR_214 = LOGOR(op_AND_116, op_LSHIFT_213); + RzILOpEffect *op_ASSIGN_215 = WRITE_REG(bundle, Rdd_op, op_OR_214); + + RzILOpEffect *instruction_sequence = SEQN(2, op_ASSIGN_108, op_ASSIGN_215); + return instruction_sequence; +} + +// Rdd = vrmpybu(Rss,Rtt) +RzILOpEffect *hex_il_op_m5_vrmpybuu(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // Rdd = ((Rdd & (~(0xffffffff << 0x0))) | ((((st64) ((st32) ((st16) ((ut8) ((Rss >> 0x0) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x0) & ((st64) 0xff)))))) + ((st64) ((st32) ((st16) ((ut8) ((Rss >> 0x8) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x8) & ((st64) 0xff)))))) + ((st64) ((st32) ((st16) ((ut8) ((Rss >> 0x10) & ((st64) 0xff))))) * ((st32) 
((st16) ((ut8) ((Rtt >> 0x10) & ((st64) 0xff)))))) + ((st64) ((st32) ((st16) ((ut8) ((Rss >> 0x18) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x18) & ((st64) 0xff)))))) & 0xffffffff) << 0x0)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0)); + RzILOpPure *op_NOT_6 = LOGNOT(op_LSHIFT_5); + RzILOpPure *op_AND_7 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_6); + RzILOpPure *op_RSHIFT_12 = SHIFTRA(Rss, SN(32, 0)); + RzILOpPure *op_AND_15 = LOGAND(op_RSHIFT_12, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_RSHIFT_23 = SHIFTRA(Rtt, SN(32, 0)); + RzILOpPure *op_AND_26 = LOGAND(op_RSHIFT_23, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_MUL_30 = MUL(CAST(32, MSB(CAST(16, IL_FALSE, CAST(8, IL_FALSE, op_AND_15))), CAST(16, IL_FALSE, CAST(8, IL_FALSE, DUP(op_AND_15)))), CAST(32, MSB(CAST(16, IL_FALSE, CAST(8, IL_FALSE, op_AND_26))), CAST(16, IL_FALSE, CAST(8, IL_FALSE, DUP(op_AND_26))))); + RzILOpPure *op_RSHIFT_35 = SHIFTRA(DUP(Rss), SN(32, 8)); + RzILOpPure *op_AND_38 = LOGAND(op_RSHIFT_35, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_RSHIFT_45 = SHIFTRA(DUP(Rtt), SN(32, 8)); + RzILOpPure *op_AND_48 = LOGAND(op_RSHIFT_45, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_MUL_52 = MUL(CAST(32, MSB(CAST(16, IL_FALSE, CAST(8, IL_FALSE, op_AND_38))), CAST(16, IL_FALSE, CAST(8, IL_FALSE, DUP(op_AND_38)))), CAST(32, MSB(CAST(16, IL_FALSE, CAST(8, IL_FALSE, op_AND_48))), CAST(16, IL_FALSE, CAST(8, IL_FALSE, DUP(op_AND_48))))); + RzILOpPure *op_ADD_54 = ADD(CAST(64, MSB(op_MUL_30), DUP(op_MUL_30)), CAST(64, MSB(op_MUL_52), DUP(op_MUL_52))); + RzILOpPure *op_RSHIFT_58 = SHIFTRA(DUP(Rss), SN(32, 16)); + RzILOpPure *op_AND_61 = LOGAND(op_RSHIFT_58, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_RSHIFT_68 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_71 = LOGAND(op_RSHIFT_68, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_MUL_75 = MUL(CAST(32, MSB(CAST(16, 
IL_FALSE, CAST(8, IL_FALSE, op_AND_61))), CAST(16, IL_FALSE, CAST(8, IL_FALSE, DUP(op_AND_61)))), CAST(32, MSB(CAST(16, IL_FALSE, CAST(8, IL_FALSE, op_AND_71))), CAST(16, IL_FALSE, CAST(8, IL_FALSE, DUP(op_AND_71))))); + RzILOpPure *op_ADD_77 = ADD(op_ADD_54, CAST(64, MSB(op_MUL_75), DUP(op_MUL_75))); + RzILOpPure *op_RSHIFT_81 = SHIFTRA(DUP(Rss), SN(32, 24)); + RzILOpPure *op_AND_84 = LOGAND(op_RSHIFT_81, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_RSHIFT_91 = SHIFTRA(DUP(Rtt), SN(32, 24)); + RzILOpPure *op_AND_94 = LOGAND(op_RSHIFT_91, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_MUL_98 = MUL(CAST(32, MSB(CAST(16, IL_FALSE, CAST(8, IL_FALSE, op_AND_84))), CAST(16, IL_FALSE, CAST(8, IL_FALSE, DUP(op_AND_84)))), CAST(32, MSB(CAST(16, IL_FALSE, CAST(8, IL_FALSE, op_AND_94))), CAST(16, IL_FALSE, CAST(8, IL_FALSE, DUP(op_AND_94))))); + RzILOpPure *op_ADD_100 = ADD(op_ADD_77, CAST(64, MSB(op_MUL_98), DUP(op_MUL_98))); + RzILOpPure *op_AND_102 = LOGAND(op_ADD_100, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_106 = SHIFTL0(op_AND_102, SN(32, 0)); + RzILOpPure *op_OR_107 = LOGOR(op_AND_7, op_LSHIFT_106); + RzILOpEffect *op_ASSIGN_108 = WRITE_REG(bundle, Rdd_op, op_OR_107); + + // Rdd = ((Rdd & (~(0xffffffff << 0x20))) | ((((st64) ((st32) ((st16) ((ut8) ((Rss >> 0x20) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x20) & ((st64) 0xff)))))) + ((st64) ((st32) ((st16) ((ut8) ((Rss >> 0x28) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x28) & ((st64) 0xff)))))) + ((st64) ((st32) ((st16) ((ut8) ((Rss >> 0x30) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x30) & ((st64) 0xff)))))) + ((st64) ((st32) ((st16) ((ut8) ((Rss >> 0x38) & ((st64) 0xff))))) * ((st32) ((st16) ((ut8) ((Rtt >> 0x38) & ((st64) 0xff)))))) & 0xffffffff) << 0x20)); + RzILOpPure *op_LSHIFT_114 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0x20)); + RzILOpPure *op_NOT_115 = LOGNOT(op_LSHIFT_114); + RzILOpPure *op_AND_116 = LOGAND(READ_REG(pkt, Rdd_op, true), 
op_NOT_115); + RzILOpPure *op_RSHIFT_120 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_123 = LOGAND(op_RSHIFT_120, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_RSHIFT_130 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_133 = LOGAND(op_RSHIFT_130, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_MUL_137 = MUL(CAST(32, MSB(CAST(16, IL_FALSE, CAST(8, IL_FALSE, op_AND_123))), CAST(16, IL_FALSE, CAST(8, IL_FALSE, DUP(op_AND_123)))), CAST(32, MSB(CAST(16, IL_FALSE, CAST(8, IL_FALSE, op_AND_133))), CAST(16, IL_FALSE, CAST(8, IL_FALSE, DUP(op_AND_133))))); + RzILOpPure *op_RSHIFT_142 = SHIFTRA(DUP(Rss), SN(32, 0x28)); + RzILOpPure *op_AND_145 = LOGAND(op_RSHIFT_142, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_RSHIFT_152 = SHIFTRA(DUP(Rtt), SN(32, 0x28)); + RzILOpPure *op_AND_155 = LOGAND(op_RSHIFT_152, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_MUL_159 = MUL(CAST(32, MSB(CAST(16, IL_FALSE, CAST(8, IL_FALSE, op_AND_145))), CAST(16, IL_FALSE, CAST(8, IL_FALSE, DUP(op_AND_145)))), CAST(32, MSB(CAST(16, IL_FALSE, CAST(8, IL_FALSE, op_AND_155))), CAST(16, IL_FALSE, CAST(8, IL_FALSE, DUP(op_AND_155))))); + RzILOpPure *op_ADD_161 = ADD(CAST(64, MSB(op_MUL_137), DUP(op_MUL_137)), CAST(64, MSB(op_MUL_159), DUP(op_MUL_159))); + RzILOpPure *op_RSHIFT_165 = SHIFTRA(DUP(Rss), SN(32, 0x30)); + RzILOpPure *op_AND_168 = LOGAND(op_RSHIFT_165, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_RSHIFT_175 = SHIFTRA(DUP(Rtt), SN(32, 0x30)); + RzILOpPure *op_AND_178 = LOGAND(op_RSHIFT_175, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_MUL_182 = MUL(CAST(32, MSB(CAST(16, IL_FALSE, CAST(8, IL_FALSE, op_AND_168))), CAST(16, IL_FALSE, CAST(8, IL_FALSE, DUP(op_AND_168)))), CAST(32, MSB(CAST(16, IL_FALSE, CAST(8, IL_FALSE, op_AND_178))), CAST(16, IL_FALSE, CAST(8, IL_FALSE, DUP(op_AND_178))))); + RzILOpPure *op_ADD_184 = ADD(op_ADD_161, CAST(64, MSB(op_MUL_182), DUP(op_MUL_182))); + RzILOpPure 
*op_RSHIFT_188 = SHIFTRA(DUP(Rss), SN(32, 0x38)); + RzILOpPure *op_AND_191 = LOGAND(op_RSHIFT_188, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_RSHIFT_198 = SHIFTRA(DUP(Rtt), SN(32, 0x38)); + RzILOpPure *op_AND_201 = LOGAND(op_RSHIFT_198, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_MUL_205 = MUL(CAST(32, MSB(CAST(16, IL_FALSE, CAST(8, IL_FALSE, op_AND_191))), CAST(16, IL_FALSE, CAST(8, IL_FALSE, DUP(op_AND_191)))), CAST(32, MSB(CAST(16, IL_FALSE, CAST(8, IL_FALSE, op_AND_201))), CAST(16, IL_FALSE, CAST(8, IL_FALSE, DUP(op_AND_201))))); + RzILOpPure *op_ADD_207 = ADD(op_ADD_184, CAST(64, MSB(op_MUL_205), DUP(op_MUL_205))); + RzILOpPure *op_AND_209 = LOGAND(op_ADD_207, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_213 = SHIFTL0(op_AND_209, SN(32, 0x20)); + RzILOpPure *op_OR_214 = LOGOR(op_AND_116, op_LSHIFT_213); + RzILOpEffect *op_ASSIGN_215 = WRITE_REG(bundle, Rdd_op, op_OR_214); + + RzILOpEffect *instruction_sequence = SEQN(2, op_ASSIGN_108, op_ASSIGN_215); + return instruction_sequence; +} + +#include \ No newline at end of file diff --git a/librz/arch/isa/hexagon/il_ops/hexagon_il_M6_ops.c b/librz/arch/isa/hexagon/il_ops/hexagon_il_M6_ops.c new file mode 100644 index 00000000000..49c567ff4e7 --- /dev/null +++ b/librz/arch/isa/hexagon/il_ops/hexagon_il_M6_ops.c @@ -0,0 +1,169 @@ +// SPDX-FileCopyrightText: 2021 Rot127 +// SPDX-License-Identifier: LGPL-3.0-only + +// LLVM commit: b6f51787f6c8e77143f0aef6b58ddc7c55741d5c +// LLVM commit date: 2023-11-15 07:10:59 -0800 (ISO 8601 format) +// Date of code generation: 2024-03-16 06:22:39-05:00 +//======================================== +// The following code is generated. +// Do not edit. 
Repository of code generator: +// https://github.com/rizinorg/rz-hexagon + +#include +#include "../hexagon_il.h" +#include +#include + +// Rdd = vabsdiffb(Rtt,Rss) +RzILOpEffect *hex_il_op_m6_vabsdiffb(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: st32 i; + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_2 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_5 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp449 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_7 = SETL("h_tmp449", VARL("i")); + + // seq(h_tmp449 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_8 = SEQN(2, op_ASSIGN_hybrid_tmp_7, op_INC_5); + + // Rdd = ((st64) (((ut64) (Rdd & (~(0xff << i * 0x8)))) | (((ut64) (((st64) ((((st32) ((st8) ((Rtt >> i * 0x8) & ((st64) 0xff)))) - ((st32) ((st8) ((Rss >> i * 0x8) & ((st64) 0xff)))) < 0x0) ? 
(-((st32) ((st8) ((Rtt >> i * 0x8) & ((st64) 0xff)))) - ((st32) ((st8) ((Rss >> i * 0x8) & ((st64) 0xff))))) : ((st32) ((st8) ((Rtt >> i * 0x8) & ((st64) 0xff)))) - ((st32) ((st8) ((Rss >> i * 0x8) & ((st64) 0xff)))))) & 0xff)) << i * 0x8))); + RzILOpPure *op_MUL_12 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_LSHIFT_13 = SHIFTL0(SN(64, 0xff), op_MUL_12); + RzILOpPure *op_NOT_14 = LOGNOT(op_LSHIFT_13); + RzILOpPure *op_AND_15 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_14); + RzILOpPure *op_MUL_18 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_RSHIFT_19 = SHIFTRA(Rtt, op_MUL_18); + RzILOpPure *op_AND_22 = LOGAND(op_RSHIFT_19, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_MUL_26 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_RSHIFT_27 = SHIFTRA(Rss, op_MUL_26); + RzILOpPure *op_AND_30 = LOGAND(op_RSHIFT_27, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_SUB_34 = SUB(CAST(32, MSB(CAST(8, MSB(op_AND_22), DUP(op_AND_22))), CAST(8, MSB(DUP(op_AND_22)), DUP(op_AND_22))), CAST(32, MSB(CAST(8, MSB(op_AND_30), DUP(op_AND_30))), CAST(8, MSB(DUP(op_AND_30)), DUP(op_AND_30)))); + RzILOpPure *op_LT_36 = SLT(op_SUB_34, SN(32, 0)); + RzILOpPure *op_MUL_38 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_RSHIFT_39 = SHIFTRA(DUP(Rtt), op_MUL_38); + RzILOpPure *op_AND_42 = LOGAND(op_RSHIFT_39, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_MUL_45 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_RSHIFT_46 = SHIFTRA(DUP(Rss), op_MUL_45); + RzILOpPure *op_AND_49 = LOGAND(op_RSHIFT_46, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_SUB_53 = SUB(CAST(32, MSB(CAST(8, MSB(op_AND_42), DUP(op_AND_42))), CAST(8, MSB(DUP(op_AND_42)), DUP(op_AND_42))), CAST(32, MSB(CAST(8, MSB(op_AND_49), DUP(op_AND_49))), CAST(8, MSB(DUP(op_AND_49)), DUP(op_AND_49)))); + RzILOpPure *op_NEG_54 = NEG(op_SUB_53); + RzILOpPure *op_MUL_56 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_RSHIFT_57 = SHIFTRA(DUP(Rtt), op_MUL_56); + RzILOpPure *op_AND_60 = 
LOGAND(op_RSHIFT_57, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_MUL_63 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_RSHIFT_64 = SHIFTRA(DUP(Rss), op_MUL_63); + RzILOpPure *op_AND_67 = LOGAND(op_RSHIFT_64, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_SUB_71 = SUB(CAST(32, MSB(CAST(8, MSB(op_AND_60), DUP(op_AND_60))), CAST(8, MSB(DUP(op_AND_60)), DUP(op_AND_60))), CAST(32, MSB(CAST(8, MSB(op_AND_67), DUP(op_AND_67))), CAST(8, MSB(DUP(op_AND_67)), DUP(op_AND_67)))); + RzILOpPure *cond_72 = ITE(op_LT_36, op_NEG_54, op_SUB_71); + RzILOpPure *op_AND_75 = LOGAND(CAST(64, MSB(cond_72), DUP(cond_72)), SN(64, 0xff)); + RzILOpPure *op_MUL_78 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_LSHIFT_79 = SHIFTL0(CAST(64, IL_FALSE, op_AND_75), op_MUL_78); + RzILOpPure *op_OR_81 = LOGOR(CAST(64, IL_FALSE, op_AND_15), op_LSHIFT_79); + RzILOpEffect *op_ASSIGN_83 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_81)); + + // seq(h_tmp449; Rdd = ((st64) (((ut64) (Rdd & (~(0xff << i * 0x8)) ...; + RzILOpEffect *seq_85 = op_ASSIGN_83; + + // seq(seq(h_tmp449; Rdd = ((st64) (((ut64) (Rdd & (~(0xff << i * 0 ...; + RzILOpEffect *seq_86 = SEQN(2, seq_85, seq_8); + + // while ((i < 0x8)) { seq(seq(h_tmp449; Rdd = ((st64) (((ut64) (Rdd & (~(0xff << i * 0 ... 
}; + RzILOpPure *op_LT_4 = SLT(VARL("i"), SN(32, 8)); + RzILOpEffect *for_87 = REPEAT(op_LT_4, seq_86); + + // seq(i = 0x0; while ((i < 0x8)) { seq(seq(h_tmp449; Rdd = ((st64) ...; + RzILOpEffect *seq_88 = SEQN(2, op_ASSIGN_2, for_87); + + RzILOpEffect *instruction_sequence = seq_88; + return instruction_sequence; +} + +// Rdd = vabsdiffub(Rtt,Rss) +RzILOpEffect *hex_il_op_m6_vabsdiffub(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: st32 i; + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_2 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_5 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp450 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_7 = SETL("h_tmp450", VARL("i")); + + // seq(h_tmp450 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_8 = SEQN(2, op_ASSIGN_hybrid_tmp_7, op_INC_5); + + // Rdd = ((st64) (((ut64) (Rdd & (~(0xff << i * 0x8)))) | (((ut64) (((st64) ((((st32) ((ut8) ((Rtt >> i * 0x8) & ((st64) 0xff)))) - ((st32) ((ut8) ((Rss >> i * 0x8) & ((st64) 0xff)))) < 0x0) ? 
(-((st32) ((ut8) ((Rtt >> i * 0x8) & ((st64) 0xff)))) - ((st32) ((ut8) ((Rss >> i * 0x8) & ((st64) 0xff))))) : ((st32) ((ut8) ((Rtt >> i * 0x8) & ((st64) 0xff)))) - ((st32) ((ut8) ((Rss >> i * 0x8) & ((st64) 0xff)))))) & 0xff)) << i * 0x8))); + RzILOpPure *op_MUL_12 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_LSHIFT_13 = SHIFTL0(SN(64, 0xff), op_MUL_12); + RzILOpPure *op_NOT_14 = LOGNOT(op_LSHIFT_13); + RzILOpPure *op_AND_15 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_14); + RzILOpPure *op_MUL_18 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_RSHIFT_19 = SHIFTRA(Rtt, op_MUL_18); + RzILOpPure *op_AND_22 = LOGAND(op_RSHIFT_19, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_MUL_26 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_RSHIFT_27 = SHIFTRA(Rss, op_MUL_26); + RzILOpPure *op_AND_30 = LOGAND(op_RSHIFT_27, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_SUB_34 = SUB(CAST(32, IL_FALSE, CAST(8, IL_FALSE, op_AND_22)), CAST(32, IL_FALSE, CAST(8, IL_FALSE, op_AND_30))); + RzILOpPure *op_LT_36 = SLT(op_SUB_34, SN(32, 0)); + RzILOpPure *op_MUL_38 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_RSHIFT_39 = SHIFTRA(DUP(Rtt), op_MUL_38); + RzILOpPure *op_AND_42 = LOGAND(op_RSHIFT_39, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_MUL_45 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_RSHIFT_46 = SHIFTRA(DUP(Rss), op_MUL_45); + RzILOpPure *op_AND_49 = LOGAND(op_RSHIFT_46, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_SUB_53 = SUB(CAST(32, IL_FALSE, CAST(8, IL_FALSE, op_AND_42)), CAST(32, IL_FALSE, CAST(8, IL_FALSE, op_AND_49))); + RzILOpPure *op_NEG_54 = NEG(op_SUB_53); + RzILOpPure *op_MUL_56 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_RSHIFT_57 = SHIFTRA(DUP(Rtt), op_MUL_56); + RzILOpPure *op_AND_60 = LOGAND(op_RSHIFT_57, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_MUL_63 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_RSHIFT_64 = SHIFTRA(DUP(Rss), op_MUL_63); + RzILOpPure *op_AND_67 = 
LOGAND(op_RSHIFT_64, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_SUB_71 = SUB(CAST(32, IL_FALSE, CAST(8, IL_FALSE, op_AND_60)), CAST(32, IL_FALSE, CAST(8, IL_FALSE, op_AND_67))); + RzILOpPure *cond_72 = ITE(op_LT_36, op_NEG_54, op_SUB_71); + RzILOpPure *op_AND_75 = LOGAND(CAST(64, MSB(cond_72), DUP(cond_72)), SN(64, 0xff)); + RzILOpPure *op_MUL_78 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_LSHIFT_79 = SHIFTL0(CAST(64, IL_FALSE, op_AND_75), op_MUL_78); + RzILOpPure *op_OR_81 = LOGOR(CAST(64, IL_FALSE, op_AND_15), op_LSHIFT_79); + RzILOpEffect *op_ASSIGN_83 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_81)); + + // seq(h_tmp450; Rdd = ((st64) (((ut64) (Rdd & (~(0xff << i * 0x8)) ...; + RzILOpEffect *seq_85 = op_ASSIGN_83; + + // seq(seq(h_tmp450; Rdd = ((st64) (((ut64) (Rdd & (~(0xff << i * 0 ...; + RzILOpEffect *seq_86 = SEQN(2, seq_85, seq_8); + + // while ((i < 0x8)) { seq(seq(h_tmp450; Rdd = ((st64) (((ut64) (Rdd & (~(0xff << i * 0 ... }; + RzILOpPure *op_LT_4 = SLT(VARL("i"), SN(32, 8)); + RzILOpEffect *for_87 = REPEAT(op_LT_4, seq_86); + + // seq(i = 0x0; while ((i < 0x8)) { seq(seq(h_tmp450; Rdd = ((st64) ...; + RzILOpEffect *seq_88 = SEQN(2, op_ASSIGN_2, for_87); + + RzILOpEffect *instruction_sequence = seq_88; + return instruction_sequence; +} + +#include \ No newline at end of file diff --git a/librz/arch/isa/hexagon/il_ops/hexagon_il_M7_ops.c b/librz/arch/isa/hexagon/il_ops/hexagon_il_M7_ops.c new file mode 100644 index 00000000000..83d2bae7fc6 --- /dev/null +++ b/librz/arch/isa/hexagon/il_ops/hexagon_il_M7_ops.c @@ -0,0 +1,297 @@ +// SPDX-FileCopyrightText: 2021 Rot127 +// SPDX-License-Identifier: LGPL-3.0-only + +// LLVM commit: b6f51787f6c8e77143f0aef6b58ddc7c55741d5c +// LLVM commit date: 2023-11-15 07:10:59 -0800 (ISO 8601 format) +// Date of code generation: 2024-03-16 06:22:39-05:00 +//======================================== +// The following code is generated. +// Do not edit. 
Repository of code generator: +// https://github.com/rizinorg/rz-hexagon + +#include +#include "../hexagon_il.h" +#include +#include + +// Rdd = cmpyiw(Rss,Rtt) +RzILOpEffect *hex_il_op_m7_dcmpyiw(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // Rdd = ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * ((st64) ((st32) ((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff))))) + ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * ((st64) ((st32) ((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff))))); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rss, SN(32, 0)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_16 = SHIFTRA(Rtt, SN(32, 0x20)); + RzILOpPure *op_AND_18 = LOGAND(op_RSHIFT_16, SN(64, 0xffffffff)); + RzILOpPure *op_MUL_23 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_7), DUP(op_AND_7))), CAST(32, MSB(DUP(op_AND_7)), DUP(op_AND_7)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_7)), DUP(op_AND_7))), CAST(32, MSB(DUP(op_AND_7)), DUP(op_AND_7))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_7)), DUP(op_AND_7))), CAST(32, MSB(DUP(op_AND_7)), DUP(op_AND_7)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_7)), DUP(op_AND_7))), CAST(32, MSB(DUP(op_AND_7)), DUP(op_AND_7))))), CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_18), DUP(op_AND_18))), CAST(32, MSB(DUP(op_AND_18)), DUP(op_AND_18)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_18)), DUP(op_AND_18))), CAST(32, MSB(DUP(op_AND_18)), DUP(op_AND_18))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_18)), DUP(op_AND_18))), CAST(32, MSB(DUP(op_AND_18)), DUP(op_AND_18)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_18)), DUP(op_AND_18))), CAST(32, 
MSB(DUP(op_AND_18)), DUP(op_AND_18)))))); + RzILOpPure *op_RSHIFT_27 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_29 = LOGAND(op_RSHIFT_27, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_37 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_39 = LOGAND(op_RSHIFT_37, SN(64, 0xffffffff)); + RzILOpPure *op_MUL_44 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_29), DUP(op_AND_29))), CAST(32, MSB(DUP(op_AND_29)), DUP(op_AND_29)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_29)), DUP(op_AND_29))), CAST(32, MSB(DUP(op_AND_29)), DUP(op_AND_29))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_29)), DUP(op_AND_29))), CAST(32, MSB(DUP(op_AND_29)), DUP(op_AND_29)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_29)), DUP(op_AND_29))), CAST(32, MSB(DUP(op_AND_29)), DUP(op_AND_29))))), CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_39), DUP(op_AND_39))), CAST(32, MSB(DUP(op_AND_39)), DUP(op_AND_39)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_39)), DUP(op_AND_39))), CAST(32, MSB(DUP(op_AND_39)), DUP(op_AND_39))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_39)), DUP(op_AND_39))), CAST(32, MSB(DUP(op_AND_39)), DUP(op_AND_39)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_39)), DUP(op_AND_39))), CAST(32, MSB(DUP(op_AND_39)), DUP(op_AND_39)))))); + RzILOpPure *op_ADD_45 = ADD(op_MUL_23, op_MUL_44); + RzILOpEffect *op_ASSIGN_46 = WRITE_REG(bundle, Rdd_op, op_ADD_45); + + RzILOpEffect *instruction_sequence = op_ASSIGN_46; + return instruction_sequence; +} + +// Rxx += cmpyiw(Rss,Rtt) +RzILOpEffect *hex_il_op_m7_dcmpyiw_acc(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rxx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // Rxx = Rxx + ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * 
((st64) ((st32) ((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff))))) + ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * ((st64) ((st32) ((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff))))); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rss, SN(32, 0)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_16 = SHIFTRA(Rtt, SN(32, 0x20)); + RzILOpPure *op_AND_18 = LOGAND(op_RSHIFT_16, SN(64, 0xffffffff)); + RzILOpPure *op_MUL_23 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_7), DUP(op_AND_7))), CAST(32, MSB(DUP(op_AND_7)), DUP(op_AND_7)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_7)), DUP(op_AND_7))), CAST(32, MSB(DUP(op_AND_7)), DUP(op_AND_7))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_7)), DUP(op_AND_7))), CAST(32, MSB(DUP(op_AND_7)), DUP(op_AND_7)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_7)), DUP(op_AND_7))), CAST(32, MSB(DUP(op_AND_7)), DUP(op_AND_7))))), CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_18), DUP(op_AND_18))), CAST(32, MSB(DUP(op_AND_18)), DUP(op_AND_18)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_18)), DUP(op_AND_18))), CAST(32, MSB(DUP(op_AND_18)), DUP(op_AND_18))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_18)), DUP(op_AND_18))), CAST(32, MSB(DUP(op_AND_18)), DUP(op_AND_18)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_18)), DUP(op_AND_18))), CAST(32, MSB(DUP(op_AND_18)), DUP(op_AND_18)))))); + RzILOpPure *op_RSHIFT_27 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_29 = LOGAND(op_RSHIFT_27, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_37 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_39 = LOGAND(op_RSHIFT_37, SN(64, 0xffffffff)); + RzILOpPure *op_MUL_44 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_29), DUP(op_AND_29))), CAST(32, MSB(DUP(op_AND_29)), DUP(op_AND_29)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_29)), DUP(op_AND_29))), CAST(32, MSB(DUP(op_AND_29)), DUP(op_AND_29))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_29)), 
DUP(op_AND_29))), CAST(32, MSB(DUP(op_AND_29)), DUP(op_AND_29)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_29)), DUP(op_AND_29))), CAST(32, MSB(DUP(op_AND_29)), DUP(op_AND_29))))), CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_39), DUP(op_AND_39))), CAST(32, MSB(DUP(op_AND_39)), DUP(op_AND_39)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_39)), DUP(op_AND_39))), CAST(32, MSB(DUP(op_AND_39)), DUP(op_AND_39))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_39)), DUP(op_AND_39))), CAST(32, MSB(DUP(op_AND_39)), DUP(op_AND_39)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_39)), DUP(op_AND_39))), CAST(32, MSB(DUP(op_AND_39)), DUP(op_AND_39)))))); + RzILOpPure *op_ADD_45 = ADD(op_MUL_23, op_MUL_44); + RzILOpPure *op_ADD_46 = ADD(READ_REG(pkt, Rxx_op, false), op_ADD_45); + RzILOpEffect *op_ASSIGN_ADD_47 = WRITE_REG(bundle, Rxx_op, op_ADD_46); + + RzILOpEffect *instruction_sequence = op_ASSIGN_ADD_47; + return instruction_sequence; +} + +// Rdd = cmpyiw(Rss,Rtt*) +RzILOpEffect *hex_il_op_m7_dcmpyiwc(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // Rdd = ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * ((st64) ((st32) ((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff))))) - ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * ((st64) ((st32) ((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff))))); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rss, SN(32, 0x20)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_16 = SHIFTRA(Rtt, SN(32, 0)); + RzILOpPure *op_AND_18 = LOGAND(op_RSHIFT_16, SN(64, 0xffffffff)); + RzILOpPure *op_MUL_23 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_7), DUP(op_AND_7))), 
CAST(32, MSB(DUP(op_AND_7)), DUP(op_AND_7)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_7)), DUP(op_AND_7))), CAST(32, MSB(DUP(op_AND_7)), DUP(op_AND_7))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_7)), DUP(op_AND_7))), CAST(32, MSB(DUP(op_AND_7)), DUP(op_AND_7)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_7)), DUP(op_AND_7))), CAST(32, MSB(DUP(op_AND_7)), DUP(op_AND_7))))), CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_18), DUP(op_AND_18))), CAST(32, MSB(DUP(op_AND_18)), DUP(op_AND_18)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_18)), DUP(op_AND_18))), CAST(32, MSB(DUP(op_AND_18)), DUP(op_AND_18))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_18)), DUP(op_AND_18))), CAST(32, MSB(DUP(op_AND_18)), DUP(op_AND_18)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_18)), DUP(op_AND_18))), CAST(32, MSB(DUP(op_AND_18)), DUP(op_AND_18)))))); + RzILOpPure *op_RSHIFT_27 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_29 = LOGAND(op_RSHIFT_27, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_37 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_39 = LOGAND(op_RSHIFT_37, SN(64, 0xffffffff)); + RzILOpPure *op_MUL_44 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_29), DUP(op_AND_29))), CAST(32, MSB(DUP(op_AND_29)), DUP(op_AND_29)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_29)), DUP(op_AND_29))), CAST(32, MSB(DUP(op_AND_29)), DUP(op_AND_29))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_29)), DUP(op_AND_29))), CAST(32, MSB(DUP(op_AND_29)), DUP(op_AND_29)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_29)), DUP(op_AND_29))), CAST(32, MSB(DUP(op_AND_29)), DUP(op_AND_29))))), CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_39), DUP(op_AND_39))), CAST(32, MSB(DUP(op_AND_39)), DUP(op_AND_39)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_39)), DUP(op_AND_39))), CAST(32, MSB(DUP(op_AND_39)), DUP(op_AND_39))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_39)), DUP(op_AND_39))), CAST(32, MSB(DUP(op_AND_39)), DUP(op_AND_39)))), CAST(64, 
MSB(CAST(32, MSB(DUP(op_AND_39)), DUP(op_AND_39))), CAST(32, MSB(DUP(op_AND_39)), DUP(op_AND_39)))))); + RzILOpPure *op_SUB_45 = SUB(op_MUL_23, op_MUL_44); + RzILOpEffect *op_ASSIGN_46 = WRITE_REG(bundle, Rdd_op, op_SUB_45); + + RzILOpEffect *instruction_sequence = op_ASSIGN_46; + return instruction_sequence; +} + +// Rxx += cmpyiw(Rss,Rtt*) +RzILOpEffect *hex_il_op_m7_dcmpyiwc_acc(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rxx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // Rxx = Rxx + ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * ((st64) ((st32) ((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff))))) - ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * ((st64) ((st32) ((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff))))); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rss, SN(32, 0x20)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_16 = SHIFTRA(Rtt, SN(32, 0)); + RzILOpPure *op_AND_18 = LOGAND(op_RSHIFT_16, SN(64, 0xffffffff)); + RzILOpPure *op_MUL_23 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_7), DUP(op_AND_7))), CAST(32, MSB(DUP(op_AND_7)), DUP(op_AND_7)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_7)), DUP(op_AND_7))), CAST(32, MSB(DUP(op_AND_7)), DUP(op_AND_7))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_7)), DUP(op_AND_7))), CAST(32, MSB(DUP(op_AND_7)), DUP(op_AND_7)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_7)), DUP(op_AND_7))), CAST(32, MSB(DUP(op_AND_7)), DUP(op_AND_7))))), CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_18), DUP(op_AND_18))), CAST(32, MSB(DUP(op_AND_18)), DUP(op_AND_18)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_18)), DUP(op_AND_18))), CAST(32, MSB(DUP(op_AND_18)), DUP(op_AND_18))))), 
CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_18)), DUP(op_AND_18))), CAST(32, MSB(DUP(op_AND_18)), DUP(op_AND_18)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_18)), DUP(op_AND_18))), CAST(32, MSB(DUP(op_AND_18)), DUP(op_AND_18)))))); + RzILOpPure *op_RSHIFT_27 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_29 = LOGAND(op_RSHIFT_27, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_37 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_39 = LOGAND(op_RSHIFT_37, SN(64, 0xffffffff)); + RzILOpPure *op_MUL_44 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_29), DUP(op_AND_29))), CAST(32, MSB(DUP(op_AND_29)), DUP(op_AND_29)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_29)), DUP(op_AND_29))), CAST(32, MSB(DUP(op_AND_29)), DUP(op_AND_29))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_29)), DUP(op_AND_29))), CAST(32, MSB(DUP(op_AND_29)), DUP(op_AND_29)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_29)), DUP(op_AND_29))), CAST(32, MSB(DUP(op_AND_29)), DUP(op_AND_29))))), CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_39), DUP(op_AND_39))), CAST(32, MSB(DUP(op_AND_39)), DUP(op_AND_39)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_39)), DUP(op_AND_39))), CAST(32, MSB(DUP(op_AND_39)), DUP(op_AND_39))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_39)), DUP(op_AND_39))), CAST(32, MSB(DUP(op_AND_39)), DUP(op_AND_39)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_39)), DUP(op_AND_39))), CAST(32, MSB(DUP(op_AND_39)), DUP(op_AND_39)))))); + RzILOpPure *op_SUB_45 = SUB(op_MUL_23, op_MUL_44); + RzILOpPure *op_ADD_46 = ADD(READ_REG(pkt, Rxx_op, false), op_SUB_45); + RzILOpEffect *op_ASSIGN_ADD_47 = WRITE_REG(bundle, Rxx_op, op_ADD_46); + + RzILOpEffect *instruction_sequence = op_ASSIGN_ADD_47; + return instruction_sequence; +} + +// Rdd = cmpyrw(Rss,Rtt) +RzILOpEffect *hex_il_op_m7_dcmpyrw(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp 
*Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // Rdd = ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * ((st64) ((st32) ((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff))))) - ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * ((st64) ((st32) ((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff))))); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rss, SN(32, 0)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_16 = SHIFTRA(Rtt, SN(32, 0)); + RzILOpPure *op_AND_18 = LOGAND(op_RSHIFT_16, SN(64, 0xffffffff)); + RzILOpPure *op_MUL_23 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_7), DUP(op_AND_7))), CAST(32, MSB(DUP(op_AND_7)), DUP(op_AND_7)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_7)), DUP(op_AND_7))), CAST(32, MSB(DUP(op_AND_7)), DUP(op_AND_7))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_7)), DUP(op_AND_7))), CAST(32, MSB(DUP(op_AND_7)), DUP(op_AND_7)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_7)), DUP(op_AND_7))), CAST(32, MSB(DUP(op_AND_7)), DUP(op_AND_7))))), CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_18), DUP(op_AND_18))), CAST(32, MSB(DUP(op_AND_18)), DUP(op_AND_18)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_18)), DUP(op_AND_18))), CAST(32, MSB(DUP(op_AND_18)), DUP(op_AND_18))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_18)), DUP(op_AND_18))), CAST(32, MSB(DUP(op_AND_18)), DUP(op_AND_18)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_18)), DUP(op_AND_18))), CAST(32, MSB(DUP(op_AND_18)), DUP(op_AND_18)))))); + RzILOpPure *op_RSHIFT_27 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_29 = LOGAND(op_RSHIFT_27, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_37 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_39 = LOGAND(op_RSHIFT_37, SN(64, 0xffffffff)); + RzILOpPure *op_MUL_44 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, 
MSB(CAST(32, MSB(op_AND_29), DUP(op_AND_29))), CAST(32, MSB(DUP(op_AND_29)), DUP(op_AND_29)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_29)), DUP(op_AND_29))), CAST(32, MSB(DUP(op_AND_29)), DUP(op_AND_29))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_29)), DUP(op_AND_29))), CAST(32, MSB(DUP(op_AND_29)), DUP(op_AND_29)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_29)), DUP(op_AND_29))), CAST(32, MSB(DUP(op_AND_29)), DUP(op_AND_29))))), CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_39), DUP(op_AND_39))), CAST(32, MSB(DUP(op_AND_39)), DUP(op_AND_39)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_39)), DUP(op_AND_39))), CAST(32, MSB(DUP(op_AND_39)), DUP(op_AND_39))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_39)), DUP(op_AND_39))), CAST(32, MSB(DUP(op_AND_39)), DUP(op_AND_39)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_39)), DUP(op_AND_39))), CAST(32, MSB(DUP(op_AND_39)), DUP(op_AND_39)))))); + RzILOpPure *op_SUB_45 = SUB(op_MUL_23, op_MUL_44); + RzILOpEffect *op_ASSIGN_46 = WRITE_REG(bundle, Rdd_op, op_SUB_45); + + RzILOpEffect *instruction_sequence = op_ASSIGN_46; + return instruction_sequence; +} + +// Rxx += cmpyrw(Rss,Rtt) +RzILOpEffect *hex_il_op_m7_dcmpyrw_acc(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rxx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // Rxx = Rxx + ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * ((st64) ((st32) ((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff))))) - ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * ((st64) ((st32) ((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff))))); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rss, SN(32, 0)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_16 = SHIFTRA(Rtt, 
SN(32, 0)); + RzILOpPure *op_AND_18 = LOGAND(op_RSHIFT_16, SN(64, 0xffffffff)); + RzILOpPure *op_MUL_23 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_7), DUP(op_AND_7))), CAST(32, MSB(DUP(op_AND_7)), DUP(op_AND_7)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_7)), DUP(op_AND_7))), CAST(32, MSB(DUP(op_AND_7)), DUP(op_AND_7))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_7)), DUP(op_AND_7))), CAST(32, MSB(DUP(op_AND_7)), DUP(op_AND_7)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_7)), DUP(op_AND_7))), CAST(32, MSB(DUP(op_AND_7)), DUP(op_AND_7))))), CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_18), DUP(op_AND_18))), CAST(32, MSB(DUP(op_AND_18)), DUP(op_AND_18)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_18)), DUP(op_AND_18))), CAST(32, MSB(DUP(op_AND_18)), DUP(op_AND_18))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_18)), DUP(op_AND_18))), CAST(32, MSB(DUP(op_AND_18)), DUP(op_AND_18)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_18)), DUP(op_AND_18))), CAST(32, MSB(DUP(op_AND_18)), DUP(op_AND_18)))))); + RzILOpPure *op_RSHIFT_27 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_29 = LOGAND(op_RSHIFT_27, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_37 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_39 = LOGAND(op_RSHIFT_37, SN(64, 0xffffffff)); + RzILOpPure *op_MUL_44 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_29), DUP(op_AND_29))), CAST(32, MSB(DUP(op_AND_29)), DUP(op_AND_29)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_29)), DUP(op_AND_29))), CAST(32, MSB(DUP(op_AND_29)), DUP(op_AND_29))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_29)), DUP(op_AND_29))), CAST(32, MSB(DUP(op_AND_29)), DUP(op_AND_29)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_29)), DUP(op_AND_29))), CAST(32, MSB(DUP(op_AND_29)), DUP(op_AND_29))))), CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_39), DUP(op_AND_39))), CAST(32, MSB(DUP(op_AND_39)), DUP(op_AND_39)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_39)), 
DUP(op_AND_39))), CAST(32, MSB(DUP(op_AND_39)), DUP(op_AND_39))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_39)), DUP(op_AND_39))), CAST(32, MSB(DUP(op_AND_39)), DUP(op_AND_39)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_39)), DUP(op_AND_39))), CAST(32, MSB(DUP(op_AND_39)), DUP(op_AND_39)))))); + RzILOpPure *op_SUB_45 = SUB(op_MUL_23, op_MUL_44); + RzILOpPure *op_ADD_46 = ADD(READ_REG(pkt, Rxx_op, false), op_SUB_45); + RzILOpEffect *op_ASSIGN_ADD_47 = WRITE_REG(bundle, Rxx_op, op_ADD_46); + + RzILOpEffect *instruction_sequence = op_ASSIGN_ADD_47; + return instruction_sequence; +} + +// Rdd = cmpyrw(Rss,Rtt*) +RzILOpEffect *hex_il_op_m7_dcmpyrwc(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // Rdd = ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * ((st64) ((st32) ((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff))))) + ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * ((st64) ((st32) ((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff))))); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rss, SN(32, 0)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_16 = SHIFTRA(Rtt, SN(32, 0)); + RzILOpPure *op_AND_18 = LOGAND(op_RSHIFT_16, SN(64, 0xffffffff)); + RzILOpPure *op_MUL_23 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_7), DUP(op_AND_7))), CAST(32, MSB(DUP(op_AND_7)), DUP(op_AND_7)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_7)), DUP(op_AND_7))), CAST(32, MSB(DUP(op_AND_7)), DUP(op_AND_7))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_7)), DUP(op_AND_7))), CAST(32, MSB(DUP(op_AND_7)), DUP(op_AND_7)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_7)), DUP(op_AND_7))), CAST(32, MSB(DUP(op_AND_7)), 
DUP(op_AND_7))))), CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_18), DUP(op_AND_18))), CAST(32, MSB(DUP(op_AND_18)), DUP(op_AND_18)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_18)), DUP(op_AND_18))), CAST(32, MSB(DUP(op_AND_18)), DUP(op_AND_18))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_18)), DUP(op_AND_18))), CAST(32, MSB(DUP(op_AND_18)), DUP(op_AND_18)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_18)), DUP(op_AND_18))), CAST(32, MSB(DUP(op_AND_18)), DUP(op_AND_18)))))); + RzILOpPure *op_RSHIFT_27 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_29 = LOGAND(op_RSHIFT_27, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_37 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_39 = LOGAND(op_RSHIFT_37, SN(64, 0xffffffff)); + RzILOpPure *op_MUL_44 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_29), DUP(op_AND_29))), CAST(32, MSB(DUP(op_AND_29)), DUP(op_AND_29)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_29)), DUP(op_AND_29))), CAST(32, MSB(DUP(op_AND_29)), DUP(op_AND_29))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_29)), DUP(op_AND_29))), CAST(32, MSB(DUP(op_AND_29)), DUP(op_AND_29)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_29)), DUP(op_AND_29))), CAST(32, MSB(DUP(op_AND_29)), DUP(op_AND_29))))), CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_39), DUP(op_AND_39))), CAST(32, MSB(DUP(op_AND_39)), DUP(op_AND_39)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_39)), DUP(op_AND_39))), CAST(32, MSB(DUP(op_AND_39)), DUP(op_AND_39))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_39)), DUP(op_AND_39))), CAST(32, MSB(DUP(op_AND_39)), DUP(op_AND_39)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_39)), DUP(op_AND_39))), CAST(32, MSB(DUP(op_AND_39)), DUP(op_AND_39)))))); + RzILOpPure *op_ADD_45 = ADD(op_MUL_23, op_MUL_44); + RzILOpEffect *op_ASSIGN_46 = WRITE_REG(bundle, Rdd_op, op_ADD_45); + + RzILOpEffect *instruction_sequence = op_ASSIGN_46; + return instruction_sequence; +} + +// Rxx += cmpyrw(Rss,Rtt*) +RzILOpEffect 
*hex_il_op_m7_dcmpyrwc_acc(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rxx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // Rxx = Rxx + ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))))) * ((st64) ((st32) ((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff))))) + ((st64) ((st32) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))))) * ((st64) ((st32) ((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff))))); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rss, SN(32, 0)); + RzILOpPure *op_AND_7 = LOGAND(op_RSHIFT_5, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_16 = SHIFTRA(Rtt, SN(32, 0)); + RzILOpPure *op_AND_18 = LOGAND(op_RSHIFT_16, SN(64, 0xffffffff)); + RzILOpPure *op_MUL_23 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_7), DUP(op_AND_7))), CAST(32, MSB(DUP(op_AND_7)), DUP(op_AND_7)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_7)), DUP(op_AND_7))), CAST(32, MSB(DUP(op_AND_7)), DUP(op_AND_7))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_7)), DUP(op_AND_7))), CAST(32, MSB(DUP(op_AND_7)), DUP(op_AND_7)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_7)), DUP(op_AND_7))), CAST(32, MSB(DUP(op_AND_7)), DUP(op_AND_7))))), CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_18), DUP(op_AND_18))), CAST(32, MSB(DUP(op_AND_18)), DUP(op_AND_18)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_18)), DUP(op_AND_18))), CAST(32, MSB(DUP(op_AND_18)), DUP(op_AND_18))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_18)), DUP(op_AND_18))), CAST(32, MSB(DUP(op_AND_18)), DUP(op_AND_18)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_18)), DUP(op_AND_18))), CAST(32, MSB(DUP(op_AND_18)), DUP(op_AND_18)))))); + RzILOpPure *op_RSHIFT_27 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_29 = LOGAND(op_RSHIFT_27, SN(64, 
0xffffffff)); + RzILOpPure *op_RSHIFT_37 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_39 = LOGAND(op_RSHIFT_37, SN(64, 0xffffffff)); + RzILOpPure *op_MUL_44 = MUL(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_29), DUP(op_AND_29))), CAST(32, MSB(DUP(op_AND_29)), DUP(op_AND_29)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_29)), DUP(op_AND_29))), CAST(32, MSB(DUP(op_AND_29)), DUP(op_AND_29))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_29)), DUP(op_AND_29))), CAST(32, MSB(DUP(op_AND_29)), DUP(op_AND_29)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_29)), DUP(op_AND_29))), CAST(32, MSB(DUP(op_AND_29)), DUP(op_AND_29))))), CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_39), DUP(op_AND_39))), CAST(32, MSB(DUP(op_AND_39)), DUP(op_AND_39)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_39)), DUP(op_AND_39))), CAST(32, MSB(DUP(op_AND_39)), DUP(op_AND_39))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_39)), DUP(op_AND_39))), CAST(32, MSB(DUP(op_AND_39)), DUP(op_AND_39)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_39)), DUP(op_AND_39))), CAST(32, MSB(DUP(op_AND_39)), DUP(op_AND_39)))))); + RzILOpPure *op_ADD_45 = ADD(op_MUL_23, op_MUL_44); + RzILOpPure *op_ADD_46 = ADD(READ_REG(pkt, Rxx_op, false), op_ADD_45); + RzILOpEffect *op_ASSIGN_ADD_47 = WRITE_REG(bundle, Rxx_op, op_ADD_46); + + RzILOpEffect *instruction_sequence = op_ASSIGN_ADD_47; + return instruction_sequence; +} + +// Rd = cmpyiw(Rss,Rtt):<<1:sat +RzILOpEffect *hex_il_op_m7_wcmpyiw(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Rd = cmpyiw(Rss,Rtt):<<1:rnd:sat +RzILOpEffect *hex_il_op_m7_wcmpyiw_rnd(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Rd = cmpyiw(Rss,Rtt*):<<1:sat +RzILOpEffect *hex_il_op_m7_wcmpyiwc(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Rd = cmpyiw(Rss,Rtt*):<<1:rnd:sat +RzILOpEffect *hex_il_op_m7_wcmpyiwc_rnd(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Rd = cmpyrw(Rss,Rtt):<<1:sat +RzILOpEffect 
*hex_il_op_m7_wcmpyrw(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Rd = cmpyrw(Rss,Rtt):<<1:rnd:sat +RzILOpEffect *hex_il_op_m7_wcmpyrw_rnd(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Rd = cmpyrw(Rss,Rtt*):<<1:sat +RzILOpEffect *hex_il_op_m7_wcmpyrwc(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Rd = cmpyrw(Rss,Rtt*):<<1:rnd:sat +RzILOpEffect *hex_il_op_m7_wcmpyrwc_rnd(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +#include \ No newline at end of file diff --git a/librz/arch/isa/hexagon/il_ops/hexagon_il_PS_ops.c b/librz/arch/isa/hexagon/il_ops/hexagon_il_PS_ops.c new file mode 100644 index 00000000000..23bd6ebb61c --- /dev/null +++ b/librz/arch/isa/hexagon/il_ops/hexagon_il_PS_ops.c @@ -0,0 +1,104 @@ +// SPDX-FileCopyrightText: 2021 Rot127 +// SPDX-License-Identifier: LGPL-3.0-only + +// LLVM commit: 7cbf1a2591520c2491aa35339f227775f4d3adf6 +// LLVM commit date: 2023-06-10 15:58:16 -0700 (ISO 8601 format) +// Date of code generation: 2023-11-07 17:32:44-05:00 +// SPDX-FileCopyrightText: 2021 Rot127 +// SPDX-License-Identifier: LGPL-3.0-only + +// LLVM commit: 7cbf1a2591520c2491aa35339f227775f4d3adf6 +// LLVM commit date: 2023-06-10 15:58:16 -0700 (ISO 8601 format) +// Date of code generation: 2023-11-07 14:54:00-05:00 +// SPDX-FileCopyrightText: 2021 Rot127 +// SPDX-License-Identifier: LGPL-3.0-only + +// LLVM commit: 7cbf1a2591520c2491aa35339f227775f4d3adf6 +// LLVM commit date: 2023-06-10 15:58:16 -0700 (ISO 8601 format) +// Date of code generation: 2023-11-07 14:33:13-05:00 +//======================================== +// The following code is generated. +// Do not edit. 
Repository of code generator: +// https://github.com/rizinorg/rz-hexagon + +#include +#include "../hexagon_il.h" +#include +#include + +// Rd = memb(Ii) +RzILOpEffect *hex_il_op_ps_loadrbabs(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Rdd = memd(Ii) +RzILOpEffect *hex_il_op_ps_loadrdabs(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Rd = memh(Ii) +RzILOpEffect *hex_il_op_ps_loadrhabs(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Rd = memw(Ii) +RzILOpEffect *hex_il_op_ps_loadriabs(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Rd = memub(Ii) +RzILOpEffect *hex_il_op_ps_loadrubabs(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Rd = memuh(Ii) +RzILOpEffect *hex_il_op_ps_loadruhabs(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// memb(Ii) = Rt +RzILOpEffect *hex_il_op_ps_storerbabs(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// memb(Ii) = Nt.new +RzILOpEffect *hex_il_op_ps_storerbnewabs(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// memd(Ii) = Rtt +RzILOpEffect *hex_il_op_ps_storerdabs(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// memh(Ii) = Rt.h +RzILOpEffect *hex_il_op_ps_storerfabs(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// memh(Ii) = Rt +RzILOpEffect *hex_il_op_ps_storerhabs(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// memh(Ii) = Nt.new +RzILOpEffect *hex_il_op_ps_storerhnewabs(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// memw(Ii) = Rt +RzILOpEffect *hex_il_op_ps_storeriabs(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// memw(Ii) = Nt.new +RzILOpEffect *hex_il_op_ps_storerinewabs(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// trap1(Ii) +RzILOpEffect *hex_il_op_ps_trap1(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +#include \ No newline at end of file diff --git a/librz/arch/isa/hexagon/il_ops/hexagon_il_R6_ops.c b/librz/arch/isa/hexagon/il_ops/hexagon_il_R6_ops.c new file mode 100644 index 00000000000..9f8467f3f41 --- 
/dev/null +++ b/librz/arch/isa/hexagon/il_ops/hexagon_il_R6_ops.c @@ -0,0 +1,27 @@ +// SPDX-FileCopyrightText: 2021 Rot127 +// SPDX-License-Identifier: LGPL-3.0-only + +// LLVM commit: b6f51787f6c8e77143f0aef6b58ddc7c55741d5c +// LLVM commit date: 2023-11-15 07:10:59 -0800 (ISO 8601 format) +// Date of code generation: 2024-03-16 06:22:39-05:00 +//======================================== +// The following code is generated. +// Do not edit. Repository of code generator: +// https://github.com/rizinorg/rz-hexagon + +#include +#include "../hexagon_il.h" +#include +#include + +// release(Rs):at +RzILOpEffect *hex_il_op_r6_release_at_vi(HexInsnPktBundle *bundle) { + return NOP(); +} + +// release(Rs):st +RzILOpEffect *hex_il_op_r6_release_st_vi(HexInsnPktBundle *bundle) { + return NOP(); +} + +#include \ No newline at end of file diff --git a/librz/arch/isa/hexagon/il_ops/hexagon_il_S2_ops.c b/librz/arch/isa/hexagon/il_ops/hexagon_il_S2_ops.c new file mode 100644 index 00000000000..dad68401911 --- /dev/null +++ b/librz/arch/isa/hexagon/il_ops/hexagon_il_S2_ops.c @@ -0,0 +1,11781 @@ +// SPDX-FileCopyrightText: 2021 Rot127 +// SPDX-License-Identifier: LGPL-3.0-only + +// LLVM commit: b6f51787f6c8e77143f0aef6b58ddc7c55741d5c +// LLVM commit date: 2023-11-15 07:10:59 -0800 (ISO 8601 format) +// Date of code generation: 2024-03-16 06:22:39-05:00 +//======================================== +// The following code is generated. +// Do not edit. 
Repository of code generator: +// https://github.com/rizinorg/rz-hexagon + +#include +#include "../hexagon_il.h" +#include +#include + +// Rd = addasl(Rt,Rs,Ii) +RzILOpEffect *hex_il_op_s2_addasl_rrri(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // u = u; + RzILOpEffect *imm_assign_2 = SETL("u", u); + + // Rd = Rt + ((u >= ((ut32) 0x20)) ? 0x0 : (Rs << u)); + RzILOpPure *op_GE_9 = UGE(VARL("u"), CAST(32, IL_FALSE, SN(32, 0x20))); + RzILOpPure *op_LSHIFT_11 = SHIFTL0(Rs, VARL("u")); + RzILOpPure *cond_12 = ITE(op_GE_9, SN(32, 0), op_LSHIFT_11); + RzILOpPure *op_ADD_13 = ADD(Rt, cond_12); + RzILOpEffect *op_ASSIGN_14 = WRITE_REG(bundle, Rd_op, op_ADD_13); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_2, op_ASSIGN_14); + return instruction_sequence; +} + +// allocframe(Rx,Ii):raw +RzILOpEffect *hex_il_op_s2_allocframe(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp lr_op = ALIAS2OP(HEX_REG_ALIAS_LR, false); + RzILOpPure *lr = READ_REG(pkt, &lr_op, false); + const HexOp fp_op = ALIAS2OP(HEX_REG_ALIAS_FP, false); + const HexOp framekey_op = ALIAS2OP(HEX_REG_ALIAS_FRAMEKEY, false); + RzILOpPure *framekey = READ_REG(pkt, &framekey_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + + // EA = ((ut32) Rx + -0x8); + RzILOpPure *op_ADD_4 = ADD(READ_REG(pkt, Rx_op, false), SN(32, -8)); + RzILOpEffect *op_ASSIGN_6 = SETL("EA", CAST(32, IL_FALSE, op_ADD_4)); + + // mem_store_ut64(EA, ((ut64) (((((ut64) lr) << 0x20) | ((ut64) fp)) ^ (((ut64) framekey) << 0x20)))); + 
RzILOpPure *op_LSHIFT_11 = SHIFTL0(CAST(64, IL_FALSE, lr), SN(32, 0x20)); + RzILOpPure *op_OR_14 = LOGOR(op_LSHIFT_11, CAST(64, IL_FALSE, READ_REG(pkt, &fp_op, true))); + RzILOpPure *op_LSHIFT_18 = SHIFTL0(CAST(64, IL_FALSE, framekey), SN(32, 0x20)); + RzILOpPure *op_XOR_19 = LOGXOR(op_OR_14, op_LSHIFT_18); + RzILOpEffect *ms_cast_ut64_20_21 = STOREW(VARL("EA"), CAST(64, IL_FALSE, op_XOR_19)); + + // fp = EA; + RzILOpEffect *op_ASSIGN_22 = WRITE_REG(bundle, &fp_op, VARL("EA")); + + // u = u; + RzILOpEffect *imm_assign_24 = SETL("u", u); + + // Rx = ((st32) EA - u); + RzILOpPure *op_SUB_26 = SUB(VARL("EA"), VARL("u")); + RzILOpEffect *op_ASSIGN_28 = WRITE_REG(bundle, Rx_op, CAST(32, IL_FALSE, op_SUB_26)); + + RzILOpEffect *instruction_sequence = SEQN(5, imm_assign_24, op_ASSIGN_6, ms_cast_ut64_20_21, op_ASSIGN_22, op_ASSIGN_28); + return instruction_sequence; +} + +// Rdd = asl(Rss,Ii) +RzILOpEffect *hex_il_op_s2_asl_i_p(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + + // u = u; + RzILOpEffect *imm_assign_1 = SETL("u", u); + + // Rdd = ((u >= ((ut32) 0x40)) ? 
// NOTE(review): Auto-generated RzIL lifter code for the Hexagon S2_asl
// (arithmetic-shift-left) instruction family. Per the file header this code
// is generated ("Do not edit") — fix defects in the generator, not here.
// Each function builds an RzILOpEffect graph: immediate/shift-amount setup
// first (SETL), then the shifted value, then the destination write
// (WRITE_REG), chained with SEQN. The numeric suffixes on locals
// (op_GE_8, cond_12, ...) are generator node ids, not semantics.
((st64) 0x0) : (Rss << u));
	RzILOpPure *op_GE_8 = UGE(VARL("u"), CAST(32, IL_FALSE, SN(32, 0x40)));
	RzILOpPure *op_LSHIFT_10 = SHIFTL0(Rss, VARL("u"));
	RzILOpPure *cond_12 = ITE(op_GE_8, CAST(64, MSB(SN(32, 0)), SN(32, 0)), op_LSHIFT_10);
	RzILOpEffect *op_ASSIGN_13 = WRITE_REG(bundle, Rdd_op, cond_12);

	RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_1, op_ASSIGN_13);
	return instruction_sequence;
}

// Rxx += asl(Rss,Ii)
RzILOpEffect *hex_il_op_s2_asl_i_p_acc(HexInsnPktBundle *bundle) {
	const HexInsn *hi = bundle->insn;
	HexPkt *pkt = bundle->pkt;
	// READ
	const HexOp *Rxx_op = ISA2REG(hi, 'x', false);

	RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u'));
	const HexOp *Rss_op = ISA2REG(hi, 's', false);
	RzILOpPure *Rss = READ_REG(pkt, Rss_op, false);

	// u = u;
	RzILOpEffect *imm_assign_1 = SETL("u", u);

	// Rxx = Rxx + ((u >= ((ut32) 0x40)) ? ((st64) 0x0) : (Rss << u));
	RzILOpPure *op_GE_8 = UGE(VARL("u"), CAST(32, IL_FALSE, SN(32, 0x40)));
	RzILOpPure *op_LSHIFT_10 = SHIFTL0(Rss, VARL("u"));
	RzILOpPure *cond_12 = ITE(op_GE_8, CAST(64, MSB(SN(32, 0)), SN(32, 0)), op_LSHIFT_10);
	RzILOpPure *op_ADD_13 = ADD(READ_REG(pkt, Rxx_op, false), cond_12);
	RzILOpEffect *op_ASSIGN_14 = WRITE_REG(bundle, Rxx_op, op_ADD_13);

	RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_1, op_ASSIGN_14);
	return instruction_sequence;
}

// Rxx &= asl(Rss,Ii)
RzILOpEffect *hex_il_op_s2_asl_i_p_and(HexInsnPktBundle *bundle) {
	const HexInsn *hi = bundle->insn;
	HexPkt *pkt = bundle->pkt;
	// READ
	const HexOp *Rxx_op = ISA2REG(hi, 'x', false);

	RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u'));
	const HexOp *Rss_op = ISA2REG(hi, 's', false);
	RzILOpPure *Rss = READ_REG(pkt, Rss_op, false);

	// u = u;
	RzILOpEffect *imm_assign_1 = SETL("u", u);

	// Rxx = (Rxx & ((u >= ((ut32) 0x40)) ? ((st64) 0x0) : (Rss << u)));
	RzILOpPure *op_GE_8 = UGE(VARL("u"), CAST(32, IL_FALSE, SN(32, 0x40)));
	RzILOpPure *op_LSHIFT_10 = SHIFTL0(Rss, VARL("u"));
	RzILOpPure *cond_12 = ITE(op_GE_8, CAST(64, MSB(SN(32, 0)), SN(32, 0)), op_LSHIFT_10);
	RzILOpPure *op_AND_13 = LOGAND(READ_REG(pkt, Rxx_op, false), cond_12);
	RzILOpEffect *op_ASSIGN_14 = WRITE_REG(bundle, Rxx_op, op_AND_13);

	RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_1, op_ASSIGN_14);
	return instruction_sequence;
}

// Rxx -= asl(Rss,Ii)
RzILOpEffect *hex_il_op_s2_asl_i_p_nac(HexInsnPktBundle *bundle) {
	const HexInsn *hi = bundle->insn;
	HexPkt *pkt = bundle->pkt;
	// READ
	const HexOp *Rxx_op = ISA2REG(hi, 'x', false);

	RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u'));
	const HexOp *Rss_op = ISA2REG(hi, 's', false);
	RzILOpPure *Rss = READ_REG(pkt, Rss_op, false);

	// u = u;
	RzILOpEffect *imm_assign_1 = SETL("u", u);

	// Rxx = Rxx - ((u >= ((ut32) 0x40)) ? ((st64) 0x0) : (Rss << u));
	RzILOpPure *op_GE_8 = UGE(VARL("u"), CAST(32, IL_FALSE, SN(32, 0x40)));
	RzILOpPure *op_LSHIFT_10 = SHIFTL0(Rss, VARL("u"));
	RzILOpPure *cond_12 = ITE(op_GE_8, CAST(64, MSB(SN(32, 0)), SN(32, 0)), op_LSHIFT_10);
	RzILOpPure *op_SUB_13 = SUB(READ_REG(pkt, Rxx_op, false), cond_12);
	RzILOpEffect *op_ASSIGN_14 = WRITE_REG(bundle, Rxx_op, op_SUB_13);

	RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_1, op_ASSIGN_14);
	return instruction_sequence;
}

// Rxx |= asl(Rss,Ii)
RzILOpEffect *hex_il_op_s2_asl_i_p_or(HexInsnPktBundle *bundle) {
	const HexInsn *hi = bundle->insn;
	HexPkt *pkt = bundle->pkt;
	// READ
	const HexOp *Rxx_op = ISA2REG(hi, 'x', false);

	RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u'));
	const HexOp *Rss_op = ISA2REG(hi, 's', false);
	RzILOpPure *Rss = READ_REG(pkt, Rss_op, false);

	// u = u;
	RzILOpEffect *imm_assign_1 = SETL("u", u);

	// Rxx = (Rxx | ((u >= ((ut32) 0x40)) ? ((st64) 0x0) : (Rss << u)));
	RzILOpPure *op_GE_8 = UGE(VARL("u"), CAST(32, IL_FALSE, SN(32, 0x40)));
	RzILOpPure *op_LSHIFT_10 = SHIFTL0(Rss, VARL("u"));
	RzILOpPure *cond_12 = ITE(op_GE_8, CAST(64, MSB(SN(32, 0)), SN(32, 0)), op_LSHIFT_10);
	RzILOpPure *op_OR_13 = LOGOR(READ_REG(pkt, Rxx_op, false), cond_12);
	RzILOpEffect *op_ASSIGN_14 = WRITE_REG(bundle, Rxx_op, op_OR_13);

	RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_1, op_ASSIGN_14);
	return instruction_sequence;
}

// Rxx ^= asl(Rss,Ii)
RzILOpEffect *hex_il_op_s2_asl_i_p_xacc(HexInsnPktBundle *bundle) {
	const HexInsn *hi = bundle->insn;
	HexPkt *pkt = bundle->pkt;
	// READ
	const HexOp *Rxx_op = ISA2REG(hi, 'x', false);

	RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u'));
	const HexOp *Rss_op = ISA2REG(hi, 's', false);
	RzILOpPure *Rss = READ_REG(pkt, Rss_op, false);

	// u = u;
	RzILOpEffect *imm_assign_1 = SETL("u", u);

	// Rxx = (Rxx ^ ((u >= ((ut32) 0x40)) ? ((st64) 0x0) : (Rss << u)));
	RzILOpPure *op_GE_8 = UGE(VARL("u"), CAST(32, IL_FALSE, SN(32, 0x40)));
	RzILOpPure *op_LSHIFT_10 = SHIFTL0(Rss, VARL("u"));
	RzILOpPure *cond_12 = ITE(op_GE_8, CAST(64, MSB(SN(32, 0)), SN(32, 0)), op_LSHIFT_10);
	RzILOpPure *op_XOR_13 = LOGXOR(READ_REG(pkt, Rxx_op, false), cond_12);
	RzILOpEffect *op_ASSIGN_14 = WRITE_REG(bundle, Rxx_op, op_XOR_13);

	RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_1, op_ASSIGN_14);
	return instruction_sequence;
}

// Rd = asl(Rs,Ii)
RzILOpEffect *hex_il_op_s2_asl_i_r(HexInsnPktBundle *bundle) {
	const HexInsn *hi = bundle->insn;
	HexPkt *pkt = bundle->pkt;
	// READ
	const HexOp *Rd_op = ISA2REG(hi, 'd', false);
	RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u'));
	const HexOp *Rs_op = ISA2REG(hi, 's', false);
	RzILOpPure *Rs = READ_REG(pkt, Rs_op, false);

	// u = u;
	RzILOpEffect *imm_assign_1 = SETL("u", u);

	// Rd = ((u >= ((ut32) 0x20)) ? 0x0 : (Rs << u));
	RzILOpPure *op_GE_8 = UGE(VARL("u"), CAST(32, IL_FALSE, SN(32, 0x20)));
	RzILOpPure *op_LSHIFT_10 = SHIFTL0(Rs, VARL("u"));
	RzILOpPure *cond_11 = ITE(op_GE_8, SN(32, 0), op_LSHIFT_10);
	RzILOpEffect *op_ASSIGN_12 = WRITE_REG(bundle, Rd_op, cond_11);

	RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_1, op_ASSIGN_12);
	return instruction_sequence;
}

// Rx += asl(Rs,Ii)
RzILOpEffect *hex_il_op_s2_asl_i_r_acc(HexInsnPktBundle *bundle) {
	const HexInsn *hi = bundle->insn;
	HexPkt *pkt = bundle->pkt;
	// READ
	const HexOp *Rx_op = ISA2REG(hi, 'x', false);

	RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u'));
	const HexOp *Rs_op = ISA2REG(hi, 's', false);
	RzILOpPure *Rs = READ_REG(pkt, Rs_op, false);

	// u = u;
	RzILOpEffect *imm_assign_1 = SETL("u", u);

	// Rx = Rx + ((u >= ((ut32) 0x20)) ? 0x0 : (Rs << u));
	RzILOpPure *op_GE_8 = UGE(VARL("u"), CAST(32, IL_FALSE, SN(32, 0x20)));
	RzILOpPure *op_LSHIFT_10 = SHIFTL0(Rs, VARL("u"));
	RzILOpPure *cond_11 = ITE(op_GE_8, SN(32, 0), op_LSHIFT_10);
	RzILOpPure *op_ADD_12 = ADD(READ_REG(pkt, Rx_op, false), cond_11);
	RzILOpEffect *op_ASSIGN_13 = WRITE_REG(bundle, Rx_op, op_ADD_12);

	RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_1, op_ASSIGN_13);
	return instruction_sequence;
}

// Rx &= asl(Rs,Ii)
RzILOpEffect *hex_il_op_s2_asl_i_r_and(HexInsnPktBundle *bundle) {
	const HexInsn *hi = bundle->insn;
	HexPkt *pkt = bundle->pkt;
	// READ
	const HexOp *Rx_op = ISA2REG(hi, 'x', false);

	RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u'));
	const HexOp *Rs_op = ISA2REG(hi, 's', false);
	RzILOpPure *Rs = READ_REG(pkt, Rs_op, false);

	// u = u;
	RzILOpEffect *imm_assign_1 = SETL("u", u);

	// Rx = (Rx & ((u >= ((ut32) 0x20)) ? 0x0 : (Rs << u)));
	RzILOpPure *op_GE_8 = UGE(VARL("u"), CAST(32, IL_FALSE, SN(32, 0x20)));
	RzILOpPure *op_LSHIFT_10 = SHIFTL0(Rs, VARL("u"));
	RzILOpPure *cond_11 = ITE(op_GE_8, SN(32, 0), op_LSHIFT_10);
	RzILOpPure *op_AND_12 = LOGAND(READ_REG(pkt, Rx_op, false), cond_11);
	RzILOpEffect *op_ASSIGN_13 = WRITE_REG(bundle, Rx_op, op_AND_12);

	RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_1, op_ASSIGN_13);
	return instruction_sequence;
}

// Rx -= asl(Rs,Ii)
RzILOpEffect *hex_il_op_s2_asl_i_r_nac(HexInsnPktBundle *bundle) {
	const HexInsn *hi = bundle->insn;
	HexPkt *pkt = bundle->pkt;
	// READ
	const HexOp *Rx_op = ISA2REG(hi, 'x', false);

	RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u'));
	const HexOp *Rs_op = ISA2REG(hi, 's', false);
	RzILOpPure *Rs = READ_REG(pkt, Rs_op, false);

	// u = u;
	RzILOpEffect *imm_assign_1 = SETL("u", u);

	// Rx = Rx - ((u >= ((ut32) 0x20)) ? 0x0 : (Rs << u));
	RzILOpPure *op_GE_8 = UGE(VARL("u"), CAST(32, IL_FALSE, SN(32, 0x20)));
	RzILOpPure *op_LSHIFT_10 = SHIFTL0(Rs, VARL("u"));
	RzILOpPure *cond_11 = ITE(op_GE_8, SN(32, 0), op_LSHIFT_10);
	RzILOpPure *op_SUB_12 = SUB(READ_REG(pkt, Rx_op, false), cond_11);
	RzILOpEffect *op_ASSIGN_13 = WRITE_REG(bundle, Rx_op, op_SUB_12);

	RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_1, op_ASSIGN_13);
	return instruction_sequence;
}

// Rx |= asl(Rs,Ii)
RzILOpEffect *hex_il_op_s2_asl_i_r_or(HexInsnPktBundle *bundle) {
	const HexInsn *hi = bundle->insn;
	HexPkt *pkt = bundle->pkt;
	// READ
	const HexOp *Rx_op = ISA2REG(hi, 'x', false);

	RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u'));
	const HexOp *Rs_op = ISA2REG(hi, 's', false);
	RzILOpPure *Rs = READ_REG(pkt, Rs_op, false);

	// u = u;
	RzILOpEffect *imm_assign_1 = SETL("u", u);

	// Rx = (Rx | ((u >= ((ut32) 0x20)) ? 0x0 : (Rs << u)));
	RzILOpPure *op_GE_8 = UGE(VARL("u"), CAST(32, IL_FALSE, SN(32, 0x20)));
	RzILOpPure *op_LSHIFT_10 = SHIFTL0(Rs, VARL("u"));
	RzILOpPure *cond_11 = ITE(op_GE_8, SN(32, 0), op_LSHIFT_10);
	RzILOpPure *op_OR_12 = LOGOR(READ_REG(pkt, Rx_op, false), cond_11);
	RzILOpEffect *op_ASSIGN_13 = WRITE_REG(bundle, Rx_op, op_OR_12);

	RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_1, op_ASSIGN_13);
	return instruction_sequence;
}

// Rd = asl(Rs,Ii):sat
// NOTE(review): the saturating variant additionally sets the USR overflow
// field (HEX_REG_FIELD_USR_OVF) when the 32-bit sign-extended result does
// not equal the 64-bit shift result; h_tmp451 holds the saturation bound.
RzILOpEffect *hex_il_op_s2_asl_i_r_sat(HexInsnPktBundle *bundle) {
	const HexInsn *hi = bundle->insn;
	HexPkt *pkt = bundle->pkt;
	// READ
	const HexOp *Rd_op = ISA2REG(hi, 'd', false);
	RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u'));
	const HexOp *Rs_op = ISA2REG(hi, 's', false);
	RzILOpPure *Rs = READ_REG(pkt, Rs_op, false);

	// u = u;
	RzILOpEffect *imm_assign_4 = SETL("u", u);

	// set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1));
	RzILOpEffect *set_usr_field_call_45 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1)));

	// HYB(gcc_expr_if ((sextract64(((ut64) ((u >= ((ut32) 0x20)) ? ((st64) 0x0) : (((st64) Rs) << u))), 0x0, 0x20) == ((u >= ((ut32) 0x20)) ? ((st64) 0x0) : (((st64) Rs) << u)))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((u >= ((ut32) 0x20)) ? ((st64) 0x0) : (((st64) Rs) << u)) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1)));
	RzILOpPure *op_GE_11 = UGE(VARL("u"), CAST(32, IL_FALSE, SN(32, 0x20)));
	RzILOpPure *op_LSHIFT_14 = SHIFTL0(CAST(64, MSB(Rs), DUP(Rs)), VARL("u"));
	RzILOpPure *cond_16 = ITE(op_GE_11, CAST(64, MSB(SN(32, 0)), SN(32, 0)), op_LSHIFT_14);
	RzILOpPure *op_GE_26 = UGE(VARL("u"), CAST(32, IL_FALSE, SN(32, 0x20)));
	RzILOpPure *op_LSHIFT_29 = SHIFTL0(CAST(64, MSB(DUP(Rs)), DUP(Rs)), VARL("u"));
	RzILOpPure *cond_31 = ITE(op_GE_26, CAST(64, MSB(SN(32, 0)), SN(32, 0)), op_LSHIFT_29);
	RzILOpPure *op_EQ_32 = EQ(SEXTRACT64(CAST(64, IL_FALSE, cond_16), SN(32, 0), SN(32, 0x20)), cond_31);
	RzILOpPure *op_GE_50 = UGE(VARL("u"), CAST(32, IL_FALSE, SN(32, 0x20)));
	RzILOpPure *op_LSHIFT_53 = SHIFTL0(CAST(64, MSB(DUP(Rs)), DUP(Rs)), VARL("u"));
	RzILOpPure *cond_55 = ITE(op_GE_50, CAST(64, MSB(SN(32, 0)), SN(32, 0)), op_LSHIFT_53);
	RzILOpPure *op_LT_58 = SLT(cond_55, CAST(64, MSB(SN(32, 0)), SN(32, 0)));
	RzILOpPure *op_LSHIFT_63 = SHIFTL0(SN(64, 1), SN(32, 31));
	RzILOpPure *op_NEG_64 = NEG(op_LSHIFT_63);
	RzILOpPure *op_LSHIFT_69 = SHIFTL0(SN(64, 1), SN(32, 31));
	RzILOpPure *op_SUB_72 = SUB(op_LSHIFT_69, CAST(64, MSB(SN(32, 1)), SN(32, 1)));
	RzILOpPure *cond_73 = ITE(op_LT_58, op_NEG_64, op_SUB_72);
	RzILOpEffect *gcc_expr_74 = BRANCH(op_EQ_32, EMPTY(), set_usr_field_call_45);

	// h_tmp451 = HYB(gcc_expr_if ((sextract64(((ut64) ((u >= ((ut32) 0x20)) ? ((st64) 0x0) : (((st64) Rs) << u))), 0x0, 0x20) == ((u >= ((ut32) 0x20)) ? ((st64) 0x0) : (((st64) Rs) << u)))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((u >= ((ut32) 0x20)) ? ((st64) 0x0) : (((st64) Rs) << u)) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1)));
	RzILOpEffect *op_ASSIGN_hybrid_tmp_76 = SETL("h_tmp451", cond_73);

	// seq(HYB(gcc_expr_if ((sextract64(((ut64) ((u >= ((ut32) 0x20)) ? ...;
	RzILOpEffect *seq_77 = SEQN(2, gcc_expr_74, op_ASSIGN_hybrid_tmp_76);

	// Rd = ((st32) ((sextract64(((ut64) ((u >= ((ut32) 0x20)) ? ((st64) 0x0) : (((st64) Rs) << u))), 0x0, 0x20) == ((u >= ((ut32) 0x20)) ? ((st64) 0x0) : (((st64) Rs) << u))) ? ((u >= ((ut32) 0x20)) ? ((st64) 0x0) : (((st64) Rs) << u)) : h_tmp451));
	RzILOpPure *op_GE_37 = UGE(VARL("u"), CAST(32, IL_FALSE, SN(32, 0x20)));
	RzILOpPure *op_LSHIFT_40 = SHIFTL0(CAST(64, MSB(DUP(Rs)), DUP(Rs)), VARL("u"));
	RzILOpPure *cond_42 = ITE(op_GE_37, CAST(64, MSB(SN(32, 0)), SN(32, 0)), op_LSHIFT_40);
	RzILOpPure *cond_78 = ITE(DUP(op_EQ_32), cond_42, VARL("h_tmp451"));
	RzILOpEffect *op_ASSIGN_80 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(cond_78), DUP(cond_78)));

	// seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((u >= ((ut32) 0x20 ...;
	RzILOpEffect *seq_81 = SEQN(2, seq_77, op_ASSIGN_80);

	RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_4, seq_81);
	return instruction_sequence;
}

// Rx ^= asl(Rs,Ii)
RzILOpEffect *hex_il_op_s2_asl_i_r_xacc(HexInsnPktBundle *bundle) {
	const HexInsn *hi = bundle->insn;
	HexPkt *pkt = bundle->pkt;
	// READ
	const HexOp *Rx_op = ISA2REG(hi, 'x', false);

	RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u'));
	const HexOp *Rs_op = ISA2REG(hi, 's', false);
	RzILOpPure *Rs = READ_REG(pkt, Rs_op, false);

	// u = u;
	RzILOpEffect *imm_assign_1 = SETL("u", u);

	// Rx = (Rx ^ ((u >= ((ut32) 0x20)) ? 0x0 : (Rs << u)));
	RzILOpPure *op_GE_8 = UGE(VARL("u"), CAST(32, IL_FALSE, SN(32, 0x20)));
	RzILOpPure *op_LSHIFT_10 = SHIFTL0(Rs, VARL("u"));
	RzILOpPure *cond_11 = ITE(op_GE_8, SN(32, 0), op_LSHIFT_10);
	RzILOpPure *op_XOR_12 = LOGXOR(READ_REG(pkt, Rx_op, false), cond_11);
	RzILOpEffect *op_ASSIGN_13 = WRITE_REG(bundle, Rx_op, op_XOR_12);

	RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_1, op_ASSIGN_13);
	return instruction_sequence;
}

// Rdd = vaslh(Rss,Ii)
// NOTE(review): vector variant — REPEAT loop shifts each of the four
// 16-bit lanes of Rss independently and reassembles them into Rdd.
RzILOpEffect *hex_il_op_s2_asl_i_vh(HexInsnPktBundle *bundle) {
	const HexInsn *hi = bundle->insn;
	HexPkt *pkt = bundle->pkt;
	// READ
	// Declare: st32 i;
	const HexOp *Rdd_op = ISA2REG(hi, 'd', false);
	const HexOp *Rss_op = ISA2REG(hi, 's', false);
	RzILOpPure *Rss = READ_REG(pkt, Rss_op, false);
	RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u'));

	// i = 0x0;
	RzILOpEffect *op_ASSIGN_2 = SETL("i", SN(32, 0));

	// HYB(++i);
	RzILOpEffect *op_INC_5 = SETL("i", INC(VARL("i"), 32));

	// h_tmp452 = HYB(++i);
	RzILOpEffect *op_ASSIGN_hybrid_tmp_7 = SETL("h_tmp452", VARL("i"));

	// seq(h_tmp452 = HYB(++i); HYB(++i));
	RzILOpEffect *seq_8 = SEQN(2, op_ASSIGN_hybrid_tmp_7, op_INC_5);

	// u = u;
	RzILOpEffect *imm_assign_24 = SETL("u", u);

	// Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * 0x10)))) | (((ut64) (((st32) (((st16) ((Rss >> i * 0x10) & ((st64) 0xffff))) << u)) & 0xffff)) << i * 0x10)));
	RzILOpPure *op_MUL_12 = MUL(VARL("i"), SN(32, 16));
	RzILOpPure *op_LSHIFT_13 = SHIFTL0(SN(64, 0xffff), op_MUL_12);
	RzILOpPure *op_NOT_14 = LOGNOT(op_LSHIFT_13);
	RzILOpPure *op_AND_15 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_14);
	RzILOpPure *op_MUL_18 = MUL(VARL("i"), SN(32, 16));
	RzILOpPure *op_RSHIFT_19 = SHIFTRA(Rss, op_MUL_18);
	RzILOpPure *op_AND_22 = LOGAND(op_RSHIFT_19, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff)));
	RzILOpPure *op_LSHIFT_26 = SHIFTL0(CAST(16, MSB(op_AND_22), DUP(op_AND_22)), VARL("u"));
	RzILOpPure *op_AND_29 = LOGAND(CAST(32, MSB(op_LSHIFT_26), DUP(op_LSHIFT_26)), SN(32, 0xffff));
	RzILOpPure *op_MUL_32 = MUL(VARL("i"), SN(32, 16));
	RzILOpPure *op_LSHIFT_33 = SHIFTL0(CAST(64, IL_FALSE, op_AND_29), op_MUL_32);
	RzILOpPure *op_OR_35 = LOGOR(CAST(64, IL_FALSE, op_AND_15), op_LSHIFT_33);
	RzILOpEffect *op_ASSIGN_37 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_35));

	// seq(h_tmp452; Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * 0x1 ...;
	RzILOpEffect *seq_39 = op_ASSIGN_37;

	// seq(seq(h_tmp452; Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * ...;
	RzILOpEffect *seq_40 = SEQN(2, seq_39, seq_8);

	// while ((i < 0x4)) { seq(seq(h_tmp452; Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * ... };
	RzILOpPure *op_LT_4 = SLT(VARL("i"), SN(32, 4));
	RzILOpEffect *for_41 = REPEAT(op_LT_4, seq_40);

	// seq(i = 0x0; while ((i < 0x4)) { seq(seq(h_tmp452; Rdd = ((st64) ...;
	RzILOpEffect *seq_42 = SEQN(2, op_ASSIGN_2, for_41);

	RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_24, seq_42);
	return instruction_sequence;
}

// Rdd = vaslw(Rss,Ii)
// NOTE(review): same pattern as vaslh but over the two 32-bit lanes.
RzILOpEffect *hex_il_op_s2_asl_i_vw(HexInsnPktBundle *bundle) {
	const HexInsn *hi = bundle->insn;
	HexPkt *pkt = bundle->pkt;
	// READ
	// Declare: st32 i;
	const HexOp *Rdd_op = ISA2REG(hi, 'd', false);
	const HexOp *Rss_op = ISA2REG(hi, 's', false);
	RzILOpPure *Rss = READ_REG(pkt, Rss_op, false);
	RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u'));

	// i = 0x0;
	RzILOpEffect *op_ASSIGN_2 = SETL("i", SN(32, 0));

	// HYB(++i);
	RzILOpEffect *op_INC_5 = SETL("i", INC(VARL("i"), 32));

	// h_tmp453 = HYB(++i);
	RzILOpEffect *op_ASSIGN_hybrid_tmp_7 = SETL("h_tmp453", VARL("i"));

	// seq(h_tmp453 = HYB(++i); HYB(++i));
	RzILOpEffect *seq_8 = SEQN(2, op_ASSIGN_hybrid_tmp_7, op_INC_5);

	// u = u;
	RzILOpEffect *imm_assign_24 = SETL("u", u);

	// Rdd = ((Rdd & (~(0xffffffff << i * 0x20))) | (((((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff))) << u) & 0xffffffff) << i * 0x20));
	RzILOpPure *op_MUL_12 = MUL(VARL("i"), SN(32, 0x20));
	RzILOpPure *op_LSHIFT_13 = SHIFTL0(SN(64, 0xffffffff), op_MUL_12);
	RzILOpPure *op_NOT_14 = LOGNOT(op_LSHIFT_13);
	RzILOpPure *op_AND_15 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_14);
	RzILOpPure *op_MUL_18 = MUL(VARL("i"), SN(32, 0x20));
	RzILOpPure *op_RSHIFT_19 = SHIFTRA(Rss, op_MUL_18);
	RzILOpPure *op_AND_21 = LOGAND(op_RSHIFT_19, SN(64, 0xffffffff));
	RzILOpPure *op_LSHIFT_26 = SHIFTL0(CAST(64, MSB(CAST(32, MSB(op_AND_21), DUP(op_AND_21))), CAST(32, MSB(DUP(op_AND_21)), DUP(op_AND_21))), VARL("u"));
	RzILOpPure *op_AND_28 = LOGAND(op_LSHIFT_26, SN(64, 0xffffffff));
	RzILOpPure *op_MUL_30 = MUL(VARL("i"), SN(32, 0x20));
	RzILOpPure *op_LSHIFT_31 = SHIFTL0(op_AND_28, op_MUL_30);
	RzILOpPure *op_OR_32 = LOGOR(op_AND_15, op_LSHIFT_31);
	RzILOpEffect *op_ASSIGN_33 = WRITE_REG(bundle, Rdd_op, op_OR_32);

	// seq(h_tmp453; Rdd = ((Rdd & (~(0xffffffff << i * 0x20))) | ((((( ...;
	RzILOpEffect *seq_35 = op_ASSIGN_33;

	// seq(seq(h_tmp453; Rdd = ((Rdd & (~(0xffffffff << i * 0x20))) | ( ...;
	RzILOpEffect *seq_36 = SEQN(2, seq_35, seq_8);

	// while ((i < 0x2)) { seq(seq(h_tmp453; Rdd = ((Rdd & (~(0xffffffff << i * 0x20))) | ( ... };
	RzILOpPure *op_LT_4 = SLT(VARL("i"), SN(32, 2));
	RzILOpEffect *for_37 = REPEAT(op_LT_4, seq_36);

	// seq(i = 0x0; while ((i < 0x2)) { seq(seq(h_tmp453; Rdd = ((Rdd & ...;
	RzILOpEffect *seq_38 = SEQN(2, op_ASSIGN_2, for_37);

	RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_24, seq_38);
	return instruction_sequence;
}

// Rdd = asl(Rss,Rt)
// NOTE(review): register-shift variants sign-extract a 7-bit shift amount
// from Rt; a negative shamt turns the asl into an arithmetic right shift
// (done as two SHIFTRA steps to keep (-shamt) in range).
RzILOpEffect *hex_il_op_s2_asl_r_p(HexInsnPktBundle *bundle) {
	const HexInsn *hi = bundle->insn;
	HexPkt *pkt = bundle->pkt;
	// READ
	const HexOp *Rt_op = ISA2REG(hi, 't', false);
	RzILOpPure *Rt = READ_REG(pkt, Rt_op, false);
	// Declare: st32 shamt;
	const HexOp *Rdd_op = ISA2REG(hi, 'd', false);
	const HexOp *Rss_op = ISA2REG(hi, 's', false);
	RzILOpPure *Rss = READ_REG(pkt, Rss_op, false);

	// shamt = ((st32) sextract64(((ut64) Rt), 0x0, 0x7));
	RzILOpEffect *op_ASSIGN_10 = SETL("shamt", CAST(32, MSB(SEXTRACT64(CAST(64, IL_FALSE, Rt), SN(32, 0), SN(32, 7))), SEXTRACT64(CAST(64, IL_FALSE, DUP(Rt)), SN(32, 0), SN(32, 7))));

	// Rdd = ((shamt < 0x0) ? ((Rss >> (-shamt) - 0x1) >> 0x1) : (Rss << shamt));
	RzILOpPure *op_LT_14 = SLT(VARL("shamt"), SN(32, 0));
	RzILOpPure *op_NEG_16 = NEG(VARL("shamt"));
	RzILOpPure *op_SUB_18 = SUB(op_NEG_16, SN(32, 1));
	RzILOpPure *op_RSHIFT_19 = SHIFTRA(Rss, op_SUB_18);
	RzILOpPure *op_RSHIFT_21 = SHIFTRA(op_RSHIFT_19, SN(32, 1));
	RzILOpPure *op_LSHIFT_22 = SHIFTL0(DUP(Rss), VARL("shamt"));
	RzILOpPure *cond_23 = ITE(op_LT_14, op_RSHIFT_21, op_LSHIFT_22);
	RzILOpEffect *op_ASSIGN_24 = WRITE_REG(bundle, Rdd_op, cond_23);

	RzILOpEffect *instruction_sequence = SEQN(2, op_ASSIGN_10, op_ASSIGN_24);
	return instruction_sequence;
}

// Rxx += asl(Rss,Rt)
RzILOpEffect *hex_il_op_s2_asl_r_p_acc(HexInsnPktBundle *bundle) {
	const HexInsn *hi = bundle->insn;
	HexPkt *pkt = bundle->pkt;
	// READ
	const HexOp *Rt_op = ISA2REG(hi, 't', false);
	RzILOpPure *Rt = READ_REG(pkt, Rt_op, false);
	// Declare: st32 shamt;
	const HexOp *Rxx_op = ISA2REG(hi, 'x', false);

	const HexOp *Rss_op = ISA2REG(hi, 's', false);
	RzILOpPure *Rss = READ_REG(pkt, Rss_op, false);

	// shamt = ((st32) sextract64(((ut64) Rt), 0x0, 0x7));
	RzILOpEffect *op_ASSIGN_10 = SETL("shamt", CAST(32, MSB(SEXTRACT64(CAST(64, IL_FALSE, Rt), SN(32, 0), SN(32, 7))), SEXTRACT64(CAST(64, IL_FALSE, DUP(Rt)), SN(32, 0), SN(32, 7))));

	// Rxx = Rxx + ((shamt < 0x0) ? ((Rss >> (-shamt) - 0x1) >> 0x1) : (Rss << shamt));
	RzILOpPure *op_LT_14 = SLT(VARL("shamt"), SN(32, 0));
	RzILOpPure *op_NEG_16 = NEG(VARL("shamt"));
	RzILOpPure *op_SUB_18 = SUB(op_NEG_16, SN(32, 1));
	RzILOpPure *op_RSHIFT_19 = SHIFTRA(Rss, op_SUB_18);
	RzILOpPure *op_RSHIFT_21 = SHIFTRA(op_RSHIFT_19, SN(32, 1));
	RzILOpPure *op_LSHIFT_22 = SHIFTL0(DUP(Rss), VARL("shamt"));
	RzILOpPure *cond_23 = ITE(op_LT_14, op_RSHIFT_21, op_LSHIFT_22);
	RzILOpPure *op_ADD_24 = ADD(READ_REG(pkt, Rxx_op, false), cond_23);
	RzILOpEffect *op_ASSIGN_25 = WRITE_REG(bundle, Rxx_op, op_ADD_24);

	RzILOpEffect *instruction_sequence = SEQN(2, op_ASSIGN_10, op_ASSIGN_25);
	return instruction_sequence;
}

// Rxx &= asl(Rss,Rt)
RzILOpEffect *hex_il_op_s2_asl_r_p_and(HexInsnPktBundle *bundle) {
	const HexInsn *hi = bundle->insn;
	HexPkt *pkt = bundle->pkt;
	// READ
	const HexOp *Rt_op = ISA2REG(hi, 't', false);
	RzILOpPure *Rt = READ_REG(pkt, Rt_op, false);
	// Declare: st32 shamt;
	const HexOp *Rxx_op = ISA2REG(hi, 'x', false);

	const HexOp *Rss_op = ISA2REG(hi, 's', false);
	RzILOpPure *Rss = READ_REG(pkt, Rss_op, false);

	// shamt = ((st32) sextract64(((ut64) Rt), 0x0, 0x7));
	RzILOpEffect *op_ASSIGN_10 = SETL("shamt", CAST(32, MSB(SEXTRACT64(CAST(64, IL_FALSE, Rt), SN(32, 0), SN(32, 7))), SEXTRACT64(CAST(64, IL_FALSE, DUP(Rt)), SN(32, 0), SN(32, 7))));

	// Rxx = (Rxx & ((shamt < 0x0) ? ((Rss >> (-shamt) - 0x1) >> 0x1) : (Rss << shamt)));
	RzILOpPure *op_LT_14 = SLT(VARL("shamt"), SN(32, 0));
	RzILOpPure *op_NEG_16 = NEG(VARL("shamt"));
	RzILOpPure *op_SUB_18 = SUB(op_NEG_16, SN(32, 1));
	RzILOpPure *op_RSHIFT_19 = SHIFTRA(Rss, op_SUB_18);
	RzILOpPure *op_RSHIFT_21 = SHIFTRA(op_RSHIFT_19, SN(32, 1));
	RzILOpPure *op_LSHIFT_22 = SHIFTL0(DUP(Rss), VARL("shamt"));
	RzILOpPure *cond_23 = ITE(op_LT_14, op_RSHIFT_21, op_LSHIFT_22);
	RzILOpPure *op_AND_24 = LOGAND(READ_REG(pkt, Rxx_op, false), cond_23);
	RzILOpEffect *op_ASSIGN_25 = WRITE_REG(bundle, Rxx_op, op_AND_24);

	RzILOpEffect *instruction_sequence = SEQN(2, op_ASSIGN_10, op_ASSIGN_25);
	return instruction_sequence;
}

// Rxx -= asl(Rss,Rt)
RzILOpEffect *hex_il_op_s2_asl_r_p_nac(HexInsnPktBundle *bundle) {
	const HexInsn *hi = bundle->insn;
	HexPkt *pkt = bundle->pkt;
	// READ
	const HexOp *Rt_op = ISA2REG(hi, 't', false);
	RzILOpPure *Rt = READ_REG(pkt, Rt_op, false);
	// Declare: st32 shamt;
	const HexOp *Rxx_op = ISA2REG(hi, 'x', false);

	const HexOp *Rss_op = ISA2REG(hi, 's', false);
	RzILOpPure *Rss = READ_REG(pkt, Rss_op, false);

	// shamt = ((st32) sextract64(((ut64) Rt), 0x0, 0x7));
	RzILOpEffect *op_ASSIGN_10 = SETL("shamt", CAST(32, MSB(SEXTRACT64(CAST(64, IL_FALSE, Rt), SN(32, 0), SN(32, 7))), SEXTRACT64(CAST(64, IL_FALSE, DUP(Rt)), SN(32, 0), SN(32, 7))));

	// Rxx = Rxx - ((shamt < 0x0) ? ((Rss >> (-shamt) - 0x1) >> 0x1) : (Rss << shamt));
	RzILOpPure *op_LT_14 = SLT(VARL("shamt"), SN(32, 0));
	RzILOpPure *op_NEG_16 = NEG(VARL("shamt"));
	RzILOpPure *op_SUB_18 = SUB(op_NEG_16, SN(32, 1));
	RzILOpPure *op_RSHIFT_19 = SHIFTRA(Rss, op_SUB_18);
	RzILOpPure *op_RSHIFT_21 = SHIFTRA(op_RSHIFT_19, SN(32, 1));
	RzILOpPure *op_LSHIFT_22 = SHIFTL0(DUP(Rss), VARL("shamt"));
	RzILOpPure *cond_23 = ITE(op_LT_14, op_RSHIFT_21, op_LSHIFT_22);
	RzILOpPure *op_SUB_24 = SUB(READ_REG(pkt, Rxx_op, false), cond_23);
	RzILOpEffect *op_ASSIGN_25 = WRITE_REG(bundle, Rxx_op, op_SUB_24);

	RzILOpEffect *instruction_sequence = SEQN(2, op_ASSIGN_10, op_ASSIGN_25);
	return instruction_sequence;
}

// Rxx |= asl(Rss,Rt)
RzILOpEffect *hex_il_op_s2_asl_r_p_or(HexInsnPktBundle *bundle) {
	const HexInsn *hi = bundle->insn;
	HexPkt *pkt = bundle->pkt;
	// READ
	const HexOp *Rt_op = ISA2REG(hi, 't', false);
	RzILOpPure *Rt = READ_REG(pkt, Rt_op, false);
	// Declare: st32 shamt;
	const HexOp *Rxx_op = ISA2REG(hi, 'x', false);

	const HexOp *Rss_op = ISA2REG(hi, 's', false);
	RzILOpPure *Rss = READ_REG(pkt, Rss_op, false);

	// shamt = ((st32) sextract64(((ut64) Rt), 0x0, 0x7));
	RzILOpEffect *op_ASSIGN_10 = SETL("shamt", CAST(32, MSB(SEXTRACT64(CAST(64, IL_FALSE, Rt), SN(32, 0), SN(32, 7))), SEXTRACT64(CAST(64, IL_FALSE, DUP(Rt)), SN(32, 0), SN(32, 7))));

	// Rxx = (Rxx | ((shamt < 0x0) ? ((Rss >> (-shamt) - 0x1) >> 0x1) : (Rss << shamt)));
	RzILOpPure *op_LT_14 = SLT(VARL("shamt"), SN(32, 0));
	RzILOpPure *op_NEG_16 = NEG(VARL("shamt"));
	RzILOpPure *op_SUB_18 = SUB(op_NEG_16, SN(32, 1));
	RzILOpPure *op_RSHIFT_19 = SHIFTRA(Rss, op_SUB_18);
	RzILOpPure *op_RSHIFT_21 = SHIFTRA(op_RSHIFT_19, SN(32, 1));
	RzILOpPure *op_LSHIFT_22 = SHIFTL0(DUP(Rss), VARL("shamt"));
	RzILOpPure *cond_23 = ITE(op_LT_14, op_RSHIFT_21, op_LSHIFT_22);
	RzILOpPure *op_OR_24 = LOGOR(READ_REG(pkt, Rxx_op, false), cond_23);
	RzILOpEffect *op_ASSIGN_25 = WRITE_REG(bundle, Rxx_op, op_OR_24);

	RzILOpEffect *instruction_sequence = SEQN(2, op_ASSIGN_10, op_ASSIGN_25);
	return instruction_sequence;
}

// Rxx ^= asl(Rss,Rt)
RzILOpEffect *hex_il_op_s2_asl_r_p_xor(HexInsnPktBundle *bundle) {
	const HexInsn *hi = bundle->insn;
	HexPkt *pkt = bundle->pkt;
	// READ
	const HexOp *Rt_op = ISA2REG(hi, 't', false);
	RzILOpPure *Rt = READ_REG(pkt, Rt_op, false);
	// Declare: st32 shamt;
	const HexOp *Rxx_op = ISA2REG(hi, 'x', false);

	const HexOp *Rss_op = ISA2REG(hi, 's', false);
	RzILOpPure *Rss = READ_REG(pkt, Rss_op, false);

	// shamt = ((st32) sextract64(((ut64) Rt), 0x0, 0x7));
	RzILOpEffect *op_ASSIGN_10 = SETL("shamt", CAST(32, MSB(SEXTRACT64(CAST(64, IL_FALSE, Rt), SN(32, 0), SN(32, 7))), SEXTRACT64(CAST(64, IL_FALSE, DUP(Rt)), SN(32, 0), SN(32, 7))));

	// Rxx = (Rxx ^ ((shamt < 0x0) ? ((Rss >> (-shamt) - 0x1) >> 0x1) : (Rss << shamt)));
	RzILOpPure *op_LT_14 = SLT(VARL("shamt"), SN(32, 0));
	RzILOpPure *op_NEG_16 = NEG(VARL("shamt"));
	RzILOpPure *op_SUB_18 = SUB(op_NEG_16, SN(32, 1));
	RzILOpPure *op_RSHIFT_19 = SHIFTRA(Rss, op_SUB_18);
	RzILOpPure *op_RSHIFT_21 = SHIFTRA(op_RSHIFT_19, SN(32, 1));
	RzILOpPure *op_LSHIFT_22 = SHIFTL0(DUP(Rss), VARL("shamt"));
	RzILOpPure *cond_23 = ITE(op_LT_14, op_RSHIFT_21, op_LSHIFT_22);
	RzILOpPure *op_XOR_24 = LOGXOR(READ_REG(pkt, Rxx_op, false), cond_23);
	RzILOpEffect *op_ASSIGN_25 = WRITE_REG(bundle, Rxx_op, op_XOR_24);

	RzILOpEffect *instruction_sequence = SEQN(2, op_ASSIGN_10, op_ASSIGN_25);
	return instruction_sequence;
}

// Rd = asl(Rs,Rt)
RzILOpEffect *hex_il_op_s2_asl_r_r(HexInsnPktBundle *bundle) {
	const HexInsn *hi = bundle->insn;
	HexPkt *pkt = bundle->pkt;
	// READ
	const HexOp *Rt_op = ISA2REG(hi, 't', false);
	RzILOpPure *Rt = READ_REG(pkt, Rt_op, false);
	// Declare: st32 shamt;
	const HexOp *Rd_op = ISA2REG(hi, 'd', false);
	const HexOp *Rs_op = ISA2REG(hi, 's', false);
	RzILOpPure *Rs = READ_REG(pkt, Rs_op, false);

	// shamt = ((st32) sextract64(((ut64) Rt), 0x0, 0x7));
	RzILOpEffect *op_ASSIGN_10 = SETL("shamt", CAST(32, MSB(SEXTRACT64(CAST(64, IL_FALSE, Rt), SN(32, 0), SN(32, 7))), SEXTRACT64(CAST(64, IL_FALSE, DUP(Rt)), SN(32, 0), SN(32, 7))));

	// Rd = ((st32) ((shamt < 0x0) ? ((((st64) Rs) >> (-shamt) - 0x1) >> 0x1) : (((st64) Rs) << shamt)));
	RzILOpPure *op_LT_14 = SLT(VARL("shamt"), SN(32, 0));
	RzILOpPure *op_NEG_17 = NEG(VARL("shamt"));
	RzILOpPure *op_SUB_19 = SUB(op_NEG_17, SN(32, 1));
	RzILOpPure *op_RSHIFT_20 = SHIFTRA(CAST(64, MSB(Rs), DUP(Rs)), op_SUB_19);
	RzILOpPure *op_RSHIFT_22 = SHIFTRA(op_RSHIFT_20, SN(32, 1));
	RzILOpPure *op_LSHIFT_24 = SHIFTL0(CAST(64, MSB(DUP(Rs)), DUP(Rs)), VARL("shamt"));
	RzILOpPure *cond_25 = ITE(op_LT_14, op_RSHIFT_22, op_LSHIFT_24);
	RzILOpEffect *op_ASSIGN_27 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(cond_25), DUP(cond_25)));

	RzILOpEffect *instruction_sequence = SEQN(2, op_ASSIGN_10, op_ASSIGN_27);
	return instruction_sequence;
}

// Rx += asl(Rs,Rt)
RzILOpEffect *hex_il_op_s2_asl_r_r_acc(HexInsnPktBundle *bundle) {
	const HexInsn *hi = bundle->insn;
	HexPkt *pkt = bundle->pkt;
	// READ
	const HexOp *Rt_op = ISA2REG(hi, 't', false);
	RzILOpPure *Rt = READ_REG(pkt, Rt_op, false);
	// Declare: st32 shamt;
	const HexOp *Rx_op = ISA2REG(hi, 'x', false);

	const HexOp *Rs_op = ISA2REG(hi, 's', false);
	RzILOpPure *Rs = READ_REG(pkt, Rs_op, false);

	// shamt = ((st32) sextract64(((ut64) Rt), 0x0, 0x7));
	RzILOpEffect *op_ASSIGN_10 = SETL("shamt", CAST(32, MSB(SEXTRACT64(CAST(64, IL_FALSE, Rt), SN(32, 0), SN(32, 7))), SEXTRACT64(CAST(64, IL_FALSE, DUP(Rt)), SN(32, 0), SN(32, 7))));

	// Rx = ((st32) ((st64) Rx) + ((shamt < 0x0) ? ((((st64) Rs) >> (-shamt) - 0x1) >> 0x1) : (((st64) Rs) << shamt)));
	RzILOpPure *op_LT_14 = SLT(VARL("shamt"), SN(32, 0));
	RzILOpPure *op_NEG_17 = NEG(VARL("shamt"));
	RzILOpPure *op_SUB_19 = SUB(op_NEG_17, SN(32, 1));
	RzILOpPure *op_RSHIFT_20 = SHIFTRA(CAST(64, MSB(Rs), DUP(Rs)), op_SUB_19);
	RzILOpPure *op_RSHIFT_22 = SHIFTRA(op_RSHIFT_20, SN(32, 1));
	RzILOpPure *op_LSHIFT_24 = SHIFTL0(CAST(64, MSB(DUP(Rs)), DUP(Rs)), VARL("shamt"));
	RzILOpPure *cond_25 = ITE(op_LT_14, op_RSHIFT_22, op_LSHIFT_24);
	RzILOpPure *op_ADD_27 = ADD(CAST(64, MSB(READ_REG(pkt, Rx_op, false)), READ_REG(pkt, Rx_op, false)), cond_25);
	RzILOpEffect *op_ASSIGN_29 = WRITE_REG(bundle, Rx_op, CAST(32, MSB(op_ADD_27), DUP(op_ADD_27)));

	RzILOpEffect *instruction_sequence = SEQN(2, op_ASSIGN_10, op_ASSIGN_29);
	return instruction_sequence;
}

// Rx &= asl(Rs,Rt)
RzILOpEffect *hex_il_op_s2_asl_r_r_and(HexInsnPktBundle *bundle) {
	const HexInsn *hi = bundle->insn;
	HexPkt *pkt = bundle->pkt;
	// READ
	const HexOp *Rt_op = ISA2REG(hi, 't', false);
	RzILOpPure *Rt = READ_REG(pkt, Rt_op, false);
	// Declare: st32 shamt;
	const HexOp *Rx_op = ISA2REG(hi, 'x', false);

	const HexOp *Rs_op = ISA2REG(hi, 's', false);
	RzILOpPure *Rs = READ_REG(pkt, Rs_op, false);

	// shamt = ((st32) sextract64(((ut64) Rt), 0x0, 0x7));
	RzILOpEffect *op_ASSIGN_10 = SETL("shamt", CAST(32, MSB(SEXTRACT64(CAST(64, IL_FALSE, Rt), SN(32, 0), SN(32, 7))), SEXTRACT64(CAST(64, IL_FALSE, DUP(Rt)), SN(32, 0), SN(32, 7))));

	// Rx = ((st32) (((st64) Rx) & ((shamt < 0x0) ? ((((st64) Rs) >> (-shamt) - 0x1) >> 0x1) : (((st64) Rs) << shamt))));
	RzILOpPure *op_LT_14 = SLT(VARL("shamt"), SN(32, 0));
	RzILOpPure *op_NEG_17 = NEG(VARL("shamt"));
	RzILOpPure *op_SUB_19 = SUB(op_NEG_17, SN(32, 1));
	RzILOpPure *op_RSHIFT_20 = SHIFTRA(CAST(64, MSB(Rs), DUP(Rs)), op_SUB_19);
	RzILOpPure *op_RSHIFT_22 = SHIFTRA(op_RSHIFT_20, SN(32, 1));
	RzILOpPure *op_LSHIFT_24 = SHIFTL0(CAST(64, MSB(DUP(Rs)), DUP(Rs)), VARL("shamt"));
	RzILOpPure *cond_25 = ITE(op_LT_14, op_RSHIFT_22, op_LSHIFT_24);
	RzILOpPure *op_AND_27 = LOGAND(CAST(64, MSB(READ_REG(pkt, Rx_op, false)), READ_REG(pkt, Rx_op, false)), cond_25);
	RzILOpEffect *op_ASSIGN_29 = WRITE_REG(bundle, Rx_op, CAST(32, MSB(op_AND_27), DUP(op_AND_27)));

	RzILOpEffect *instruction_sequence = SEQN(2, op_ASSIGN_10, op_ASSIGN_29);
	return instruction_sequence;
}

// Rx -= asl(Rs,Rt)
RzILOpEffect *hex_il_op_s2_asl_r_r_nac(HexInsnPktBundle *bundle) {
	const HexInsn *hi = bundle->insn;
	HexPkt *pkt = bundle->pkt;
	// READ
	const HexOp *Rt_op = ISA2REG(hi, 't', false);
	RzILOpPure *Rt = READ_REG(pkt, Rt_op, false);
	// Declare: st32 shamt;
	const HexOp *Rx_op = ISA2REG(hi, 'x', false);

	const HexOp *Rs_op = ISA2REG(hi, 's', false);
	RzILOpPure *Rs = READ_REG(pkt, Rs_op, false);

	// shamt = ((st32) sextract64(((ut64) Rt), 0x0, 0x7));
	RzILOpEffect *op_ASSIGN_10 = SETL("shamt", CAST(32, MSB(SEXTRACT64(CAST(64, IL_FALSE, Rt), SN(32, 0), SN(32, 7))), SEXTRACT64(CAST(64, IL_FALSE, DUP(Rt)), SN(32, 0), SN(32, 7))));

	// Rx = ((st32) ((st64) Rx) - ((shamt < 0x0) ? ((((st64) Rs) >> (-shamt) - 0x1) >> 0x1) : (((st64) Rs) << shamt)));
	RzILOpPure *op_LT_14 = SLT(VARL("shamt"), SN(32, 0));
	RzILOpPure *op_NEG_17 = NEG(VARL("shamt"));
	RzILOpPure *op_SUB_19 = SUB(op_NEG_17, SN(32, 1));
	RzILOpPure *op_RSHIFT_20 = SHIFTRA(CAST(64, MSB(Rs), DUP(Rs)), op_SUB_19);
	RzILOpPure *op_RSHIFT_22 = SHIFTRA(op_RSHIFT_20, SN(32, 1));
	RzILOpPure *op_LSHIFT_24 = SHIFTL0(CAST(64, MSB(DUP(Rs)), DUP(Rs)), VARL("shamt"));
	RzILOpPure *cond_25 = ITE(op_LT_14, op_RSHIFT_22, op_LSHIFT_24);
	RzILOpPure *op_SUB_27 = SUB(CAST(64, MSB(READ_REG(pkt, Rx_op, false)), READ_REG(pkt, Rx_op, false)), cond_25);
	RzILOpEffect *op_ASSIGN_29 = WRITE_REG(bundle, Rx_op, CAST(32, MSB(op_SUB_27), DUP(op_SUB_27)));

	RzILOpEffect *instruction_sequence = SEQN(2, op_ASSIGN_10, op_ASSIGN_29);
	return instruction_sequence;
}

// Rx |= asl(Rs,Rt)
RzILOpEffect *hex_il_op_s2_asl_r_r_or(HexInsnPktBundle *bundle) {
	const HexInsn *hi = bundle->insn;
	HexPkt *pkt = bundle->pkt;
	// READ
	const HexOp *Rt_op = ISA2REG(hi, 't', false);
	RzILOpPure *Rt = READ_REG(pkt, Rt_op, false);
	// Declare: st32 shamt;
	const HexOp *Rx_op = ISA2REG(hi, 'x', false);

	const HexOp *Rs_op = ISA2REG(hi, 's', false);
	RzILOpPure *Rs = READ_REG(pkt, Rs_op, false);

	// shamt = ((st32) sextract64(((ut64) Rt), 0x0, 0x7));
	RzILOpEffect *op_ASSIGN_10 = SETL("shamt", CAST(32, MSB(SEXTRACT64(CAST(64, IL_FALSE, Rt), SN(32, 0), SN(32, 7))), SEXTRACT64(CAST(64, IL_FALSE, DUP(Rt)), SN(32, 0), SN(32, 7))));

	// Rx = ((st32) (((st64) Rx) | ((shamt < 0x0) ? ((((st64) Rs) >> (-shamt) - 0x1) >> 0x1) : (((st64) Rs) << shamt))));
	RzILOpPure *op_LT_14 = SLT(VARL("shamt"), SN(32, 0));
	RzILOpPure *op_NEG_17 = NEG(VARL("shamt"));
	RzILOpPure *op_SUB_19 = SUB(op_NEG_17, SN(32, 1));
	RzILOpPure *op_RSHIFT_20 = SHIFTRA(CAST(64, MSB(Rs), DUP(Rs)), op_SUB_19);
	RzILOpPure *op_RSHIFT_22 = SHIFTRA(op_RSHIFT_20, SN(32, 1));
	RzILOpPure *op_LSHIFT_24 = SHIFTL0(CAST(64, MSB(DUP(Rs)), DUP(Rs)), VARL("shamt"));
	RzILOpPure *cond_25 = ITE(op_LT_14, op_RSHIFT_22, op_LSHIFT_24);
	RzILOpPure *op_OR_27 = LOGOR(CAST(64, MSB(READ_REG(pkt, Rx_op, false)), READ_REG(pkt, Rx_op, false)), cond_25);
	RzILOpEffect *op_ASSIGN_29 = WRITE_REG(bundle, Rx_op, CAST(32, MSB(op_OR_27), DUP(op_OR_27)));

	RzILOpEffect *instruction_sequence = SEQN(2, op_ASSIGN_10, op_ASSIGN_29);
	return instruction_sequence;
}

// Rd = asl(Rs,Rt):sat
// NOTE(review): definition continues past the end of this chunk — the
// remainder of hex_il_op_s2_asl_r_r_sat is outside the visible range.
RzILOpEffect *hex_il_op_s2_asl_r_r_sat(HexInsnPktBundle *bundle) {
	const HexInsn *hi = bundle->insn;
	HexPkt *pkt = bundle->pkt;
	// READ
	const HexOp *Rt_op = ISA2REG(hi, 't', false);
	RzILOpPure *Rt = READ_REG(pkt, Rt_op, false);
	// Declare: st32 shamt;
	const HexOp *Rd_op = ISA2REG(hi, 'd', false);
	const HexOp *Rs_op = ISA2REG(hi, 's', false);
	RzILOpPure *Rs = READ_REG(pkt, Rs_op, false);

	// shamt = ((st32) sextract64(((ut64) Rt), 0x0, 0x7));
	RzILOpEffect *op_ASSIGN_10 = SETL("shamt", CAST(32, MSB(SEXTRACT64(CAST(64, IL_FALSE, Rt), SN(32, 0), SN(32, 7))), SEXTRACT64(CAST(64, IL_FALSE, DUP(Rt)), SN(32, 0), SN(32, 7))));

	// set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1));
	RzILOpEffect *set_usr_field_call_40 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1)));

	// HYB(gcc_expr_if ((sextract64(((ut64) (((st64) Rs) << shamt)), 0x0, 0x20) == (((st64) Rs) << shamt))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) Rs) << shamt) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1)));
	RzILOpPure *op_LSHIFT_27 = SHIFTL0(CAST(64, MSB(Rs), DUP(Rs)), VARL("shamt"));
	RzILOpPure *op_LSHIFT_34 = SHIFTL0(CAST(64, MSB(DUP(Rs)), DUP(Rs)), VARL("shamt"));
	RzILOpPure *op_EQ_35 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_LSHIFT_27), SN(32, 0), SN(32, 0x20)), op_LSHIFT_34);
	RzILOpPure *op_LSHIFT_42 = SHIFTL0(CAST(64, MSB(DUP(Rs)), DUP(Rs)), VARL("shamt"));
	RzILOpPure *op_LT_45 = SLT(op_LSHIFT_42, CAST(64, MSB(SN(32, 0)), SN(32, 0)));
	RzILOpPure *op_LSHIFT_50 = SHIFTL0(SN(64, 1), SN(32, 31));
	RzILOpPure *op_NEG_51 = NEG(op_LSHIFT_50);
	RzILOpPure *op_LSHIFT_56 = SHIFTL0(SN(64, 1), SN(32, 31));
	RzILOpPure *op_SUB_59 = SUB(op_LSHIFT_56, CAST(64, MSB(SN(32, 1)), SN(32, 1)));
	RzILOpPure *cond_60 = ITE(op_LT_45, op_NEG_51, op_SUB_59);
	RzILOpEffect *gcc_expr_61 = BRANCH(op_EQ_35, EMPTY(), set_usr_field_call_40);

	// h_tmp454 = HYB(gcc_expr_if ((sextract64(((ut64) (((st64) Rs) << shamt)), 0x0, 0x20) == (((st64) Rs) << shamt))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) Rs) << shamt) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1)));
	RzILOpEffect *op_ASSIGN_hybrid_tmp_63 = SETL("h_tmp454", cond_60);

	// seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) Rs) << shamt)) ...;
	RzILOpEffect *seq_64 = SEQN(2, gcc_expr_61, op_ASSIGN_hybrid_tmp_63);

	// set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1));
	RzILOpEffect *set_usr_field_call_73 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1)));

	// HYB(gcc_expr_if ((((st32) (((sextract64(((ut64) (((st64) Rs) << shamt)), 0x0, 0x20) == (((st64) Rs) << shamt)) ? (((st64) Rs) << shamt) : h_tmp454) ^ ((st64) Rs))) < 0x0)) {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))} else {{}}, ((Rs < 0x0) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1)));
	RzILOpPure *op_LSHIFT_37 = SHIFTL0(CAST(64, MSB(DUP(Rs)), DUP(Rs)), VARL("shamt"));
	RzILOpPure *cond_65 = ITE(DUP(op_EQ_35), op_LSHIFT_37, VARL("h_tmp454"));
	RzILOpPure *op_XOR_67 = LOGXOR(cond_65, CAST(64, MSB(DUP(Rs)), DUP(Rs)));
	RzILOpPure *op_LT_70 = SLT(CAST(32, MSB(op_XOR_67), DUP(op_XOR_67)), SN(32, 0));
	RzILOpPure *op_LT_75 = SLT(DUP(Rs), SN(32, 0));
	RzILOpPure *op_LSHIFT_80 = SHIFTL0(SN(64, 1), SN(32, 31));
	RzILOpPure *op_NEG_81 = NEG(op_LSHIFT_80);
	RzILOpPure *op_LSHIFT_86 = SHIFTL0(SN(64, 1), SN(32, 31));
	RzILOpPure *op_SUB_89 = SUB(op_LSHIFT_86, CAST(64, MSB(SN(32, 1)), SN(32, 1)));
	RzILOpPure *cond_90 = ITE(op_LT_75, op_NEG_81, op_SUB_89);
	RzILOpEffect *gcc_expr_91 = BRANCH(op_LT_70, set_usr_field_call_73, EMPTY());

	// h_tmp455 = HYB(gcc_expr_if ((((st32) (((sextract64(((ut64) (((st64) Rs) << shamt)), 0x0, 0x20) == (((st64) Rs) << shamt)) ? (((st64) Rs) << shamt) : h_tmp454) ^ ((st64) Rs))) < 0x0)) {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))} else {{}}, ((Rs < 0x0) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1)));
	RzILOpEffect *op_ASSIGN_hybrid_tmp_93 = SETL("h_tmp455", cond_90);

	// seq(HYB(gcc_expr_if ((((st32) (((sextract64(((ut64) (((st64) Rs) ...;
	RzILOpEffect *seq_94 = SEQN(2, gcc_expr_91, op_ASSIGN_hybrid_tmp_93);

	// set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1));
	RzILOpEffect *set_usr_field_call_105 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1)));

	// HYB(gcc_expr_if (((Rs > 0x0) && ((((st64) Rs) << shamt) == ((st64) 0x0)))) {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))} else {{}}, ((Rs < 0x0) ?
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_GT_96 = SGT(DUP(Rs), SN(32, 0)); + RzILOpPure *op_LSHIFT_98 = SHIFTL0(CAST(64, MSB(DUP(Rs)), DUP(Rs)), VARL("shamt")); + RzILOpPure *op_EQ_101 = EQ(op_LSHIFT_98, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_AND_102 = AND(op_GT_96, op_EQ_101); + RzILOpPure *op_LT_107 = SLT(DUP(Rs), SN(32, 0)); + RzILOpPure *op_LSHIFT_112 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_113 = NEG(op_LSHIFT_112); + RzILOpPure *op_LSHIFT_118 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_121 = SUB(op_LSHIFT_118, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_122 = ITE(op_LT_107, op_NEG_113, op_SUB_121); + RzILOpEffect *gcc_expr_123 = BRANCH(op_AND_102, set_usr_field_call_105, EMPTY()); + + // h_tmp456 = HYB(gcc_expr_if (((Rs > 0x0) && ((((st64) Rs) << shamt) == ((st64) 0x0)))) {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))} else {{}}, ((Rs < 0x0) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_125 = SETL("h_tmp456", cond_122); + + // seq(HYB(gcc_expr_if (((Rs > 0x0) && ((((st64) Rs) << shamt) == ( ...; + RzILOpEffect *seq_126 = SEQN(2, gcc_expr_123, op_ASSIGN_hybrid_tmp_125); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_144 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) (((st64) Rs) << shamt)), 0x0, 0x20) == (((st64) Rs) << shamt))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) Rs) << shamt) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_LSHIFT_131 = SHIFTL0(CAST(64, MSB(DUP(Rs)), DUP(Rs)), VARL("shamt")); + RzILOpPure *op_LSHIFT_138 = SHIFTL0(CAST(64, MSB(DUP(Rs)), DUP(Rs)), VARL("shamt")); + RzILOpPure *op_EQ_139 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_LSHIFT_131), SN(32, 0), SN(32, 0x20)), op_LSHIFT_138); + RzILOpPure *op_LSHIFT_146 = SHIFTL0(CAST(64, MSB(DUP(Rs)), DUP(Rs)), VARL("shamt")); + RzILOpPure *op_LT_149 = SLT(op_LSHIFT_146, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_154 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_155 = NEG(op_LSHIFT_154); + RzILOpPure *op_LSHIFT_160 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_163 = SUB(op_LSHIFT_160, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_164 = ITE(op_LT_149, op_NEG_155, op_SUB_163); + RzILOpEffect *gcc_expr_165 = BRANCH(op_EQ_139, EMPTY(), set_usr_field_call_144); + + // h_tmp457 = HYB(gcc_expr_if ((sextract64(((ut64) (((st64) Rs) << shamt)), 0x0, 0x20) == (((st64) Rs) << shamt))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st64) Rs) << shamt) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_167 = SETL("h_tmp457", cond_164); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) Rs) << shamt)) ...; + RzILOpEffect *seq_168 = SEQN(2, gcc_expr_165, op_ASSIGN_hybrid_tmp_167); + + // Rd = ((st32) ((shamt < 0x0) ? ((((st64) Rs) >> (-shamt) - 0x1) >> 0x1) : ((((st32) (((sextract64(((ut64) (((st64) Rs) << shamt)), 0x0, 0x20) == (((st64) Rs) << shamt)) ? (((st64) Rs) << shamt) : h_tmp454) ^ ((st64) Rs))) < 0x0) ? h_tmp455 : (((Rs > 0x0) && ((((st64) Rs) << shamt) == ((st64) 0x0))) ? h_tmp456 : ((sextract64(((ut64) (((st64) Rs) << shamt)), 0x0, 0x20) == (((st64) Rs) << shamt)) ? 
(((st64) Rs) << shamt) : h_tmp457))))); + RzILOpPure *op_LT_14 = SLT(VARL("shamt"), SN(32, 0)); + RzILOpPure *op_NEG_17 = NEG(VARL("shamt")); + RzILOpPure *op_SUB_19 = SUB(op_NEG_17, SN(32, 1)); + RzILOpPure *op_RSHIFT_20 = SHIFTRA(CAST(64, MSB(DUP(Rs)), DUP(Rs)), op_SUB_19); + RzILOpPure *op_RSHIFT_22 = SHIFTRA(op_RSHIFT_20, SN(32, 1)); + RzILOpPure *op_LSHIFT_141 = SHIFTL0(CAST(64, MSB(DUP(Rs)), DUP(Rs)), VARL("shamt")); + RzILOpPure *cond_169 = ITE(DUP(op_EQ_139), op_LSHIFT_141, VARL("h_tmp457")); + RzILOpPure *cond_170 = ITE(DUP(op_AND_102), VARL("h_tmp456"), cond_169); + RzILOpPure *cond_171 = ITE(DUP(op_LT_70), VARL("h_tmp455"), cond_170); + RzILOpPure *cond_172 = ITE(op_LT_14, op_RSHIFT_22, cond_171); + RzILOpEffect *op_ASSIGN_174 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(cond_172), DUP(cond_172))); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st64) Rs) << sha ...; + RzILOpEffect *seq_175 = SEQN(5, seq_64, seq_94, seq_126, seq_168, op_ASSIGN_174); + + RzILOpEffect *instruction_sequence = SEQN(2, op_ASSIGN_10, seq_175); + return instruction_sequence; +} + +// Rdd = vaslh(Rss,Rt) +RzILOpEffect *hex_il_op_s2_asl_r_vh(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: st32 i; + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_2 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_5 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp458 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_7 = SETL("h_tmp458", VARL("i")); + + // seq(h_tmp458 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_8 = SEQN(2, op_ASSIGN_hybrid_tmp_7, op_INC_5); + + // Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * 0x10)))) | (((ut64) (((sextract64(((ut64) Rt), 0x0, 0x7) < ((st64) 0x0)) ? 
((((st64) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff)))) >> (-sextract64(((ut64) Rt), 0x0, 0x7)) - ((st64) 0x1)) >> 0x1) : (((st64) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff)))) << sextract64(((ut64) Rt), 0x0, 0x7))) & ((st64) 0xffff))) << i * 0x10))); + RzILOpPure *op_MUL_12 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_13 = SHIFTL0(SN(64, 0xffff), op_MUL_12); + RzILOpPure *op_NOT_14 = LOGNOT(op_LSHIFT_13); + RzILOpPure *op_AND_15 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_14); + RzILOpPure *op_LT_27 = SLT(SEXTRACT64(CAST(64, IL_FALSE, Rt), SN(32, 0), SN(32, 7)), CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_MUL_30 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_31 = SHIFTRA(Rss, op_MUL_30); + RzILOpPure *op_AND_34 = LOGAND(op_RSHIFT_31, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_NEG_45 = NEG(SEXTRACT64(CAST(64, IL_FALSE, DUP(Rt)), SN(32, 0), SN(32, 7))); + RzILOpPure *op_SUB_48 = SUB(op_NEG_45, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *op_RSHIFT_49 = SHIFTRA(CAST(64, MSB(CAST(16, MSB(op_AND_34), DUP(op_AND_34))), CAST(16, MSB(DUP(op_AND_34)), DUP(op_AND_34))), op_SUB_48); + RzILOpPure *op_RSHIFT_51 = SHIFTRA(op_RSHIFT_49, SN(32, 1)); + RzILOpPure *op_MUL_53 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_54 = SHIFTRA(DUP(Rss), op_MUL_53); + RzILOpPure *op_AND_57 = LOGAND(op_RSHIFT_54, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_LSHIFT_68 = SHIFTL0(CAST(64, MSB(CAST(16, MSB(op_AND_57), DUP(op_AND_57))), CAST(16, MSB(DUP(op_AND_57)), DUP(op_AND_57))), SEXTRACT64(CAST(64, IL_FALSE, DUP(Rt)), SN(32, 0), SN(32, 7))); + RzILOpPure *cond_69 = ITE(op_LT_27, op_RSHIFT_51, op_LSHIFT_68); + RzILOpPure *op_AND_72 = LOGAND(cond_69, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_75 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_76 = SHIFTL0(CAST(64, IL_FALSE, op_AND_72), op_MUL_75); + RzILOpPure *op_OR_78 = LOGOR(CAST(64, IL_FALSE, op_AND_15), op_LSHIFT_76); + 
RzILOpEffect *op_ASSIGN_80 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_78)); + + // seq(h_tmp458; Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * 0x1 ...; + RzILOpEffect *seq_82 = op_ASSIGN_80; + + // seq(seq(h_tmp458; Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * ...; + RzILOpEffect *seq_83 = SEQN(2, seq_82, seq_8); + + // while ((i < 0x4)) { seq(seq(h_tmp458; Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * ... }; + RzILOpPure *op_LT_4 = SLT(VARL("i"), SN(32, 4)); + RzILOpEffect *for_84 = REPEAT(op_LT_4, seq_83); + + // seq(i = 0x0; while ((i < 0x4)) { seq(seq(h_tmp458; Rdd = ((st64) ...; + RzILOpEffect *seq_85 = SEQN(2, op_ASSIGN_2, for_84); + + RzILOpEffect *instruction_sequence = seq_85; + return instruction_sequence; +} + +// Rdd = vaslw(Rss,Rt) +RzILOpEffect *hex_il_op_s2_asl_r_vw(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: st32 i; + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_2 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_5 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp459 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_7 = SETL("h_tmp459", VARL("i")); + + // seq(h_tmp459 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_8 = SEQN(2, op_ASSIGN_hybrid_tmp_7, op_INC_5); + + // Rdd = ((Rdd & (~(0xffffffff << i * 0x20))) | ((((sextract64(((ut64) Rt), 0x0, 0x7) < ((st64) 0x0)) ? 
((((st64) ((st32) ((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff))))) >> (-sextract64(((ut64) Rt), 0x0, 0x7)) - ((st64) 0x1)) >> 0x1) : (((st64) ((st32) ((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff))))) << sextract64(((ut64) Rt), 0x0, 0x7))) & 0xffffffff) << i * 0x20)); + RzILOpPure *op_MUL_12 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_LSHIFT_13 = SHIFTL0(SN(64, 0xffffffff), op_MUL_12); + RzILOpPure *op_NOT_14 = LOGNOT(op_LSHIFT_13); + RzILOpPure *op_AND_15 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_14); + RzILOpPure *op_LT_27 = SLT(SEXTRACT64(CAST(64, IL_FALSE, Rt), SN(32, 0), SN(32, 7)), CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_MUL_30 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_RSHIFT_31 = SHIFTRA(Rss, op_MUL_30); + RzILOpPure *op_AND_33 = LOGAND(op_RSHIFT_31, SN(64, 0xffffffff)); + RzILOpPure *op_NEG_46 = NEG(SEXTRACT64(CAST(64, IL_FALSE, DUP(Rt)), SN(32, 0), SN(32, 7))); + RzILOpPure *op_SUB_49 = SUB(op_NEG_46, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *op_RSHIFT_50 = SHIFTRA(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_33), DUP(op_AND_33))), CAST(32, MSB(DUP(op_AND_33)), DUP(op_AND_33)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_33)), DUP(op_AND_33))), CAST(32, MSB(DUP(op_AND_33)), DUP(op_AND_33))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_33)), DUP(op_AND_33))), CAST(32, MSB(DUP(op_AND_33)), DUP(op_AND_33)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_33)), DUP(op_AND_33))), CAST(32, MSB(DUP(op_AND_33)), DUP(op_AND_33))))), op_SUB_49); + RzILOpPure *op_RSHIFT_52 = SHIFTRA(op_RSHIFT_50, SN(32, 1)); + RzILOpPure *op_MUL_54 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_RSHIFT_55 = SHIFTRA(DUP(Rss), op_MUL_54); + RzILOpPure *op_AND_57 = LOGAND(op_RSHIFT_55, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_70 = SHIFTL0(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_57), DUP(op_AND_57))), CAST(32, MSB(DUP(op_AND_57)), DUP(op_AND_57)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_57)), DUP(op_AND_57))), 
CAST(32, MSB(DUP(op_AND_57)), DUP(op_AND_57))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_57)), DUP(op_AND_57))), CAST(32, MSB(DUP(op_AND_57)), DUP(op_AND_57)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_57)), DUP(op_AND_57))), CAST(32, MSB(DUP(op_AND_57)), DUP(op_AND_57))))), SEXTRACT64(CAST(64, IL_FALSE, DUP(Rt)), SN(32, 0), SN(32, 7))); + RzILOpPure *cond_71 = ITE(op_LT_27, op_RSHIFT_52, op_LSHIFT_70); + RzILOpPure *op_AND_73 = LOGAND(cond_71, SN(64, 0xffffffff)); + RzILOpPure *op_MUL_75 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_LSHIFT_76 = SHIFTL0(op_AND_73, op_MUL_75); + RzILOpPure *op_OR_77 = LOGOR(op_AND_15, op_LSHIFT_76); + RzILOpEffect *op_ASSIGN_78 = WRITE_REG(bundle, Rdd_op, op_OR_77); + + // seq(h_tmp459; Rdd = ((Rdd & (~(0xffffffff << i * 0x20))) | ((((s ...; + RzILOpEffect *seq_80 = op_ASSIGN_78; + + // seq(seq(h_tmp459; Rdd = ((Rdd & (~(0xffffffff << i * 0x20))) | ( ...; + RzILOpEffect *seq_81 = SEQN(2, seq_80, seq_8); + + // while ((i < 0x2)) { seq(seq(h_tmp459; Rdd = ((Rdd & (~(0xffffffff << i * 0x20))) | ( ... 
}; + RzILOpPure *op_LT_4 = SLT(VARL("i"), SN(32, 2)); + RzILOpEffect *for_82 = REPEAT(op_LT_4, seq_81); + + // seq(i = 0x0; while ((i < 0x2)) { seq(seq(h_tmp459; Rdd = ((Rdd & ...; + RzILOpEffect *seq_83 = SEQN(2, op_ASSIGN_2, for_82); + + RzILOpEffect *instruction_sequence = seq_83; + return instruction_sequence; +} + +// Rdd = asr(Rss,Ii) +RzILOpEffect *hex_il_op_s2_asr_i_p(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + + // u = u; + RzILOpEffect *imm_assign_2 = SETL("u", u); + + // Rdd = (Rss >> u); + RzILOpPure *op_RSHIFT_4 = SHIFTRA(Rss, VARL("u")); + RzILOpEffect *op_ASSIGN_5 = WRITE_REG(bundle, Rdd_op, op_RSHIFT_4); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_2, op_ASSIGN_5); + return instruction_sequence; +} + +// Rxx += asr(Rss,Ii) +RzILOpEffect *hex_il_op_s2_asr_i_p_acc(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rxx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + + // u = u; + RzILOpEffect *imm_assign_2 = SETL("u", u); + + // Rxx = Rxx + (Rss >> u); + RzILOpPure *op_RSHIFT_4 = SHIFTRA(Rss, VARL("u")); + RzILOpPure *op_ADD_5 = ADD(READ_REG(pkt, Rxx_op, false), op_RSHIFT_4); + RzILOpEffect *op_ASSIGN_6 = WRITE_REG(bundle, Rxx_op, op_ADD_5); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_2, op_ASSIGN_6); + return instruction_sequence; +} + +// Rxx &= asr(Rss,Ii) +RzILOpEffect *hex_il_op_s2_asr_i_p_and(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rxx_op = ISA2REG(hi, 'x', false); + + const HexOp 
*Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + + // u = u; + RzILOpEffect *imm_assign_2 = SETL("u", u); + + // Rxx = (Rxx & (Rss >> u)); + RzILOpPure *op_RSHIFT_4 = SHIFTRA(Rss, VARL("u")); + RzILOpPure *op_AND_5 = LOGAND(READ_REG(pkt, Rxx_op, false), op_RSHIFT_4); + RzILOpEffect *op_ASSIGN_6 = WRITE_REG(bundle, Rxx_op, op_AND_5); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_2, op_ASSIGN_6); + return instruction_sequence; +} + +// Rxx -= asr(Rss,Ii) +RzILOpEffect *hex_il_op_s2_asr_i_p_nac(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rxx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + + // u = u; + RzILOpEffect *imm_assign_2 = SETL("u", u); + + // Rxx = Rxx - (Rss >> u); + RzILOpPure *op_RSHIFT_4 = SHIFTRA(Rss, VARL("u")); + RzILOpPure *op_SUB_5 = SUB(READ_REG(pkt, Rxx_op, false), op_RSHIFT_4); + RzILOpEffect *op_ASSIGN_6 = WRITE_REG(bundle, Rxx_op, op_SUB_5); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_2, op_ASSIGN_6); + return instruction_sequence; +} + +// Rxx |= asr(Rss,Ii) +RzILOpEffect *hex_il_op_s2_asr_i_p_or(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rxx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + + // u = u; + RzILOpEffect *imm_assign_2 = SETL("u", u); + + // Rxx = (Rxx | (Rss >> u)); + RzILOpPure *op_RSHIFT_4 = SHIFTRA(Rss, VARL("u")); + RzILOpPure *op_OR_5 = LOGOR(READ_REG(pkt, Rxx_op, false), op_RSHIFT_4); + RzILOpEffect *op_ASSIGN_6 = WRITE_REG(bundle, Rxx_op, op_OR_5); + + RzILOpEffect *instruction_sequence = SEQN(2, 
imm_assign_2, op_ASSIGN_6); + return instruction_sequence; +} + +// Rdd = asr(Rss,Ii):rnd +RzILOpEffect *hex_il_op_s2_asr_i_p_rnd(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut64 tmp; + // Declare: ut64 rnd; + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + + // u = u; + RzILOpEffect *imm_assign_3 = SETL("u", u); + + // tmp = ((ut64) (Rss >> u)); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(Rss, VARL("u")); + RzILOpEffect *op_ASSIGN_7 = SETL("tmp", CAST(64, IL_FALSE, op_RSHIFT_5)); + + // rnd = (tmp & ((ut64) 0x1)); + RzILOpPure *op_AND_10 = LOGAND(VARL("tmp"), CAST(64, IL_FALSE, SN(32, 1))); + RzILOpEffect *op_ASSIGN_11 = SETL("rnd", op_AND_10); + + // Rdd = ((st64) ((ut64) (((st64) tmp) >> 0x1)) + rnd); + RzILOpPure *op_RSHIFT_15 = SHIFTRA(CAST(64, IL_FALSE, VARL("tmp")), SN(32, 1)); + RzILOpPure *op_ADD_17 = ADD(CAST(64, IL_FALSE, op_RSHIFT_15), VARL("rnd")); + RzILOpEffect *op_ASSIGN_19 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_ADD_17)); + + RzILOpEffect *instruction_sequence = SEQN(4, imm_assign_3, op_ASSIGN_7, op_ASSIGN_11, op_ASSIGN_19); + return instruction_sequence; +} + +// Rd = asr(Rs,Ii) +RzILOpEffect *hex_il_op_s2_asr_i_r(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + + // u = u; + RzILOpEffect *imm_assign_2 = SETL("u", u); + + // Rd = (Rs >> u); + RzILOpPure *op_RSHIFT_4 = SHIFTRA(Rs, VARL("u")); + RzILOpEffect *op_ASSIGN_5 = WRITE_REG(bundle, Rd_op, op_RSHIFT_4); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_2, op_ASSIGN_5); + return instruction_sequence; +} + +// Rx += 
asr(Rs,Ii) +RzILOpEffect *hex_il_op_s2_asr_i_r_acc(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + + // u = u; + RzILOpEffect *imm_assign_2 = SETL("u", u); + + // Rx = Rx + (Rs >> u); + RzILOpPure *op_RSHIFT_4 = SHIFTRA(Rs, VARL("u")); + RzILOpPure *op_ADD_5 = ADD(READ_REG(pkt, Rx_op, false), op_RSHIFT_4); + RzILOpEffect *op_ASSIGN_6 = WRITE_REG(bundle, Rx_op, op_ADD_5); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_2, op_ASSIGN_6); + return instruction_sequence; +} + +// Rx &= asr(Rs,Ii) +RzILOpEffect *hex_il_op_s2_asr_i_r_and(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + + // u = u; + RzILOpEffect *imm_assign_2 = SETL("u", u); + + // Rx = (Rx & (Rs >> u)); + RzILOpPure *op_RSHIFT_4 = SHIFTRA(Rs, VARL("u")); + RzILOpPure *op_AND_5 = LOGAND(READ_REG(pkt, Rx_op, false), op_RSHIFT_4); + RzILOpEffect *op_ASSIGN_6 = WRITE_REG(bundle, Rx_op, op_AND_5); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_2, op_ASSIGN_6); + return instruction_sequence; +} + +// Rx -= asr(Rs,Ii) +RzILOpEffect *hex_il_op_s2_asr_i_r_nac(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + + // u = u; + RzILOpEffect *imm_assign_2 = SETL("u", u); + + // Rx = Rx - (Rs >> u); + RzILOpPure *op_RSHIFT_4 = SHIFTRA(Rs, VARL("u")); + 
RzILOpPure *op_SUB_5 = SUB(READ_REG(pkt, Rx_op, false), op_RSHIFT_4); + RzILOpEffect *op_ASSIGN_6 = WRITE_REG(bundle, Rx_op, op_SUB_5); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_2, op_ASSIGN_6); + return instruction_sequence; +} + +// Rx |= asr(Rs,Ii) +RzILOpEffect *hex_il_op_s2_asr_i_r_or(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + + // u = u; + RzILOpEffect *imm_assign_2 = SETL("u", u); + + // Rx = (Rx | (Rs >> u)); + RzILOpPure *op_RSHIFT_4 = SHIFTRA(Rs, VARL("u")); + RzILOpPure *op_OR_5 = LOGOR(READ_REG(pkt, Rx_op, false), op_RSHIFT_4); + RzILOpEffect *op_ASSIGN_6 = WRITE_REG(bundle, Rx_op, op_OR_5); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_2, op_ASSIGN_6); + return instruction_sequence; +} + +// Rd = asr(Rs,Ii):rnd +RzILOpEffect *hex_il_op_s2_asr_i_r_rnd(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + + // u = u; + RzILOpEffect *imm_assign_3 = SETL("u", u); + + // Rd = ((st32) ((((st64) Rs) >> u) + ((st64) 0x1) >> 0x1)); + RzILOpPure *op_RSHIFT_5 = SHIFTRA(CAST(64, MSB(Rs), DUP(Rs)), VARL("u")); + RzILOpPure *op_ADD_8 = ADD(op_RSHIFT_5, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *op_RSHIFT_10 = SHIFTRA(op_ADD_8, SN(32, 1)); + RzILOpEffect *op_ASSIGN_12 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(op_RSHIFT_10), DUP(op_RSHIFT_10))); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_3, op_ASSIGN_12); + return instruction_sequence; +} + +// Rd = vasrw(Rss,Ii) +RzILOpEffect 
*hex_il_op_s2_asr_i_svw_trun(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: st32 i; + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_2 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_5 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp460 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_7 = SETL("h_tmp460", VARL("i")); + + // seq(h_tmp460 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_8 = SEQN(2, op_ASSIGN_hybrid_tmp_7, op_INC_5); + + // u = u; + RzILOpEffect *imm_assign_25 = SETL("u", u); + + // Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xffff << i * 0x10)))) | (((ut64) (((st32) ((st16) (((((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff))) >> u) >> 0x0) & ((st64) 0xffff)))) & 0xffff)) << i * 0x10))); + RzILOpPure *op_MUL_12 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_13 = SHIFTL0(SN(64, 0xffff), op_MUL_12); + RzILOpPure *op_NOT_14 = LOGNOT(op_LSHIFT_13); + RzILOpPure *op_AND_16 = LOGAND(CAST(64, MSB(READ_REG(pkt, Rd_op, true)), READ_REG(pkt, Rd_op, true)), op_NOT_14); + RzILOpPure *op_MUL_19 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_RSHIFT_20 = SHIFTRA(Rss, op_MUL_19); + RzILOpPure *op_AND_22 = LOGAND(op_RSHIFT_20, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_27 = SHIFTRA(CAST(64, MSB(CAST(32, MSB(op_AND_22), DUP(op_AND_22))), CAST(32, MSB(DUP(op_AND_22)), DUP(op_AND_22))), VARL("u")); + RzILOpPure *op_RSHIFT_31 = SHIFTRA(op_RSHIFT_27, SN(32, 0)); + RzILOpPure *op_AND_34 = LOGAND(op_RSHIFT_31, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_AND_38 = LOGAND(CAST(32, MSB(CAST(16, MSB(op_AND_34), DUP(op_AND_34))), CAST(16, MSB(DUP(op_AND_34)), DUP(op_AND_34))), SN(32, 0xffff)); + RzILOpPure *op_MUL_41 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_42 = 
SHIFTL0(CAST(64, IL_FALSE, op_AND_38), op_MUL_41); + RzILOpPure *op_OR_44 = LOGOR(CAST(64, IL_FALSE, op_AND_16), op_LSHIFT_42); + RzILOpEffect *op_ASSIGN_46 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, op_OR_44)); + + // seq(h_tmp460; Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xffff << ...; + RzILOpEffect *seq_48 = op_ASSIGN_46; + + // seq(seq(h_tmp460; Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xffff ...; + RzILOpEffect *seq_49 = SEQN(2, seq_48, seq_8); + + // while ((i < 0x2)) { seq(seq(h_tmp460; Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xffff ... }; + RzILOpPure *op_LT_4 = SLT(VARL("i"), SN(32, 2)); + RzILOpEffect *for_50 = REPEAT(op_LT_4, seq_49); + + // seq(i = 0x0; while ((i < 0x2)) { seq(seq(h_tmp460; Rd = ((st32) ...; + RzILOpEffect *seq_51 = SEQN(2, op_ASSIGN_2, for_50); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_25, seq_51); + return instruction_sequence; +} + +// Rdd = vasrh(Rss,Ii) +RzILOpEffect *hex_il_op_s2_asr_i_vh(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: st32 i; + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_2 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_5 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp461 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_7 = SETL("h_tmp461", VARL("i")); + + // seq(h_tmp461 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_8 = SEQN(2, op_ASSIGN_hybrid_tmp_7, op_INC_5); + + // u = u; + RzILOpEffect *imm_assign_24 = SETL("u", u); + + // Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * 0x10)))) | (((ut64) (((st32) (((st16) ((Rss >> i * 0x10) & ((st64) 0xffff))) >> u)) & 0xffff)) << i * 0x10))); + RzILOpPure *op_MUL_12 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_13 = SHIFTL0(SN(64, 0xffff), op_MUL_12); + RzILOpPure *op_NOT_14 
= LOGNOT(op_LSHIFT_13); + RzILOpPure *op_AND_15 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_14); + RzILOpPure *op_MUL_18 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_19 = SHIFTRA(Rss, op_MUL_18); + RzILOpPure *op_AND_22 = LOGAND(op_RSHIFT_19, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_26 = SHIFTRA(CAST(16, MSB(op_AND_22), DUP(op_AND_22)), VARL("u")); + RzILOpPure *op_AND_29 = LOGAND(CAST(32, MSB(op_RSHIFT_26), DUP(op_RSHIFT_26)), SN(32, 0xffff)); + RzILOpPure *op_MUL_32 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_33 = SHIFTL0(CAST(64, IL_FALSE, op_AND_29), op_MUL_32); + RzILOpPure *op_OR_35 = LOGOR(CAST(64, IL_FALSE, op_AND_15), op_LSHIFT_33); + RzILOpEffect *op_ASSIGN_37 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_35)); + + // seq(h_tmp461; Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * 0x1 ...; + RzILOpEffect *seq_39 = op_ASSIGN_37; + + // seq(seq(h_tmp461; Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * ...; + RzILOpEffect *seq_40 = SEQN(2, seq_39, seq_8); + + // while ((i < 0x4)) { seq(seq(h_tmp461; Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * ... 
}; + RzILOpPure *op_LT_4 = SLT(VARL("i"), SN(32, 4)); + RzILOpEffect *for_41 = REPEAT(op_LT_4, seq_40); + + // seq(i = 0x0; while ((i < 0x4)) { seq(seq(h_tmp461; Rdd = ((st64) ...; + RzILOpEffect *seq_42 = SEQN(2, op_ASSIGN_2, for_41); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_24, seq_42); + return instruction_sequence; +} + +// Rdd = vasrw(Rss,Ii) +RzILOpEffect *hex_il_op_s2_asr_i_vw(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: st32 i; + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_2 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_5 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp462 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_7 = SETL("h_tmp462", VARL("i")); + + // seq(h_tmp462 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_8 = SEQN(2, op_ASSIGN_hybrid_tmp_7, op_INC_5); + + // u = u; + RzILOpEffect *imm_assign_24 = SETL("u", u); + + // Rdd = ((Rdd & (~(0xffffffff << i * 0x20))) | (((((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff))) >> u) & 0xffffffff) << i * 0x20)); + RzILOpPure *op_MUL_12 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_LSHIFT_13 = SHIFTL0(SN(64, 0xffffffff), op_MUL_12); + RzILOpPure *op_NOT_14 = LOGNOT(op_LSHIFT_13); + RzILOpPure *op_AND_15 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_14); + RzILOpPure *op_MUL_18 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_RSHIFT_19 = SHIFTRA(Rss, op_MUL_18); + RzILOpPure *op_AND_21 = LOGAND(op_RSHIFT_19, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_26 = SHIFTRA(CAST(64, MSB(CAST(32, MSB(op_AND_21), DUP(op_AND_21))), CAST(32, MSB(DUP(op_AND_21)), DUP(op_AND_21))), VARL("u")); + RzILOpPure *op_AND_28 = LOGAND(op_RSHIFT_26, SN(64, 0xffffffff)); + RzILOpPure *op_MUL_30 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure 
*op_LSHIFT_31 = SHIFTL0(op_AND_28, op_MUL_30); + RzILOpPure *op_OR_32 = LOGOR(op_AND_15, op_LSHIFT_31); + RzILOpEffect *op_ASSIGN_33 = WRITE_REG(bundle, Rdd_op, op_OR_32); + + // seq(h_tmp462; Rdd = ((Rdd & (~(0xffffffff << i * 0x20))) | ((((( ...; + RzILOpEffect *seq_35 = op_ASSIGN_33; + + // seq(seq(h_tmp462; Rdd = ((Rdd & (~(0xffffffff << i * 0x20))) | ( ...; + RzILOpEffect *seq_36 = SEQN(2, seq_35, seq_8); + + // while ((i < 0x2)) { seq(seq(h_tmp462; Rdd = ((Rdd & (~(0xffffffff << i * 0x20))) | ( ... }; + RzILOpPure *op_LT_4 = SLT(VARL("i"), SN(32, 2)); + RzILOpEffect *for_37 = REPEAT(op_LT_4, seq_36); + + // seq(i = 0x0; while ((i < 0x2)) { seq(seq(h_tmp462; Rdd = ((Rdd & ...; + RzILOpEffect *seq_38 = SEQN(2, op_ASSIGN_2, for_37); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_24, seq_38); + return instruction_sequence; +} + +// Rdd = asr(Rss,Rt) +RzILOpEffect *hex_il_op_s2_asr_r_p(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + // Declare: st32 shamt; + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + + // shamt = ((st32) sextract64(((ut64) Rt), 0x0, 0x7)); + RzILOpEffect *op_ASSIGN_10 = SETL("shamt", CAST(32, MSB(SEXTRACT64(CAST(64, IL_FALSE, Rt), SN(32, 0), SN(32, 7))), SEXTRACT64(CAST(64, IL_FALSE, DUP(Rt)), SN(32, 0), SN(32, 7)))); + + // Rdd = ((shamt < 0x0) ? 
((Rss << (-shamt) - 0x1) << 0x1) : (Rss >> shamt)); + RzILOpPure *op_LT_14 = SLT(VARL("shamt"), SN(32, 0)); + RzILOpPure *op_NEG_16 = NEG(VARL("shamt")); + RzILOpPure *op_SUB_18 = SUB(op_NEG_16, SN(32, 1)); + RzILOpPure *op_LSHIFT_19 = SHIFTL0(Rss, op_SUB_18); + RzILOpPure *op_LSHIFT_21 = SHIFTL0(op_LSHIFT_19, SN(32, 1)); + RzILOpPure *op_RSHIFT_22 = SHIFTRA(DUP(Rss), VARL("shamt")); + RzILOpPure *cond_23 = ITE(op_LT_14, op_LSHIFT_21, op_RSHIFT_22); + RzILOpEffect *op_ASSIGN_24 = WRITE_REG(bundle, Rdd_op, cond_23); + + RzILOpEffect *instruction_sequence = SEQN(2, op_ASSIGN_10, op_ASSIGN_24); + return instruction_sequence; +} + +// Rxx += asr(Rss,Rt) +RzILOpEffect *hex_il_op_s2_asr_r_p_acc(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + // Declare: st32 shamt; + const HexOp *Rxx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + + // shamt = ((st32) sextract64(((ut64) Rt), 0x0, 0x7)); + RzILOpEffect *op_ASSIGN_10 = SETL("shamt", CAST(32, MSB(SEXTRACT64(CAST(64, IL_FALSE, Rt), SN(32, 0), SN(32, 7))), SEXTRACT64(CAST(64, IL_FALSE, DUP(Rt)), SN(32, 0), SN(32, 7)))); + + // Rxx = Rxx + ((shamt < 0x0) ? 
((Rss << (-shamt) - 0x1) << 0x1) : (Rss >> shamt)); + RzILOpPure *op_LT_14 = SLT(VARL("shamt"), SN(32, 0)); + RzILOpPure *op_NEG_16 = NEG(VARL("shamt")); + RzILOpPure *op_SUB_18 = SUB(op_NEG_16, SN(32, 1)); + RzILOpPure *op_LSHIFT_19 = SHIFTL0(Rss, op_SUB_18); + RzILOpPure *op_LSHIFT_21 = SHIFTL0(op_LSHIFT_19, SN(32, 1)); + RzILOpPure *op_RSHIFT_22 = SHIFTRA(DUP(Rss), VARL("shamt")); + RzILOpPure *cond_23 = ITE(op_LT_14, op_LSHIFT_21, op_RSHIFT_22); + RzILOpPure *op_ADD_24 = ADD(READ_REG(pkt, Rxx_op, false), cond_23); + RzILOpEffect *op_ASSIGN_25 = WRITE_REG(bundle, Rxx_op, op_ADD_24); + + RzILOpEffect *instruction_sequence = SEQN(2, op_ASSIGN_10, op_ASSIGN_25); + return instruction_sequence; +} + +// Rxx &= asr(Rss,Rt) +RzILOpEffect *hex_il_op_s2_asr_r_p_and(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + // Declare: st32 shamt; + const HexOp *Rxx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + + // shamt = ((st32) sextract64(((ut64) Rt), 0x0, 0x7)); + RzILOpEffect *op_ASSIGN_10 = SETL("shamt", CAST(32, MSB(SEXTRACT64(CAST(64, IL_FALSE, Rt), SN(32, 0), SN(32, 7))), SEXTRACT64(CAST(64, IL_FALSE, DUP(Rt)), SN(32, 0), SN(32, 7)))); + + // Rxx = (Rxx & ((shamt < 0x0) ? 
((Rss << (-shamt) - 0x1) << 0x1) : (Rss >> shamt))); + RzILOpPure *op_LT_14 = SLT(VARL("shamt"), SN(32, 0)); + RzILOpPure *op_NEG_16 = NEG(VARL("shamt")); + RzILOpPure *op_SUB_18 = SUB(op_NEG_16, SN(32, 1)); + RzILOpPure *op_LSHIFT_19 = SHIFTL0(Rss, op_SUB_18); + RzILOpPure *op_LSHIFT_21 = SHIFTL0(op_LSHIFT_19, SN(32, 1)); + RzILOpPure *op_RSHIFT_22 = SHIFTRA(DUP(Rss), VARL("shamt")); + RzILOpPure *cond_23 = ITE(op_LT_14, op_LSHIFT_21, op_RSHIFT_22); + RzILOpPure *op_AND_24 = LOGAND(READ_REG(pkt, Rxx_op, false), cond_23); + RzILOpEffect *op_ASSIGN_25 = WRITE_REG(bundle, Rxx_op, op_AND_24); + + RzILOpEffect *instruction_sequence = SEQN(2, op_ASSIGN_10, op_ASSIGN_25); + return instruction_sequence; +} + +// Rxx -= asr(Rss,Rt) +RzILOpEffect *hex_il_op_s2_asr_r_p_nac(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + // Declare: st32 shamt; + const HexOp *Rxx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + + // shamt = ((st32) sextract64(((ut64) Rt), 0x0, 0x7)); + RzILOpEffect *op_ASSIGN_10 = SETL("shamt", CAST(32, MSB(SEXTRACT64(CAST(64, IL_FALSE, Rt), SN(32, 0), SN(32, 7))), SEXTRACT64(CAST(64, IL_FALSE, DUP(Rt)), SN(32, 0), SN(32, 7)))); + + // Rxx = Rxx - ((shamt < 0x0) ? 
((Rss << (-shamt) - 0x1) << 0x1) : (Rss >> shamt)); + RzILOpPure *op_LT_14 = SLT(VARL("shamt"), SN(32, 0)); + RzILOpPure *op_NEG_16 = NEG(VARL("shamt")); + RzILOpPure *op_SUB_18 = SUB(op_NEG_16, SN(32, 1)); + RzILOpPure *op_LSHIFT_19 = SHIFTL0(Rss, op_SUB_18); + RzILOpPure *op_LSHIFT_21 = SHIFTL0(op_LSHIFT_19, SN(32, 1)); + RzILOpPure *op_RSHIFT_22 = SHIFTRA(DUP(Rss), VARL("shamt")); + RzILOpPure *cond_23 = ITE(op_LT_14, op_LSHIFT_21, op_RSHIFT_22); + RzILOpPure *op_SUB_24 = SUB(READ_REG(pkt, Rxx_op, false), cond_23); + RzILOpEffect *op_ASSIGN_25 = WRITE_REG(bundle, Rxx_op, op_SUB_24); + + RzILOpEffect *instruction_sequence = SEQN(2, op_ASSIGN_10, op_ASSIGN_25); + return instruction_sequence; +} + +// Rxx |= asr(Rss,Rt) +RzILOpEffect *hex_il_op_s2_asr_r_p_or(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + // Declare: st32 shamt; + const HexOp *Rxx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + + // shamt = ((st32) sextract64(((ut64) Rt), 0x0, 0x7)); + RzILOpEffect *op_ASSIGN_10 = SETL("shamt", CAST(32, MSB(SEXTRACT64(CAST(64, IL_FALSE, Rt), SN(32, 0), SN(32, 7))), SEXTRACT64(CAST(64, IL_FALSE, DUP(Rt)), SN(32, 0), SN(32, 7)))); + + // Rxx = (Rxx | ((shamt < 0x0) ? 
((Rss << (-shamt) - 0x1) << 0x1) : (Rss >> shamt))); + RzILOpPure *op_LT_14 = SLT(VARL("shamt"), SN(32, 0)); + RzILOpPure *op_NEG_16 = NEG(VARL("shamt")); + RzILOpPure *op_SUB_18 = SUB(op_NEG_16, SN(32, 1)); + RzILOpPure *op_LSHIFT_19 = SHIFTL0(Rss, op_SUB_18); + RzILOpPure *op_LSHIFT_21 = SHIFTL0(op_LSHIFT_19, SN(32, 1)); + RzILOpPure *op_RSHIFT_22 = SHIFTRA(DUP(Rss), VARL("shamt")); + RzILOpPure *cond_23 = ITE(op_LT_14, op_LSHIFT_21, op_RSHIFT_22); + RzILOpPure *op_OR_24 = LOGOR(READ_REG(pkt, Rxx_op, false), cond_23); + RzILOpEffect *op_ASSIGN_25 = WRITE_REG(bundle, Rxx_op, op_OR_24); + + RzILOpEffect *instruction_sequence = SEQN(2, op_ASSIGN_10, op_ASSIGN_25); + return instruction_sequence; +} + +// Rxx ^= asr(Rss,Rt) +RzILOpEffect *hex_il_op_s2_asr_r_p_xor(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + // Declare: st32 shamt; + const HexOp *Rxx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + + // shamt = ((st32) sextract64(((ut64) Rt), 0x0, 0x7)); + RzILOpEffect *op_ASSIGN_10 = SETL("shamt", CAST(32, MSB(SEXTRACT64(CAST(64, IL_FALSE, Rt), SN(32, 0), SN(32, 7))), SEXTRACT64(CAST(64, IL_FALSE, DUP(Rt)), SN(32, 0), SN(32, 7)))); + + // Rxx = (Rxx ^ ((shamt < 0x0) ? 
((Rss << (-shamt) - 0x1) << 0x1) : (Rss >> shamt))); + RzILOpPure *op_LT_14 = SLT(VARL("shamt"), SN(32, 0)); + RzILOpPure *op_NEG_16 = NEG(VARL("shamt")); + RzILOpPure *op_SUB_18 = SUB(op_NEG_16, SN(32, 1)); + RzILOpPure *op_LSHIFT_19 = SHIFTL0(Rss, op_SUB_18); + RzILOpPure *op_LSHIFT_21 = SHIFTL0(op_LSHIFT_19, SN(32, 1)); + RzILOpPure *op_RSHIFT_22 = SHIFTRA(DUP(Rss), VARL("shamt")); + RzILOpPure *cond_23 = ITE(op_LT_14, op_LSHIFT_21, op_RSHIFT_22); + RzILOpPure *op_XOR_24 = LOGXOR(READ_REG(pkt, Rxx_op, false), cond_23); + RzILOpEffect *op_ASSIGN_25 = WRITE_REG(bundle, Rxx_op, op_XOR_24); + + RzILOpEffect *instruction_sequence = SEQN(2, op_ASSIGN_10, op_ASSIGN_25); + return instruction_sequence; +} + +// Rd = asr(Rs,Rt) +RzILOpEffect *hex_il_op_s2_asr_r_r(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + // Declare: st32 shamt; + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // shamt = ((st32) sextract64(((ut64) Rt), 0x0, 0x7)); + RzILOpEffect *op_ASSIGN_10 = SETL("shamt", CAST(32, MSB(SEXTRACT64(CAST(64, IL_FALSE, Rt), SN(32, 0), SN(32, 7))), SEXTRACT64(CAST(64, IL_FALSE, DUP(Rt)), SN(32, 0), SN(32, 7)))); + + // Rd = ((st32) ((shamt < 0x0) ? 
((((st64) Rs) << (-shamt) - 0x1) << 0x1) : (((st64) Rs) >> shamt))); + RzILOpPure *op_LT_14 = SLT(VARL("shamt"), SN(32, 0)); + RzILOpPure *op_NEG_17 = NEG(VARL("shamt")); + RzILOpPure *op_SUB_19 = SUB(op_NEG_17, SN(32, 1)); + RzILOpPure *op_LSHIFT_20 = SHIFTL0(CAST(64, MSB(Rs), DUP(Rs)), op_SUB_19); + RzILOpPure *op_LSHIFT_22 = SHIFTL0(op_LSHIFT_20, SN(32, 1)); + RzILOpPure *op_RSHIFT_24 = SHIFTRA(CAST(64, MSB(DUP(Rs)), DUP(Rs)), VARL("shamt")); + RzILOpPure *cond_25 = ITE(op_LT_14, op_LSHIFT_22, op_RSHIFT_24); + RzILOpEffect *op_ASSIGN_27 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(cond_25), DUP(cond_25))); + + RzILOpEffect *instruction_sequence = SEQN(2, op_ASSIGN_10, op_ASSIGN_27); + return instruction_sequence; +} + +// Rx += asr(Rs,Rt) +RzILOpEffect *hex_il_op_s2_asr_r_r_acc(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + // Declare: st32 shamt; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // shamt = ((st32) sextract64(((ut64) Rt), 0x0, 0x7)); + RzILOpEffect *op_ASSIGN_10 = SETL("shamt", CAST(32, MSB(SEXTRACT64(CAST(64, IL_FALSE, Rt), SN(32, 0), SN(32, 7))), SEXTRACT64(CAST(64, IL_FALSE, DUP(Rt)), SN(32, 0), SN(32, 7)))); + + // Rx = ((st32) ((st64) Rx) + ((shamt < 0x0) ? 
((((st64) Rs) << (-shamt) - 0x1) << 0x1) : (((st64) Rs) >> shamt))); + RzILOpPure *op_LT_14 = SLT(VARL("shamt"), SN(32, 0)); + RzILOpPure *op_NEG_17 = NEG(VARL("shamt")); + RzILOpPure *op_SUB_19 = SUB(op_NEG_17, SN(32, 1)); + RzILOpPure *op_LSHIFT_20 = SHIFTL0(CAST(64, MSB(Rs), DUP(Rs)), op_SUB_19); + RzILOpPure *op_LSHIFT_22 = SHIFTL0(op_LSHIFT_20, SN(32, 1)); + RzILOpPure *op_RSHIFT_24 = SHIFTRA(CAST(64, MSB(DUP(Rs)), DUP(Rs)), VARL("shamt")); + RzILOpPure *cond_25 = ITE(op_LT_14, op_LSHIFT_22, op_RSHIFT_24); + RzILOpPure *op_ADD_27 = ADD(CAST(64, MSB(READ_REG(pkt, Rx_op, false)), READ_REG(pkt, Rx_op, false)), cond_25); + RzILOpEffect *op_ASSIGN_29 = WRITE_REG(bundle, Rx_op, CAST(32, MSB(op_ADD_27), DUP(op_ADD_27))); + + RzILOpEffect *instruction_sequence = SEQN(2, op_ASSIGN_10, op_ASSIGN_29); + return instruction_sequence; +} + +// Rx &= asr(Rs,Rt) +RzILOpEffect *hex_il_op_s2_asr_r_r_and(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + // Declare: st32 shamt; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // shamt = ((st32) sextract64(((ut64) Rt), 0x0, 0x7)); + RzILOpEffect *op_ASSIGN_10 = SETL("shamt", CAST(32, MSB(SEXTRACT64(CAST(64, IL_FALSE, Rt), SN(32, 0), SN(32, 7))), SEXTRACT64(CAST(64, IL_FALSE, DUP(Rt)), SN(32, 0), SN(32, 7)))); + + // Rx = ((st32) (((st64) Rx) & ((shamt < 0x0) ? 
((((st64) Rs) << (-shamt) - 0x1) << 0x1) : (((st64) Rs) >> shamt)))); + RzILOpPure *op_LT_14 = SLT(VARL("shamt"), SN(32, 0)); + RzILOpPure *op_NEG_17 = NEG(VARL("shamt")); + RzILOpPure *op_SUB_19 = SUB(op_NEG_17, SN(32, 1)); + RzILOpPure *op_LSHIFT_20 = SHIFTL0(CAST(64, MSB(Rs), DUP(Rs)), op_SUB_19); + RzILOpPure *op_LSHIFT_22 = SHIFTL0(op_LSHIFT_20, SN(32, 1)); + RzILOpPure *op_RSHIFT_24 = SHIFTRA(CAST(64, MSB(DUP(Rs)), DUP(Rs)), VARL("shamt")); + RzILOpPure *cond_25 = ITE(op_LT_14, op_LSHIFT_22, op_RSHIFT_24); + RzILOpPure *op_AND_27 = LOGAND(CAST(64, MSB(READ_REG(pkt, Rx_op, false)), READ_REG(pkt, Rx_op, false)), cond_25); + RzILOpEffect *op_ASSIGN_29 = WRITE_REG(bundle, Rx_op, CAST(32, MSB(op_AND_27), DUP(op_AND_27))); + + RzILOpEffect *instruction_sequence = SEQN(2, op_ASSIGN_10, op_ASSIGN_29); + return instruction_sequence; +} + +// Rx -= asr(Rs,Rt) +RzILOpEffect *hex_il_op_s2_asr_r_r_nac(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + // Declare: st32 shamt; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // shamt = ((st32) sextract64(((ut64) Rt), 0x0, 0x7)); + RzILOpEffect *op_ASSIGN_10 = SETL("shamt", CAST(32, MSB(SEXTRACT64(CAST(64, IL_FALSE, Rt), SN(32, 0), SN(32, 7))), SEXTRACT64(CAST(64, IL_FALSE, DUP(Rt)), SN(32, 0), SN(32, 7)))); + + // Rx = ((st32) ((st64) Rx) - ((shamt < 0x0) ? 
((((st64) Rs) << (-shamt) - 0x1) << 0x1) : (((st64) Rs) >> shamt))); + RzILOpPure *op_LT_14 = SLT(VARL("shamt"), SN(32, 0)); + RzILOpPure *op_NEG_17 = NEG(VARL("shamt")); + RzILOpPure *op_SUB_19 = SUB(op_NEG_17, SN(32, 1)); + RzILOpPure *op_LSHIFT_20 = SHIFTL0(CAST(64, MSB(Rs), DUP(Rs)), op_SUB_19); + RzILOpPure *op_LSHIFT_22 = SHIFTL0(op_LSHIFT_20, SN(32, 1)); + RzILOpPure *op_RSHIFT_24 = SHIFTRA(CAST(64, MSB(DUP(Rs)), DUP(Rs)), VARL("shamt")); + RzILOpPure *cond_25 = ITE(op_LT_14, op_LSHIFT_22, op_RSHIFT_24); + RzILOpPure *op_SUB_27 = SUB(CAST(64, MSB(READ_REG(pkt, Rx_op, false)), READ_REG(pkt, Rx_op, false)), cond_25); + RzILOpEffect *op_ASSIGN_29 = WRITE_REG(bundle, Rx_op, CAST(32, MSB(op_SUB_27), DUP(op_SUB_27))); + + RzILOpEffect *instruction_sequence = SEQN(2, op_ASSIGN_10, op_ASSIGN_29); + return instruction_sequence; +} + +// Rx |= asr(Rs,Rt) +RzILOpEffect *hex_il_op_s2_asr_r_r_or(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + // Declare: st32 shamt; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // shamt = ((st32) sextract64(((ut64) Rt), 0x0, 0x7)); + RzILOpEffect *op_ASSIGN_10 = SETL("shamt", CAST(32, MSB(SEXTRACT64(CAST(64, IL_FALSE, Rt), SN(32, 0), SN(32, 7))), SEXTRACT64(CAST(64, IL_FALSE, DUP(Rt)), SN(32, 0), SN(32, 7)))); + + // Rx = ((st32) (((st64) Rx) | ((shamt < 0x0) ? 
((((st64) Rs) << (-shamt) - 0x1) << 0x1) : (((st64) Rs) >> shamt)))); + RzILOpPure *op_LT_14 = SLT(VARL("shamt"), SN(32, 0)); + RzILOpPure *op_NEG_17 = NEG(VARL("shamt")); + RzILOpPure *op_SUB_19 = SUB(op_NEG_17, SN(32, 1)); + RzILOpPure *op_LSHIFT_20 = SHIFTL0(CAST(64, MSB(Rs), DUP(Rs)), op_SUB_19); + RzILOpPure *op_LSHIFT_22 = SHIFTL0(op_LSHIFT_20, SN(32, 1)); + RzILOpPure *op_RSHIFT_24 = SHIFTRA(CAST(64, MSB(DUP(Rs)), DUP(Rs)), VARL("shamt")); + RzILOpPure *cond_25 = ITE(op_LT_14, op_LSHIFT_22, op_RSHIFT_24); + RzILOpPure *op_OR_27 = LOGOR(CAST(64, MSB(READ_REG(pkt, Rx_op, false)), READ_REG(pkt, Rx_op, false)), cond_25); + RzILOpEffect *op_ASSIGN_29 = WRITE_REG(bundle, Rx_op, CAST(32, MSB(op_OR_27), DUP(op_OR_27))); + + RzILOpEffect *instruction_sequence = SEQN(2, op_ASSIGN_10, op_ASSIGN_29); + return instruction_sequence; +} + +// Rd = asr(Rs,Rt):sat +RzILOpEffect *hex_il_op_s2_asr_r_r_sat(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + // Declare: st32 shamt; + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // shamt = ((st32) sextract64(((ut64) Rt), 0x0, 0x7)); + RzILOpEffect *op_ASSIGN_10 = SETL("shamt", CAST(32, MSB(SEXTRACT64(CAST(64, IL_FALSE, Rt), SN(32, 0), SN(32, 7))), SEXTRACT64(CAST(64, IL_FALSE, DUP(Rt)), SN(32, 0), SN(32, 7)))); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_48 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) Rs) << (-shamt) - 0x1) << 0x1)), 0x0, 0x20) == ((((st64) Rs) << (-shamt) - 0x1) << 0x1))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((((st64) Rs) << (-shamt) - 0x1) << 0x1) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_NEG_20 = NEG(VARL("shamt")); + RzILOpPure *op_SUB_22 = SUB(op_NEG_20, SN(32, 1)); + RzILOpPure *op_LSHIFT_23 = SHIFTL0(CAST(64, MSB(Rs), DUP(Rs)), op_SUB_22); + RzILOpPure *op_LSHIFT_25 = SHIFTL0(op_LSHIFT_23, SN(32, 1)); + RzILOpPure *op_NEG_32 = NEG(VARL("shamt")); + RzILOpPure *op_SUB_34 = SUB(op_NEG_32, SN(32, 1)); + RzILOpPure *op_LSHIFT_35 = SHIFTL0(CAST(64, MSB(DUP(Rs)), DUP(Rs)), op_SUB_34); + RzILOpPure *op_LSHIFT_37 = SHIFTL0(op_LSHIFT_35, SN(32, 1)); + RzILOpPure *op_EQ_38 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_LSHIFT_25), SN(32, 0), SN(32, 0x20)), op_LSHIFT_37); + RzILOpPure *op_NEG_50 = NEG(VARL("shamt")); + RzILOpPure *op_SUB_52 = SUB(op_NEG_50, SN(32, 1)); + RzILOpPure *op_LSHIFT_53 = SHIFTL0(CAST(64, MSB(DUP(Rs)), DUP(Rs)), op_SUB_52); + RzILOpPure *op_LSHIFT_55 = SHIFTL0(op_LSHIFT_53, SN(32, 1)); + RzILOpPure *op_LT_58 = SLT(op_LSHIFT_55, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_63 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_64 = NEG(op_LSHIFT_63); + RzILOpPure *op_LSHIFT_69 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_72 = SUB(op_LSHIFT_69, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_73 = ITE(op_LT_58, op_NEG_64, op_SUB_72); + RzILOpEffect *gcc_expr_74 = BRANCH(op_EQ_38, EMPTY(), set_usr_field_call_48); + + // h_tmp463 = HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) Rs) << (-shamt) - 0x1) << 0x1)), 0x0, 0x20) == ((((st64) Rs) << (-shamt) - 0x1) << 0x1))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((((st64) Rs) << (-shamt) - 0x1) << 0x1) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_76 = SETL("h_tmp463", cond_73); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) Rs) << (-sham ...; + RzILOpEffect *seq_77 = SEQN(2, gcc_expr_74, op_ASSIGN_hybrid_tmp_76); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_86 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((((st32) (((sextract64(((ut64) ((((st64) Rs) << (-shamt) - 0x1) << 0x1)), 0x0, 0x20) == ((((st64) Rs) << (-shamt) - 0x1) << 0x1)) ? ((((st64) Rs) << (-shamt) - 0x1) << 0x1) : h_tmp463) ^ ((st64) Rs))) < 0x0)) {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))} else {{}}, ((Rs < 0x0) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_NEG_40 = NEG(VARL("shamt")); + RzILOpPure *op_SUB_42 = SUB(op_NEG_40, SN(32, 1)); + RzILOpPure *op_LSHIFT_43 = SHIFTL0(CAST(64, MSB(DUP(Rs)), DUP(Rs)), op_SUB_42); + RzILOpPure *op_LSHIFT_45 = SHIFTL0(op_LSHIFT_43, SN(32, 1)); + RzILOpPure *cond_78 = ITE(DUP(op_EQ_38), op_LSHIFT_45, VARL("h_tmp463")); + RzILOpPure *op_XOR_80 = LOGXOR(cond_78, CAST(64, MSB(DUP(Rs)), DUP(Rs))); + RzILOpPure *op_LT_83 = SLT(CAST(32, MSB(op_XOR_80), DUP(op_XOR_80)), SN(32, 0)); + RzILOpPure *op_LT_88 = SLT(DUP(Rs), SN(32, 0)); + RzILOpPure *op_LSHIFT_93 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_94 = NEG(op_LSHIFT_93); + RzILOpPure *op_LSHIFT_99 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_102 = SUB(op_LSHIFT_99, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_103 = ITE(op_LT_88, op_NEG_94, op_SUB_102); + RzILOpEffect *gcc_expr_104 = BRANCH(op_LT_83, set_usr_field_call_86, EMPTY()); + + // h_tmp464 = HYB(gcc_expr_if ((((st32) (((sextract64(((ut64) ((((st64) Rs) << (-shamt) - 0x1) << 0x1)), 0x0, 0x20) == ((((st64) Rs) << (-shamt) - 0x1) << 0x1)) ? 
((((st64) Rs) << (-shamt) - 0x1) << 0x1) : h_tmp463) ^ ((st64) Rs))) < 0x0)) {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))} else {{}}, ((Rs < 0x0) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_106 = SETL("h_tmp464", cond_103); + + // seq(HYB(gcc_expr_if ((((st32) (((sextract64(((ut64) ((((st64) Rs ...; + RzILOpEffect *seq_107 = SEQN(2, gcc_expr_104, op_ASSIGN_hybrid_tmp_106); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_123 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if (((Rs > 0x0) && (((((st64) Rs) << (-shamt) - 0x1) << 0x1) == ((st64) 0x0)))) {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))} else {{}}, ((Rs < 0x0) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_GT_109 = SGT(DUP(Rs), SN(32, 0)); + RzILOpPure *op_NEG_111 = NEG(VARL("shamt")); + RzILOpPure *op_SUB_113 = SUB(op_NEG_111, SN(32, 1)); + RzILOpPure *op_LSHIFT_114 = SHIFTL0(CAST(64, MSB(DUP(Rs)), DUP(Rs)), op_SUB_113); + RzILOpPure *op_LSHIFT_116 = SHIFTL0(op_LSHIFT_114, SN(32, 1)); + RzILOpPure *op_EQ_119 = EQ(op_LSHIFT_116, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_AND_120 = AND(op_GT_109, op_EQ_119); + RzILOpPure *op_LT_125 = SLT(DUP(Rs), SN(32, 0)); + RzILOpPure *op_LSHIFT_130 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_131 = NEG(op_LSHIFT_130); + RzILOpPure *op_LSHIFT_136 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_139 = SUB(op_LSHIFT_136, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_140 = ITE(op_LT_125, op_NEG_131, op_SUB_139); + RzILOpEffect *gcc_expr_141 = BRANCH(op_AND_120, set_usr_field_call_123, EMPTY()); + + // h_tmp465 = HYB(gcc_expr_if (((Rs > 0x0) && (((((st64) Rs) << (-shamt) - 0x1) << 0x1) == ((st64) 0x0)))) {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))} else {{}}, ((Rs < 0x0) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_143 = SETL("h_tmp465", cond_140); + + // seq(HYB(gcc_expr_if (((Rs > 0x0) && (((((st64) Rs) << (-shamt) - ...; + RzILOpEffect *seq_144 = SEQN(2, gcc_expr_141, op_ASSIGN_hybrid_tmp_143); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_177 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) Rs) << (-shamt) - 0x1) << 0x1)), 0x0, 0x20) == ((((st64) Rs) << (-shamt) - 0x1) << 0x1))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((((st64) Rs) << (-shamt) - 0x1) << 0x1) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_NEG_149 = NEG(VARL("shamt")); + RzILOpPure *op_SUB_151 = SUB(op_NEG_149, SN(32, 1)); + RzILOpPure *op_LSHIFT_152 = SHIFTL0(CAST(64, MSB(DUP(Rs)), DUP(Rs)), op_SUB_151); + RzILOpPure *op_LSHIFT_154 = SHIFTL0(op_LSHIFT_152, SN(32, 1)); + RzILOpPure *op_NEG_161 = NEG(VARL("shamt")); + RzILOpPure *op_SUB_163 = SUB(op_NEG_161, SN(32, 1)); + RzILOpPure *op_LSHIFT_164 = SHIFTL0(CAST(64, MSB(DUP(Rs)), DUP(Rs)), op_SUB_163); + RzILOpPure *op_LSHIFT_166 = SHIFTL0(op_LSHIFT_164, SN(32, 1)); + RzILOpPure *op_EQ_167 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_LSHIFT_154), SN(32, 0), SN(32, 0x20)), op_LSHIFT_166); + RzILOpPure *op_NEG_179 = NEG(VARL("shamt")); + RzILOpPure *op_SUB_181 = SUB(op_NEG_179, SN(32, 1)); + RzILOpPure *op_LSHIFT_182 = SHIFTL0(CAST(64, MSB(DUP(Rs)), DUP(Rs)), op_SUB_181); + RzILOpPure *op_LSHIFT_184 = SHIFTL0(op_LSHIFT_182, SN(32, 1)); + RzILOpPure *op_LT_187 = SLT(op_LSHIFT_184, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_192 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_193 = NEG(op_LSHIFT_192); + RzILOpPure *op_LSHIFT_198 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_201 = SUB(op_LSHIFT_198, CAST(64, MSB(SN(32, 1)), SN(32, 1))); 
+ RzILOpPure *cond_202 = ITE(op_LT_187, op_NEG_193, op_SUB_201); + RzILOpEffect *gcc_expr_203 = BRANCH(op_EQ_167, EMPTY(), set_usr_field_call_177); + + // h_tmp466 = HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) Rs) << (-shamt) - 0x1) << 0x1)), 0x0, 0x20) == ((((st64) Rs) << (-shamt) - 0x1) << 0x1))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((((st64) Rs) << (-shamt) - 0x1) << 0x1) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_205 = SETL("h_tmp466", cond_202); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) Rs) << (-sham ...; + RzILOpEffect *seq_206 = SEQN(2, gcc_expr_203, op_ASSIGN_hybrid_tmp_205); + + // Rd = ((st32) ((shamt < 0x0) ? ((((st32) (((sextract64(((ut64) ((((st64) Rs) << (-shamt) - 0x1) << 0x1)), 0x0, 0x20) == ((((st64) Rs) << (-shamt) - 0x1) << 0x1)) ? ((((st64) Rs) << (-shamt) - 0x1) << 0x1) : h_tmp463) ^ ((st64) Rs))) < 0x0) ? h_tmp464 : (((Rs > 0x0) && (((((st64) Rs) << (-shamt) - 0x1) << 0x1) == ((st64) 0x0))) ? h_tmp465 : ((sextract64(((ut64) ((((st64) Rs) << (-shamt) - 0x1) << 0x1)), 0x0, 0x20) == ((((st64) Rs) << (-shamt) - 0x1) << 0x1)) ? 
((((st64) Rs) << (-shamt) - 0x1) << 0x1) : h_tmp466))) : (((st64) Rs) >> shamt))); + RzILOpPure *op_LT_14 = SLT(VARL("shamt"), SN(32, 0)); + RzILOpPure *op_NEG_169 = NEG(VARL("shamt")); + RzILOpPure *op_SUB_171 = SUB(op_NEG_169, SN(32, 1)); + RzILOpPure *op_LSHIFT_172 = SHIFTL0(CAST(64, MSB(DUP(Rs)), DUP(Rs)), op_SUB_171); + RzILOpPure *op_LSHIFT_174 = SHIFTL0(op_LSHIFT_172, SN(32, 1)); + RzILOpPure *cond_207 = ITE(DUP(op_EQ_167), op_LSHIFT_174, VARL("h_tmp466")); + RzILOpPure *cond_208 = ITE(DUP(op_AND_120), VARL("h_tmp465"), cond_207); + RzILOpPure *cond_209 = ITE(DUP(op_LT_83), VARL("h_tmp464"), cond_208); + RzILOpPure *op_RSHIFT_211 = SHIFTRA(CAST(64, MSB(DUP(Rs)), DUP(Rs)), VARL("shamt")); + RzILOpPure *cond_212 = ITE(op_LT_14, cond_209, op_RSHIFT_211); + RzILOpEffect *op_ASSIGN_214 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(cond_212), DUP(cond_212))); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((((st64) Rs) << (- ...; + RzILOpEffect *seq_215 = SEQN(5, seq_77, seq_107, seq_144, seq_206, op_ASSIGN_214); + + RzILOpEffect *instruction_sequence = SEQN(2, op_ASSIGN_10, seq_215); + return instruction_sequence; +} + +// Rd = vasrw(Rss,Rt) +RzILOpEffect *hex_il_op_s2_asr_r_svw_trun(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: st32 i; + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_2 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_5 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp467 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_7 = SETL("h_tmp467", VARL("i")); + + // seq(h_tmp467 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_8 = SEQN(2, op_ASSIGN_hybrid_tmp_7, op_INC_5); + + // Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xffff << i * 0x10)))) | 
(((ut64) (((st32) ((st16) ((((sextract64(((ut64) Rt), 0x0, 0x7) < ((st64) 0x0)) ? ((((st64) ((st32) ((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff))))) << (-sextract64(((ut64) Rt), 0x0, 0x7)) - ((st64) 0x1)) << 0x1) : (((st64) ((st32) ((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff))))) >> sextract64(((ut64) Rt), 0x0, 0x7))) >> 0x0) & ((st64) 0xffff)))) & 0xffff)) << i * 0x10))); + RzILOpPure *op_MUL_12 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_13 = SHIFTL0(SN(64, 0xffff), op_MUL_12); + RzILOpPure *op_NOT_14 = LOGNOT(op_LSHIFT_13); + RzILOpPure *op_AND_16 = LOGAND(CAST(64, MSB(READ_REG(pkt, Rd_op, true)), READ_REG(pkt, Rd_op, true)), op_NOT_14); + RzILOpPure *op_LT_28 = SLT(SEXTRACT64(CAST(64, IL_FALSE, Rt), SN(32, 0), SN(32, 7)), CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_MUL_31 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_RSHIFT_32 = SHIFTRA(Rss, op_MUL_31); + RzILOpPure *op_AND_34 = LOGAND(op_RSHIFT_32, SN(64, 0xffffffff)); + RzILOpPure *op_NEG_47 = NEG(SEXTRACT64(CAST(64, IL_FALSE, DUP(Rt)), SN(32, 0), SN(32, 7))); + RzILOpPure *op_SUB_50 = SUB(op_NEG_47, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *op_LSHIFT_51 = SHIFTL0(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_34), DUP(op_AND_34))), CAST(32, MSB(DUP(op_AND_34)), DUP(op_AND_34)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_34)), DUP(op_AND_34))), CAST(32, MSB(DUP(op_AND_34)), DUP(op_AND_34))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_34)), DUP(op_AND_34))), CAST(32, MSB(DUP(op_AND_34)), DUP(op_AND_34)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_34)), DUP(op_AND_34))), CAST(32, MSB(DUP(op_AND_34)), DUP(op_AND_34))))), op_SUB_50); + RzILOpPure *op_LSHIFT_53 = SHIFTL0(op_LSHIFT_51, SN(32, 1)); + RzILOpPure *op_MUL_55 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_RSHIFT_56 = SHIFTRA(DUP(Rss), op_MUL_55); + RzILOpPure *op_AND_58 = LOGAND(op_RSHIFT_56, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_71 = SHIFTRA(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, 
MSB(op_AND_58), DUP(op_AND_58))), CAST(32, MSB(DUP(op_AND_58)), DUP(op_AND_58)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_58)), DUP(op_AND_58))), CAST(32, MSB(DUP(op_AND_58)), DUP(op_AND_58))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_58)), DUP(op_AND_58))), CAST(32, MSB(DUP(op_AND_58)), DUP(op_AND_58)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_58)), DUP(op_AND_58))), CAST(32, MSB(DUP(op_AND_58)), DUP(op_AND_58))))), SEXTRACT64(CAST(64, IL_FALSE, DUP(Rt)), SN(32, 0), SN(32, 7))); + RzILOpPure *cond_72 = ITE(op_LT_28, op_LSHIFT_53, op_RSHIFT_71); + RzILOpPure *op_RSHIFT_76 = SHIFTRA(cond_72, SN(32, 0)); + RzILOpPure *op_AND_79 = LOGAND(op_RSHIFT_76, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_AND_83 = LOGAND(CAST(32, MSB(CAST(16, MSB(op_AND_79), DUP(op_AND_79))), CAST(16, MSB(DUP(op_AND_79)), DUP(op_AND_79))), SN(32, 0xffff)); + RzILOpPure *op_MUL_86 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_87 = SHIFTL0(CAST(64, IL_FALSE, op_AND_83), op_MUL_86); + RzILOpPure *op_OR_89 = LOGOR(CAST(64, IL_FALSE, op_AND_16), op_LSHIFT_87); + RzILOpEffect *op_ASSIGN_91 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, op_OR_89)); + + // seq(h_tmp467; Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xffff << ...; + RzILOpEffect *seq_93 = op_ASSIGN_91; + + // seq(seq(h_tmp467; Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xffff ...; + RzILOpEffect *seq_94 = SEQN(2, seq_93, seq_8); + + // while ((i < 0x2)) { seq(seq(h_tmp467; Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xffff ... 
}; + RzILOpPure *op_LT_4 = SLT(VARL("i"), SN(32, 2)); + RzILOpEffect *for_95 = REPEAT(op_LT_4, seq_94); + + // seq(i = 0x0; while ((i < 0x2)) { seq(seq(h_tmp467; Rd = ((st32) ...; + RzILOpEffect *seq_96 = SEQN(2, op_ASSIGN_2, for_95); + + RzILOpEffect *instruction_sequence = seq_96; + return instruction_sequence; +} + +// Rdd = vasrh(Rss,Rt) +RzILOpEffect *hex_il_op_s2_asr_r_vh(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: st32 i; + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_2 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_5 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp468 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_7 = SETL("h_tmp468", VARL("i")); + + // seq(h_tmp468 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_8 = SEQN(2, op_ASSIGN_hybrid_tmp_7, op_INC_5); + + // Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * 0x10)))) | (((ut64) (((sextract64(((ut64) Rt), 0x0, 0x7) < ((st64) 0x0)) ? 
((((st64) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff)))) << (-sextract64(((ut64) Rt), 0x0, 0x7)) - ((st64) 0x1)) << 0x1) : (((st64) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff)))) >> sextract64(((ut64) Rt), 0x0, 0x7))) & ((st64) 0xffff))) << i * 0x10))); + RzILOpPure *op_MUL_12 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_13 = SHIFTL0(SN(64, 0xffff), op_MUL_12); + RzILOpPure *op_NOT_14 = LOGNOT(op_LSHIFT_13); + RzILOpPure *op_AND_15 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_14); + RzILOpPure *op_LT_27 = SLT(SEXTRACT64(CAST(64, IL_FALSE, Rt), SN(32, 0), SN(32, 7)), CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_MUL_30 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_31 = SHIFTRA(Rss, op_MUL_30); + RzILOpPure *op_AND_34 = LOGAND(op_RSHIFT_31, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_NEG_45 = NEG(SEXTRACT64(CAST(64, IL_FALSE, DUP(Rt)), SN(32, 0), SN(32, 7))); + RzILOpPure *op_SUB_48 = SUB(op_NEG_45, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *op_LSHIFT_49 = SHIFTL0(CAST(64, MSB(CAST(16, MSB(op_AND_34), DUP(op_AND_34))), CAST(16, MSB(DUP(op_AND_34)), DUP(op_AND_34))), op_SUB_48); + RzILOpPure *op_LSHIFT_51 = SHIFTL0(op_LSHIFT_49, SN(32, 1)); + RzILOpPure *op_MUL_53 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_54 = SHIFTRA(DUP(Rss), op_MUL_53); + RzILOpPure *op_AND_57 = LOGAND(op_RSHIFT_54, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_68 = SHIFTRA(CAST(64, MSB(CAST(16, MSB(op_AND_57), DUP(op_AND_57))), CAST(16, MSB(DUP(op_AND_57)), DUP(op_AND_57))), SEXTRACT64(CAST(64, IL_FALSE, DUP(Rt)), SN(32, 0), SN(32, 7))); + RzILOpPure *cond_69 = ITE(op_LT_27, op_LSHIFT_51, op_RSHIFT_68); + RzILOpPure *op_AND_72 = LOGAND(cond_69, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_75 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_76 = SHIFTL0(CAST(64, IL_FALSE, op_AND_72), op_MUL_75); + RzILOpPure *op_OR_78 = LOGOR(CAST(64, IL_FALSE, op_AND_15), op_LSHIFT_76); + 
RzILOpEffect *op_ASSIGN_80 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_78)); + + // seq(h_tmp468; Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * 0x1 ...; + RzILOpEffect *seq_82 = op_ASSIGN_80; + + // seq(seq(h_tmp468; Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * ...; + RzILOpEffect *seq_83 = SEQN(2, seq_82, seq_8); + + // while ((i < 0x4)) { seq(seq(h_tmp468; Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * ... }; + RzILOpPure *op_LT_4 = SLT(VARL("i"), SN(32, 4)); + RzILOpEffect *for_84 = REPEAT(op_LT_4, seq_83); + + // seq(i = 0x0; while ((i < 0x4)) { seq(seq(h_tmp468; Rdd = ((st64) ...; + RzILOpEffect *seq_85 = SEQN(2, op_ASSIGN_2, for_84); + + RzILOpEffect *instruction_sequence = seq_85; + return instruction_sequence; +} + +// Rdd = vasrw(Rss,Rt) +RzILOpEffect *hex_il_op_s2_asr_r_vw(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: st32 i; + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_2 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_5 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp469 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_7 = SETL("h_tmp469", VARL("i")); + + // seq(h_tmp469 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_8 = SEQN(2, op_ASSIGN_hybrid_tmp_7, op_INC_5); + + // Rdd = ((Rdd & (~(0xffffffff << i * 0x20))) | ((((sextract64(((ut64) Rt), 0x0, 0x7) < ((st64) 0x0)) ? 
((((st64) ((st32) ((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff))))) << (-sextract64(((ut64) Rt), 0x0, 0x7)) - ((st64) 0x1)) << 0x1) : (((st64) ((st32) ((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff))))) >> sextract64(((ut64) Rt), 0x0, 0x7))) & 0xffffffff) << i * 0x20)); + RzILOpPure *op_MUL_12 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_LSHIFT_13 = SHIFTL0(SN(64, 0xffffffff), op_MUL_12); + RzILOpPure *op_NOT_14 = LOGNOT(op_LSHIFT_13); + RzILOpPure *op_AND_15 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_14); + RzILOpPure *op_LT_27 = SLT(SEXTRACT64(CAST(64, IL_FALSE, Rt), SN(32, 0), SN(32, 7)), CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_MUL_30 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_RSHIFT_31 = SHIFTRA(Rss, op_MUL_30); + RzILOpPure *op_AND_33 = LOGAND(op_RSHIFT_31, SN(64, 0xffffffff)); + RzILOpPure *op_NEG_46 = NEG(SEXTRACT64(CAST(64, IL_FALSE, DUP(Rt)), SN(32, 0), SN(32, 7))); + RzILOpPure *op_SUB_49 = SUB(op_NEG_46, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *op_LSHIFT_50 = SHIFTL0(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_33), DUP(op_AND_33))), CAST(32, MSB(DUP(op_AND_33)), DUP(op_AND_33)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_33)), DUP(op_AND_33))), CAST(32, MSB(DUP(op_AND_33)), DUP(op_AND_33))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_33)), DUP(op_AND_33))), CAST(32, MSB(DUP(op_AND_33)), DUP(op_AND_33)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_33)), DUP(op_AND_33))), CAST(32, MSB(DUP(op_AND_33)), DUP(op_AND_33))))), op_SUB_49); + RzILOpPure *op_LSHIFT_52 = SHIFTL0(op_LSHIFT_50, SN(32, 1)); + RzILOpPure *op_MUL_54 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_RSHIFT_55 = SHIFTRA(DUP(Rss), op_MUL_54); + RzILOpPure *op_AND_57 = LOGAND(op_RSHIFT_55, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_70 = SHIFTRA(CAST(64, MSB(CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(op_AND_57), DUP(op_AND_57))), CAST(32, MSB(DUP(op_AND_57)), DUP(op_AND_57)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_57)), DUP(op_AND_57))), 
CAST(32, MSB(DUP(op_AND_57)), DUP(op_AND_57))))), CAST(32, MSB(CAST(64, MSB(CAST(32, MSB(DUP(op_AND_57)), DUP(op_AND_57))), CAST(32, MSB(DUP(op_AND_57)), DUP(op_AND_57)))), CAST(64, MSB(CAST(32, MSB(DUP(op_AND_57)), DUP(op_AND_57))), CAST(32, MSB(DUP(op_AND_57)), DUP(op_AND_57))))), SEXTRACT64(CAST(64, IL_FALSE, DUP(Rt)), SN(32, 0), SN(32, 7))); + RzILOpPure *cond_71 = ITE(op_LT_27, op_LSHIFT_52, op_RSHIFT_70); + RzILOpPure *op_AND_73 = LOGAND(cond_71, SN(64, 0xffffffff)); + RzILOpPure *op_MUL_75 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_LSHIFT_76 = SHIFTL0(op_AND_73, op_MUL_75); + RzILOpPure *op_OR_77 = LOGOR(op_AND_15, op_LSHIFT_76); + RzILOpEffect *op_ASSIGN_78 = WRITE_REG(bundle, Rdd_op, op_OR_77); + + // seq(h_tmp469; Rdd = ((Rdd & (~(0xffffffff << i * 0x20))) | ((((s ...; + RzILOpEffect *seq_80 = op_ASSIGN_78; + + // seq(seq(h_tmp469; Rdd = ((Rdd & (~(0xffffffff << i * 0x20))) | ( ...; + RzILOpEffect *seq_81 = SEQN(2, seq_80, seq_8); + + // while ((i < 0x2)) { seq(seq(h_tmp469; Rdd = ((Rdd & (~(0xffffffff << i * 0x20))) | ( ... 
}; + RzILOpPure *op_LT_4 = SLT(VARL("i"), SN(32, 2)); + RzILOpEffect *for_82 = REPEAT(op_LT_4, seq_81); + + // seq(i = 0x0; while ((i < 0x2)) { seq(seq(h_tmp469; Rdd = ((Rdd & ...; + RzILOpEffect *seq_83 = SEQN(2, op_ASSIGN_2, for_82); + + RzILOpEffect *instruction_sequence = seq_83; + return instruction_sequence; +} + +// Rd = brev(Rs) +RzILOpEffect *hex_il_op_s2_brev(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // revbit32(((ut32) Rs)); + RzILOpEffect *revbit32_call_3 = hex_revbit32(CAST(32, IL_FALSE, Rs)); + + // h_tmp470 = revbit32(((ut32) Rs)); + RzILOpEffect *op_ASSIGN_hybrid_tmp_5 = SETL("h_tmp470", UNSIGNED(32, VARL("ret_val"))); + + // seq(revbit32(((ut32) Rs)); h_tmp470 = revbit32(((ut32) Rs))); + RzILOpEffect *seq_6 = SEQN(2, revbit32_call_3, op_ASSIGN_hybrid_tmp_5); + + // Rd = ((st32) h_tmp470); + RzILOpEffect *op_ASSIGN_8 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, VARL("h_tmp470"))); + + // seq(seq(revbit32(((ut32) Rs)); h_tmp470 = revbit32(((ut32) Rs))) ...; + RzILOpEffect *seq_9 = SEQN(2, seq_6, op_ASSIGN_8); + + RzILOpEffect *instruction_sequence = seq_9; + return instruction_sequence; +} + +// Rdd = brev(Rss) +RzILOpEffect *hex_il_op_s2_brevp(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + + // revbit64(((ut64) Rss)); + RzILOpEffect *revbit64_call_3 = hex_revbit64(CAST(64, IL_FALSE, Rss)); + + // h_tmp471 = revbit64(((ut64) Rss)); + RzILOpEffect *op_ASSIGN_hybrid_tmp_5 = SETL("h_tmp471", UNSIGNED(64, VARL("ret_val"))); + + // seq(revbit64(((ut64) Rss)); h_tmp471 = revbit64(((ut64) Rss))); + RzILOpEffect *seq_6 = SEQN(2, revbit64_call_3, 
op_ASSIGN_hybrid_tmp_5); + + // Rdd = ((st64) h_tmp471); + RzILOpEffect *op_ASSIGN_8 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, VARL("h_tmp471"))); + + // seq(seq(revbit64(((ut64) Rss)); h_tmp471 = revbit64(((ut64) Rss) ...; + RzILOpEffect *seq_9 = SEQN(2, seq_6, op_ASSIGN_8); + + RzILOpEffect *instruction_sequence = seq_9; + return instruction_sequence; +} + +// Rdd = decbin(Rss,Rtt) +RzILOpEffect *hex_il_op_s2_cabacdecbin(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Rd = cl0(Rs) +RzILOpEffect *hex_il_op_s2_cl0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // clo32(((ut32) (~Rs))); + RzILOpPure *op_NOT_2 = LOGNOT(Rs); + RzILOpEffect *clo32_call_4 = hex_clo32(CAST(32, IL_FALSE, op_NOT_2)); + + // h_tmp472 = clo32(((ut32) (~Rs))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_6 = SETL("h_tmp472", UNSIGNED(32, VARL("ret_val"))); + + // seq(clo32(((ut32) (~Rs))); h_tmp472 = clo32(((ut32) (~Rs)))); + RzILOpEffect *seq_7 = SEQN(2, clo32_call_4, op_ASSIGN_hybrid_tmp_6); + + // Rd = ((st32) h_tmp472); + RzILOpEffect *op_ASSIGN_9 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, VARL("h_tmp472"))); + + // seq(seq(clo32(((ut32) (~Rs))); h_tmp472 = clo32(((ut32) (~Rs)))) ...; + RzILOpEffect *seq_10 = SEQN(2, seq_7, op_ASSIGN_9); + + RzILOpEffect *instruction_sequence = seq_10; + return instruction_sequence; +} + +// Rd = cl0(Rss) +RzILOpEffect *hex_il_op_s2_cl0p(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + + // clo64(((ut64) (~Rss))); + RzILOpPure *op_NOT_2 = LOGNOT(Rss); + RzILOpEffect *clo64_call_4 = hex_clo64(CAST(64, IL_FALSE, op_NOT_2)); + + // h_tmp473 = 
clo64(((ut64) (~Rss))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_6 = SETL("h_tmp473", UNSIGNED(64, VARL("ret_val"))); + + // seq(clo64(((ut64) (~Rss))); h_tmp473 = clo64(((ut64) (~Rss)))); + RzILOpEffect *seq_7 = SEQN(2, clo64_call_4, op_ASSIGN_hybrid_tmp_6); + + // Rd = ((st32) h_tmp473); + RzILOpEffect *op_ASSIGN_9 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, VARL("h_tmp473"))); + + // seq(seq(clo64(((ut64) (~Rss))); h_tmp473 = clo64(((ut64) (~Rss)) ...; + RzILOpEffect *seq_10 = SEQN(2, seq_7, op_ASSIGN_9); + + RzILOpEffect *instruction_sequence = seq_10; + return instruction_sequence; +} + +// Rd = cl1(Rs) +RzILOpEffect *hex_il_op_s2_cl1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // clo32(((ut32) Rs)); + RzILOpEffect *clo32_call_3 = hex_clo32(CAST(32, IL_FALSE, Rs)); + + // h_tmp474 = clo32(((ut32) Rs)); + RzILOpEffect *op_ASSIGN_hybrid_tmp_5 = SETL("h_tmp474", UNSIGNED(32, VARL("ret_val"))); + + // seq(clo32(((ut32) Rs)); h_tmp474 = clo32(((ut32) Rs))); + RzILOpEffect *seq_6 = SEQN(2, clo32_call_3, op_ASSIGN_hybrid_tmp_5); + + // Rd = ((st32) h_tmp474); + RzILOpEffect *op_ASSIGN_8 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, VARL("h_tmp474"))); + + // seq(seq(clo32(((ut32) Rs)); h_tmp474 = clo32(((ut32) Rs))); Rd = ...; + RzILOpEffect *seq_9 = SEQN(2, seq_6, op_ASSIGN_8); + + RzILOpEffect *instruction_sequence = seq_9; + return instruction_sequence; +} + +// Rd = cl1(Rss) +RzILOpEffect *hex_il_op_s2_cl1p(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + + // clo64(((ut64) Rss)); + RzILOpEffect *clo64_call_3 = hex_clo64(CAST(64, IL_FALSE, Rss)); + + // h_tmp475 
= clo64(((ut64) Rss)); + RzILOpEffect *op_ASSIGN_hybrid_tmp_5 = SETL("h_tmp475", UNSIGNED(64, VARL("ret_val"))); + + // seq(clo64(((ut64) Rss)); h_tmp475 = clo64(((ut64) Rss))); + RzILOpEffect *seq_6 = SEQN(2, clo64_call_3, op_ASSIGN_hybrid_tmp_5); + + // Rd = ((st32) h_tmp475); + RzILOpEffect *op_ASSIGN_8 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, VARL("h_tmp475"))); + + // seq(seq(clo64(((ut64) Rss)); h_tmp475 = clo64(((ut64) Rss))); Rd ...; + RzILOpEffect *seq_9 = SEQN(2, seq_6, op_ASSIGN_8); + + RzILOpEffect *instruction_sequence = seq_9; + return instruction_sequence; +} + +// Rd = clb(Rs) +RzILOpEffect *hex_il_op_s2_clb(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // clo32(((ut32) Rs)); + RzILOpEffect *clo32_call_3 = hex_clo32(CAST(32, IL_FALSE, Rs)); + + // h_tmp476 = clo32(((ut32) Rs)); + RzILOpEffect *op_ASSIGN_hybrid_tmp_5 = SETL("h_tmp476", UNSIGNED(32, VARL("ret_val"))); + + // seq(clo32(((ut32) Rs)); h_tmp476 = clo32(((ut32) Rs))); + RzILOpEffect *seq_6 = SEQN(2, clo32_call_3, op_ASSIGN_hybrid_tmp_5); + + // clo32(((ut32) (~Rs))); + RzILOpPure *op_NOT_7 = LOGNOT(DUP(Rs)); + RzILOpEffect *clo32_call_9 = hex_clo32(CAST(32, IL_FALSE, op_NOT_7)); + + // h_tmp477 = clo32(((ut32) (~Rs))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_11 = SETL("h_tmp477", UNSIGNED(32, VARL("ret_val"))); + + // seq(clo32(((ut32) (~Rs))); h_tmp477 = clo32(((ut32) (~Rs)))); + RzILOpEffect *seq_12 = SEQN(2, clo32_call_9, op_ASSIGN_hybrid_tmp_11); + + // clo32(((ut32) Rs)); + RzILOpEffect *clo32_call_15 = hex_clo32(CAST(32, IL_FALSE, DUP(Rs))); + + // h_tmp478 = clo32(((ut32) Rs)); + RzILOpEffect *op_ASSIGN_hybrid_tmp_17 = SETL("h_tmp478", UNSIGNED(32, VARL("ret_val"))); + + // seq(clo32(((ut32) Rs)); h_tmp478 = clo32(((ut32) Rs))); + RzILOpEffect *seq_18 = SEQN(2, clo32_call_15, 
op_ASSIGN_hybrid_tmp_17); + + // clo32(((ut32) (~Rs))); + RzILOpPure *op_NOT_19 = LOGNOT(DUP(Rs)); + RzILOpEffect *clo32_call_21 = hex_clo32(CAST(32, IL_FALSE, op_NOT_19)); + + // h_tmp479 = clo32(((ut32) (~Rs))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_23 = SETL("h_tmp479", UNSIGNED(32, VARL("ret_val"))); + + // seq(clo32(((ut32) (~Rs))); h_tmp479 = clo32(((ut32) (~Rs)))); + RzILOpEffect *seq_24 = SEQN(2, clo32_call_21, op_ASSIGN_hybrid_tmp_23); + + // Rd = ((st32) ((h_tmp476 > h_tmp477) ? h_tmp478 : h_tmp479)); + RzILOpPure *op_GT_13 = UGT(VARL("h_tmp476"), VARL("h_tmp477")); + RzILOpPure *cond_25 = ITE(op_GT_13, VARL("h_tmp478"), VARL("h_tmp479")); + RzILOpEffect *op_ASSIGN_27 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, cond_25)); + + // seq(seq(clo32(((ut32) Rs)); h_tmp476 = clo32(((ut32) Rs))); seq( ...; + RzILOpEffect *seq_28 = SEQN(5, seq_6, seq_12, seq_18, seq_24, op_ASSIGN_27); + + RzILOpEffect *instruction_sequence = seq_28; + return instruction_sequence; +} + +// Rd = normamt(Rs) +RzILOpEffect *hex_il_op_s2_clbnorm(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // Rd = 0x0; + RzILOpEffect *op_ASSIGN_5 = WRITE_REG(bundle, Rd_op, SN(32, 0)); + + // clo32(((ut32) Rs)); + RzILOpEffect *clo32_call_7 = hex_clo32(CAST(32, IL_FALSE, Rs)); + + // h_tmp480 = clo32(((ut32) Rs)); + RzILOpEffect *op_ASSIGN_hybrid_tmp_9 = SETL("h_tmp480", UNSIGNED(32, VARL("ret_val"))); + + // seq(clo32(((ut32) Rs)); h_tmp480 = clo32(((ut32) Rs))); + RzILOpEffect *seq_10 = SEQN(2, clo32_call_7, op_ASSIGN_hybrid_tmp_9); + + // clo32(((ut32) (~Rs))); + RzILOpPure *op_NOT_11 = LOGNOT(DUP(Rs)); + RzILOpEffect *clo32_call_13 = hex_clo32(CAST(32, IL_FALSE, op_NOT_11)); + + // h_tmp481 = clo32(((ut32) (~Rs))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_15 = SETL("h_tmp481", UNSIGNED(32, 
VARL("ret_val"))); + + // seq(clo32(((ut32) (~Rs))); h_tmp481 = clo32(((ut32) (~Rs)))); + RzILOpEffect *seq_16 = SEQN(2, clo32_call_13, op_ASSIGN_hybrid_tmp_15); + + // clo32(((ut32) Rs)); + RzILOpEffect *clo32_call_19 = hex_clo32(CAST(32, IL_FALSE, DUP(Rs))); + + // h_tmp482 = clo32(((ut32) Rs)); + RzILOpEffect *op_ASSIGN_hybrid_tmp_21 = SETL("h_tmp482", UNSIGNED(32, VARL("ret_val"))); + + // seq(clo32(((ut32) Rs)); h_tmp482 = clo32(((ut32) Rs))); + RzILOpEffect *seq_22 = SEQN(2, clo32_call_19, op_ASSIGN_hybrid_tmp_21); + + // clo32(((ut32) (~Rs))); + RzILOpPure *op_NOT_23 = LOGNOT(DUP(Rs)); + RzILOpEffect *clo32_call_25 = hex_clo32(CAST(32, IL_FALSE, op_NOT_23)); + + // h_tmp483 = clo32(((ut32) (~Rs))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_27 = SETL("h_tmp483", UNSIGNED(32, VARL("ret_val"))); + + // seq(clo32(((ut32) (~Rs))); h_tmp483 = clo32(((ut32) (~Rs)))); + RzILOpEffect *seq_28 = SEQN(2, clo32_call_25, op_ASSIGN_hybrid_tmp_27); + + // Rd = ((st32) ((h_tmp480 > h_tmp481) ? h_tmp482 : h_tmp483) - ((ut32) 0x1)); + RzILOpPure *op_GT_17 = UGT(VARL("h_tmp480"), VARL("h_tmp481")); + RzILOpPure *cond_29 = ITE(op_GT_17, VARL("h_tmp482"), VARL("h_tmp483")); + RzILOpPure *op_SUB_32 = SUB(cond_29, CAST(32, IL_FALSE, SN(32, 1))); + RzILOpEffect *op_ASSIGN_34 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, op_SUB_32)); + + // seq(seq(clo32(((ut32) Rs)); h_tmp480 = clo32(((ut32) Rs))); seq( ...; + RzILOpEffect *seq_35 = SEQN(5, seq_10, seq_16, seq_22, seq_28, op_ASSIGN_34); + + // seq(Rd = 0x0); + RzILOpEffect *seq_then_36 = op_ASSIGN_5; + + // seq(seq(seq(clo32(((ut32) Rs)); h_tmp480 = clo32(((ut32) Rs))); ...; + RzILOpEffect *seq_else_37 = seq_35; + + // if ((Rs == 0x0)) {seq(Rd = 0x0)} else {seq(seq(seq(clo32(((ut32) Rs)); h_tmp480 = clo32(((ut32) Rs))); ...}; + RzILOpPure *op_EQ_2 = EQ(DUP(Rs), SN(32, 0)); + RzILOpEffect *branch_38 = BRANCH(op_EQ_2, seq_then_36, seq_else_37); + + RzILOpEffect *instruction_sequence = branch_38; + return instruction_sequence; +} + +// Rd = 
clb(Rss) +RzILOpEffect *hex_il_op_s2_clbp(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + + // clo64(((ut64) Rss)); + RzILOpEffect *clo64_call_3 = hex_clo64(CAST(64, IL_FALSE, Rss)); + + // h_tmp484 = clo64(((ut64) Rss)); + RzILOpEffect *op_ASSIGN_hybrid_tmp_5 = SETL("h_tmp484", UNSIGNED(64, VARL("ret_val"))); + + // seq(clo64(((ut64) Rss)); h_tmp484 = clo64(((ut64) Rss))); + RzILOpEffect *seq_6 = SEQN(2, clo64_call_3, op_ASSIGN_hybrid_tmp_5); + + // clo64(((ut64) (~Rss))); + RzILOpPure *op_NOT_7 = LOGNOT(DUP(Rss)); + RzILOpEffect *clo64_call_9 = hex_clo64(CAST(64, IL_FALSE, op_NOT_7)); + + // h_tmp485 = clo64(((ut64) (~Rss))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_11 = SETL("h_tmp485", UNSIGNED(64, VARL("ret_val"))); + + // seq(clo64(((ut64) (~Rss))); h_tmp485 = clo64(((ut64) (~Rss)))); + RzILOpEffect *seq_12 = SEQN(2, clo64_call_9, op_ASSIGN_hybrid_tmp_11); + + // clo64(((ut64) Rss)); + RzILOpEffect *clo64_call_15 = hex_clo64(CAST(64, IL_FALSE, DUP(Rss))); + + // h_tmp486 = clo64(((ut64) Rss)); + RzILOpEffect *op_ASSIGN_hybrid_tmp_17 = SETL("h_tmp486", UNSIGNED(64, VARL("ret_val"))); + + // seq(clo64(((ut64) Rss)); h_tmp486 = clo64(((ut64) Rss))); + RzILOpEffect *seq_18 = SEQN(2, clo64_call_15, op_ASSIGN_hybrid_tmp_17); + + // clo64(((ut64) (~Rss))); + RzILOpPure *op_NOT_19 = LOGNOT(DUP(Rss)); + RzILOpEffect *clo64_call_21 = hex_clo64(CAST(64, IL_FALSE, op_NOT_19)); + + // h_tmp487 = clo64(((ut64) (~Rss))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_23 = SETL("h_tmp487", UNSIGNED(64, VARL("ret_val"))); + + // seq(clo64(((ut64) (~Rss))); h_tmp487 = clo64(((ut64) (~Rss)))); + RzILOpEffect *seq_24 = SEQN(2, clo64_call_21, op_ASSIGN_hybrid_tmp_23); + + // Rd = ((st32) ((h_tmp484 > h_tmp485) ? 
h_tmp486 : h_tmp487)); + RzILOpPure *op_GT_13 = UGT(VARL("h_tmp484"), VARL("h_tmp485")); + RzILOpPure *cond_25 = ITE(op_GT_13, VARL("h_tmp486"), VARL("h_tmp487")); + RzILOpEffect *op_ASSIGN_27 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, cond_25)); + + // seq(seq(clo64(((ut64) Rss)); h_tmp484 = clo64(((ut64) Rss))); se ...; + RzILOpEffect *seq_28 = SEQN(5, seq_6, seq_12, seq_18, seq_24, op_ASSIGN_27); + + RzILOpEffect *instruction_sequence = seq_28; + return instruction_sequence; +} + +// Rd = clrbit(Rs,Ii) +RzILOpEffect *hex_il_op_s2_clrbit_i(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + + // u = u; + RzILOpEffect *imm_assign_3 = SETL("u", u); + + // Rd = (Rs & (~(0x1 << u))); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(SN(32, 1), VARL("u")); + RzILOpPure *op_NOT_6 = LOGNOT(op_LSHIFT_5); + RzILOpPure *op_AND_7 = LOGAND(Rs, op_NOT_6); + RzILOpEffect *op_ASSIGN_8 = WRITE_REG(bundle, Rd_op, op_AND_7); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_3, op_ASSIGN_8); + return instruction_sequence; +} + +// Rd = clrbit(Rs,Rt) +RzILOpEffect *hex_il_op_s2_clrbit_r(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rd = ((st32) (((ut64) Rs) & (~((sextract64(((ut64) Rt), 0x0, 0x7) < ((st64) 0x0)) ? 
((((ut64) ((ut32) 0x1)) >> (-sextract64(((ut64) Rt), 0x0, 0x7)) - ((st64) 0x1)) >> 0x1) : (((ut64) ((ut32) 0x1)) << sextract64(((ut64) Rt), 0x0, 0x7)))))); + RzILOpPure *op_LT_13 = SLT(SEXTRACT64(CAST(64, IL_FALSE, Rt), SN(32, 0), SN(32, 7)), CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_NEG_25 = NEG(SEXTRACT64(CAST(64, IL_FALSE, DUP(Rt)), SN(32, 0), SN(32, 7))); + RzILOpPure *op_SUB_28 = SUB(op_NEG_25, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *op_RSHIFT_29 = SHIFTR0(CAST(64, IL_FALSE, CAST(32, IL_FALSE, SN(32, 1))), op_SUB_28); + RzILOpPure *op_RSHIFT_31 = SHIFTR0(op_RSHIFT_29, SN(32, 1)); + RzILOpPure *op_LSHIFT_43 = SHIFTL0(CAST(64, IL_FALSE, CAST(32, IL_FALSE, SN(32, 1))), SEXTRACT64(CAST(64, IL_FALSE, DUP(Rt)), SN(32, 0), SN(32, 7))); + RzILOpPure *cond_44 = ITE(op_LT_13, op_RSHIFT_31, op_LSHIFT_43); + RzILOpPure *op_NOT_45 = LOGNOT(cond_44); + RzILOpPure *op_AND_47 = LOGAND(CAST(64, IL_FALSE, Rs), op_NOT_45); + RzILOpEffect *op_ASSIGN_49 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, op_AND_47)); + + RzILOpEffect *instruction_sequence = op_ASSIGN_49; + return instruction_sequence; +} + +// Rd = ct0(Rs) +RzILOpEffect *hex_il_op_s2_ct0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // revbit32(((ut32) Rs)); + RzILOpEffect *revbit32_call_3 = hex_revbit32(CAST(32, IL_FALSE, Rs)); + + // h_tmp488 = revbit32(((ut32) Rs)); + RzILOpEffect *op_ASSIGN_hybrid_tmp_5 = SETL("h_tmp488", UNSIGNED(32, VARL("ret_val"))); + + // seq(revbit32(((ut32) Rs)); h_tmp488 = revbit32(((ut32) Rs))); + RzILOpEffect *seq_6 = SEQN(2, revbit32_call_3, op_ASSIGN_hybrid_tmp_5); + + // clo32((~h_tmp488)); + RzILOpPure *op_NOT_7 = LOGNOT(VARL("h_tmp488")); + RzILOpEffect *clo32_call_8 = hex_clo32(op_NOT_7); + + // h_tmp489 = clo32((~h_tmp488)); + RzILOpEffect 
*op_ASSIGN_hybrid_tmp_10 = SETL("h_tmp489", UNSIGNED(32, VARL("ret_val"))); + + // seq(clo32((~h_tmp488)); h_tmp489 = clo32((~h_tmp488))); + RzILOpEffect *seq_11 = SEQN(2, clo32_call_8, op_ASSIGN_hybrid_tmp_10); + + // seq(seq(revbit32(((ut32) Rs)); h_tmp488 = revbit32(((ut32) Rs))) ...; + RzILOpEffect *seq_12 = SEQN(2, seq_6, seq_11); + + // Rd = ((st32) h_tmp489); + RzILOpEffect *op_ASSIGN_14 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, VARL("h_tmp489"))); + + // seq(seq(seq(revbit32(((ut32) Rs)); h_tmp488 = revbit32(((ut32) R ...; + RzILOpEffect *seq_15 = SEQN(2, seq_12, op_ASSIGN_14); + + RzILOpEffect *instruction_sequence = seq_15; + return instruction_sequence; +} + +// Rd = ct0(Rss) +RzILOpEffect *hex_il_op_s2_ct0p(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + + // revbit64(((ut64) Rss)); + RzILOpEffect *revbit64_call_3 = hex_revbit64(CAST(64, IL_FALSE, Rss)); + + // h_tmp490 = revbit64(((ut64) Rss)); + RzILOpEffect *op_ASSIGN_hybrid_tmp_5 = SETL("h_tmp490", UNSIGNED(64, VARL("ret_val"))); + + // seq(revbit64(((ut64) Rss)); h_tmp490 = revbit64(((ut64) Rss))); + RzILOpEffect *seq_6 = SEQN(2, revbit64_call_3, op_ASSIGN_hybrid_tmp_5); + + // clo64((~h_tmp490)); + RzILOpPure *op_NOT_7 = LOGNOT(VARL("h_tmp490")); + RzILOpEffect *clo64_call_8 = hex_clo64(op_NOT_7); + + // h_tmp491 = clo64((~h_tmp490)); + RzILOpEffect *op_ASSIGN_hybrid_tmp_10 = SETL("h_tmp491", UNSIGNED(64, VARL("ret_val"))); + + // seq(clo64((~h_tmp490)); h_tmp491 = clo64((~h_tmp490))); + RzILOpEffect *seq_11 = SEQN(2, clo64_call_8, op_ASSIGN_hybrid_tmp_10); + + // seq(seq(revbit64(((ut64) Rss)); h_tmp490 = revbit64(((ut64) Rss) ...; + RzILOpEffect *seq_12 = SEQN(2, seq_6, seq_11); + + // Rd = ((st32) h_tmp491); + RzILOpEffect *op_ASSIGN_14 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, 
VARL("h_tmp491"))); + + // seq(seq(seq(revbit64(((ut64) Rss)); h_tmp490 = revbit64(((ut64) ...; + RzILOpEffect *seq_15 = SEQN(2, seq_12, op_ASSIGN_14); + + RzILOpEffect *instruction_sequence = seq_15; + return instruction_sequence; +} + +// Rd = ct1(Rs) +RzILOpEffect *hex_il_op_s2_ct1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // revbit32(((ut32) Rs)); + RzILOpEffect *revbit32_call_3 = hex_revbit32(CAST(32, IL_FALSE, Rs)); + + // h_tmp492 = revbit32(((ut32) Rs)); + RzILOpEffect *op_ASSIGN_hybrid_tmp_5 = SETL("h_tmp492", UNSIGNED(32, VARL("ret_val"))); + + // seq(revbit32(((ut32) Rs)); h_tmp492 = revbit32(((ut32) Rs))); + RzILOpEffect *seq_6 = SEQN(2, revbit32_call_3, op_ASSIGN_hybrid_tmp_5); + + // clo32(h_tmp492); + RzILOpEffect *clo32_call_7 = hex_clo32(VARL("h_tmp492")); + + // h_tmp493 = clo32(h_tmp492); + RzILOpEffect *op_ASSIGN_hybrid_tmp_9 = SETL("h_tmp493", UNSIGNED(32, VARL("ret_val"))); + + // seq(clo32(h_tmp492); h_tmp493 = clo32(h_tmp492)); + RzILOpEffect *seq_10 = SEQN(2, clo32_call_7, op_ASSIGN_hybrid_tmp_9); + + // seq(seq(revbit32(((ut32) Rs)); h_tmp492 = revbit32(((ut32) Rs))) ...; + RzILOpEffect *seq_11 = SEQN(2, seq_6, seq_10); + + // Rd = ((st32) h_tmp493); + RzILOpEffect *op_ASSIGN_13 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, VARL("h_tmp493"))); + + // seq(seq(seq(revbit32(((ut32) Rs)); h_tmp492 = revbit32(((ut32) R ...; + RzILOpEffect *seq_14 = SEQN(2, seq_11, op_ASSIGN_13); + + RzILOpEffect *instruction_sequence = seq_14; + return instruction_sequence; +} + +// Rd = ct1(Rss) +RzILOpEffect *hex_il_op_s2_ct1p(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = 
READ_REG(pkt, Rss_op, false); + + // revbit64(((ut64) Rss)); + RzILOpEffect *revbit64_call_3 = hex_revbit64(CAST(64, IL_FALSE, Rss)); + + // h_tmp494 = revbit64(((ut64) Rss)); + RzILOpEffect *op_ASSIGN_hybrid_tmp_5 = SETL("h_tmp494", UNSIGNED(64, VARL("ret_val"))); + + // seq(revbit64(((ut64) Rss)); h_tmp494 = revbit64(((ut64) Rss))); + RzILOpEffect *seq_6 = SEQN(2, revbit64_call_3, op_ASSIGN_hybrid_tmp_5); + + // clo64(h_tmp494); + RzILOpEffect *clo64_call_7 = hex_clo64(VARL("h_tmp494")); + + // h_tmp495 = clo64(h_tmp494); + RzILOpEffect *op_ASSIGN_hybrid_tmp_9 = SETL("h_tmp495", UNSIGNED(64, VARL("ret_val"))); + + // seq(clo64(h_tmp494); h_tmp495 = clo64(h_tmp494)); + RzILOpEffect *seq_10 = SEQN(2, clo64_call_7, op_ASSIGN_hybrid_tmp_9); + + // seq(seq(revbit64(((ut64) Rss)); h_tmp494 = revbit64(((ut64) Rss) ...; + RzILOpEffect *seq_11 = SEQN(2, seq_6, seq_10); + + // Rd = ((st32) h_tmp495); + RzILOpEffect *op_ASSIGN_13 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, VARL("h_tmp495"))); + + // seq(seq(seq(revbit64(((ut64) Rss)); h_tmp494 = revbit64(((ut64) ...; + RzILOpEffect *seq_14 = SEQN(2, seq_11, op_ASSIGN_13); + + RzILOpEffect *instruction_sequence = seq_14; + return instruction_sequence; +} + +// Rdd = deinterleave(Rss) +RzILOpEffect *hex_il_op_s2_deinterleave(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Rd = extractu(Rs,Ii,II) +RzILOpEffect *hex_il_op_s2_extractu(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: st32 width; + RzILOpPure *U = UN(32, (ut32)ISA2IMM(hi, 'U')); + // Declare: st32 offset; + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // width = ((st32) u); + RzILOpEffect *op_ASSIGN_3 = SETL("width", CAST(32, IL_FALSE, VARL("u"))); + + // U = U; + RzILOpEffect 
*imm_assign_5 = SETL("U", U); + + // offset = ((st32) U); + RzILOpEffect *op_ASSIGN_8 = SETL("offset", CAST(32, IL_FALSE, VARL("U"))); + + // Rd = ((st32) ((width != 0x0) ? extract64(((ut64) (((ut32) Rs) >> offset)), 0x0, width) : ((ut64) 0x0))); + RzILOpPure *op_NE_12 = INV(EQ(VARL("width"), SN(32, 0))); + RzILOpPure *op_RSHIFT_15 = SHIFTR0(CAST(32, IL_FALSE, Rs), VARL("offset")); + RzILOpPure *cond_21 = ITE(op_NE_12, EXTRACT64(CAST(64, IL_FALSE, op_RSHIFT_15), SN(32, 0), VARL("width")), CAST(64, IL_FALSE, SN(64, 0))); + RzILOpEffect *op_ASSIGN_23 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, cond_21)); + + RzILOpEffect *instruction_sequence = SEQN(5, imm_assign_0, imm_assign_5, op_ASSIGN_3, op_ASSIGN_8, op_ASSIGN_23); + return instruction_sequence; +} + +// Rd = extractu(Rs,Rtt) +RzILOpEffect *hex_il_op_s2_extractu_rp(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + // Declare: st32 width; + // Declare: st32 offset; + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // width = ((st32) extract64(((ut64) ((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff)))), 0x0, 0x6)); + RzILOpPure *op_RSHIFT_7 = SHIFTRA(Rtt, SN(32, 0x20)); + RzILOpPure *op_AND_9 = LOGAND(op_RSHIFT_7, SN(64, 0xffffffff)); + RzILOpEffect *op_ASSIGN_18 = SETL("width", CAST(32, IL_FALSE, EXTRACT64(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(op_AND_9), DUP(op_AND_9))), CAST(32, MSB(DUP(op_AND_9)), DUP(op_AND_9)))), SN(32, 0), SN(32, 6)))); + + // offset = ((st32) sextract64(((ut64) ((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff)))), 0x0, 0x7)); + RzILOpPure *op_RSHIFT_26 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_28 = LOGAND(op_RSHIFT_26, SN(64, 0xffffffff)); + RzILOpEffect *op_ASSIGN_37 = SETL("offset", CAST(32, MSB(SEXTRACT64(CAST(64, IL_FALSE, 
CAST(64, MSB(CAST(32, MSB(op_AND_28), DUP(op_AND_28))), CAST(32, MSB(DUP(op_AND_28)), DUP(op_AND_28)))), SN(32, 0), SN(32, 7))), SEXTRACT64(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(DUP(op_AND_28)), DUP(op_AND_28))), CAST(32, MSB(DUP(op_AND_28)), DUP(op_AND_28)))), SN(32, 0), SN(32, 7)))); + + // Rd = ((st32) ((width != 0x0) ? extract64(((offset < 0x0) ? ((((ut64) ((ut32) ((ut64) ((ut32) Rs)))) << (-offset) - 0x1) << 0x1) : (((ut64) ((ut32) ((ut64) ((ut32) Rs)))) >> offset)), 0x0, width) : ((ut64) 0x0))); + RzILOpPure *op_NE_41 = INV(EQ(VARL("width"), SN(32, 0))); + RzILOpPure *op_LT_43 = SLT(VARL("offset"), SN(32, 0)); + RzILOpPure *op_NEG_49 = NEG(VARL("offset")); + RzILOpPure *op_SUB_51 = SUB(op_NEG_49, SN(32, 1)); + RzILOpPure *op_LSHIFT_52 = SHIFTL0(CAST(64, IL_FALSE, CAST(32, IL_FALSE, CAST(64, IL_FALSE, CAST(32, IL_FALSE, Rs)))), op_SUB_51); + RzILOpPure *op_LSHIFT_54 = SHIFTL0(op_LSHIFT_52, SN(32, 1)); + RzILOpPure *op_RSHIFT_59 = SHIFTR0(CAST(64, IL_FALSE, CAST(32, IL_FALSE, CAST(64, IL_FALSE, CAST(32, IL_FALSE, DUP(Rs))))), VARL("offset")); + RzILOpPure *cond_60 = ITE(op_LT_43, op_LSHIFT_54, op_RSHIFT_59); + RzILOpPure *cond_65 = ITE(op_NE_41, EXTRACT64(cond_60, SN(32, 0), VARL("width")), CAST(64, IL_FALSE, SN(64, 0))); + RzILOpEffect *op_ASSIGN_67 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, cond_65)); + + RzILOpEffect *instruction_sequence = SEQN(3, op_ASSIGN_18, op_ASSIGN_37, op_ASSIGN_67); + return instruction_sequence; +} + +// Rdd = extractu(Rss,Ii,II) +RzILOpEffect *hex_il_op_s2_extractup(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: st32 width; + RzILOpPure *U = UN(32, (ut32)ISA2IMM(hi, 'U')); + // Declare: st32 offset; + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + 
+ // width = ((st32) u); + RzILOpEffect *op_ASSIGN_3 = SETL("width", CAST(32, IL_FALSE, VARL("u"))); + + // U = U; + RzILOpEffect *imm_assign_5 = SETL("U", U); + + // offset = ((st32) U); + RzILOpEffect *op_ASSIGN_8 = SETL("offset", CAST(32, IL_FALSE, VARL("U"))); + + // Rdd = ((st64) ((width != 0x0) ? extract64((((ut64) Rss) >> offset), 0x0, width) : ((ut64) 0x0))); + RzILOpPure *op_NE_12 = INV(EQ(VARL("width"), SN(32, 0))); + RzILOpPure *op_RSHIFT_15 = SHIFTR0(CAST(64, IL_FALSE, Rss), VARL("offset")); + RzILOpPure *cond_20 = ITE(op_NE_12, EXTRACT64(op_RSHIFT_15, SN(32, 0), VARL("width")), CAST(64, IL_FALSE, SN(64, 0))); + RzILOpEffect *op_ASSIGN_22 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, cond_20)); + + RzILOpEffect *instruction_sequence = SEQN(5, imm_assign_0, imm_assign_5, op_ASSIGN_3, op_ASSIGN_8, op_ASSIGN_22); + return instruction_sequence; +} + +// Rdd = extractu(Rss,Rtt) +RzILOpEffect *hex_il_op_s2_extractup_rp(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + // Declare: st32 width; + // Declare: st32 offset; + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + + // width = ((st32) extract64(((ut64) ((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff)))), 0x0, 0x6)); + RzILOpPure *op_RSHIFT_7 = SHIFTRA(Rtt, SN(32, 0x20)); + RzILOpPure *op_AND_9 = LOGAND(op_RSHIFT_7, SN(64, 0xffffffff)); + RzILOpEffect *op_ASSIGN_18 = SETL("width", CAST(32, IL_FALSE, EXTRACT64(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(op_AND_9), DUP(op_AND_9))), CAST(32, MSB(DUP(op_AND_9)), DUP(op_AND_9)))), SN(32, 0), SN(32, 6)))); + + // offset = ((st32) sextract64(((ut64) ((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff)))), 0x0, 0x7)); + RzILOpPure *op_RSHIFT_26 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_28 = LOGAND(op_RSHIFT_26, 
SN(64, 0xffffffff)); + RzILOpEffect *op_ASSIGN_37 = SETL("offset", CAST(32, MSB(SEXTRACT64(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(op_AND_28), DUP(op_AND_28))), CAST(32, MSB(DUP(op_AND_28)), DUP(op_AND_28)))), SN(32, 0), SN(32, 7))), SEXTRACT64(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(DUP(op_AND_28)), DUP(op_AND_28))), CAST(32, MSB(DUP(op_AND_28)), DUP(op_AND_28)))), SN(32, 0), SN(32, 7)))); + + // Rdd = ((st64) ((width != 0x0) ? extract64(((offset < 0x0) ? ((((ut64) Rss) << (-offset) - 0x1) << 0x1) : (((ut64) Rss) >> offset)), 0x0, width) : ((ut64) 0x0))); + RzILOpPure *op_NE_41 = INV(EQ(VARL("width"), SN(32, 0))); + RzILOpPure *op_LT_43 = SLT(VARL("offset"), SN(32, 0)); + RzILOpPure *op_NEG_46 = NEG(VARL("offset")); + RzILOpPure *op_SUB_48 = SUB(op_NEG_46, SN(32, 1)); + RzILOpPure *op_LSHIFT_49 = SHIFTL0(CAST(64, IL_FALSE, Rss), op_SUB_48); + RzILOpPure *op_LSHIFT_51 = SHIFTL0(op_LSHIFT_49, SN(32, 1)); + RzILOpPure *op_RSHIFT_53 = SHIFTR0(CAST(64, IL_FALSE, DUP(Rss)), VARL("offset")); + RzILOpPure *cond_54 = ITE(op_LT_43, op_LSHIFT_51, op_RSHIFT_53); + RzILOpPure *cond_59 = ITE(op_NE_41, EXTRACT64(cond_54, SN(32, 0), VARL("width")), CAST(64, IL_FALSE, SN(64, 0))); + RzILOpEffect *op_ASSIGN_61 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, cond_59)); + + RzILOpEffect *instruction_sequence = SEQN(3, op_ASSIGN_18, op_ASSIGN_37, op_ASSIGN_61); + return instruction_sequence; +} + +// Rx = insert(Rs,Ii,II) +RzILOpEffect *hex_il_op_s2_insert(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: st32 width; + RzILOpPure *U = UN(32, (ut32)ISA2IMM(hi, 'U')); + // Declare: st32 offset; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // width = ((st32) u); + RzILOpEffect *op_ASSIGN_3 = 
SETL("width", CAST(32, IL_FALSE, VARL("u"))); + + // U = U; + RzILOpEffect *imm_assign_5 = SETL("U", U); + + // offset = ((st32) U); + RzILOpEffect *op_ASSIGN_8 = SETL("offset", CAST(32, IL_FALSE, VARL("U"))); + + // Rx = (Rx & ((st32) (~((0x1 << width) - ((st64) 0x1) << offset)))); + RzILOpPure *op_LSHIFT_12 = SHIFTL0(SN(64, 1), VARL("width")); + RzILOpPure *op_SUB_15 = SUB(op_LSHIFT_12, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *op_LSHIFT_16 = SHIFTL0(op_SUB_15, VARL("offset")); + RzILOpPure *op_NOT_17 = LOGNOT(op_LSHIFT_16); + RzILOpPure *op_AND_19 = LOGAND(READ_REG(pkt, Rx_op, false), CAST(32, MSB(op_NOT_17), DUP(op_NOT_17))); + RzILOpEffect *op_ASSIGN_AND_20 = WRITE_REG(bundle, Rx_op, op_AND_19); + + // Rx = (Rx | ((st32) ((((st64) Rs) & (0x1 << width) - ((st64) 0x1)) << offset))); + RzILOpPure *op_LSHIFT_23 = SHIFTL0(SN(64, 1), VARL("width")); + RzILOpPure *op_SUB_26 = SUB(op_LSHIFT_23, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *op_AND_28 = LOGAND(CAST(64, MSB(Rs), DUP(Rs)), op_SUB_26); + RzILOpPure *op_LSHIFT_29 = SHIFTL0(op_AND_28, VARL("offset")); + RzILOpPure *op_OR_31 = LOGOR(READ_REG(pkt, Rx_op, false), CAST(32, MSB(op_LSHIFT_29), DUP(op_LSHIFT_29))); + RzILOpEffect *op_ASSIGN_OR_32 = WRITE_REG(bundle, Rx_op, op_OR_31); + + RzILOpEffect *instruction_sequence = SEQN(6, imm_assign_0, imm_assign_5, op_ASSIGN_3, op_ASSIGN_8, op_ASSIGN_AND_20, op_ASSIGN_OR_32); + return instruction_sequence; +} + +// Rx = insert(Rs,Rtt) +RzILOpEffect *hex_il_op_s2_insert_rp(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + // Declare: st32 width; + // Declare: st32 offset; + // Declare: ut64 mask; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // width = ((st32) extract64(((ut64) ((st64) ((st32) ((Rtt >> 
0x20) & 0xffffffff)))), 0x0, 0x6)); + RzILOpPure *op_RSHIFT_7 = SHIFTRA(Rtt, SN(32, 0x20)); + RzILOpPure *op_AND_9 = LOGAND(op_RSHIFT_7, SN(64, 0xffffffff)); + RzILOpEffect *op_ASSIGN_18 = SETL("width", CAST(32, IL_FALSE, EXTRACT64(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(op_AND_9), DUP(op_AND_9))), CAST(32, MSB(DUP(op_AND_9)), DUP(op_AND_9)))), SN(32, 0), SN(32, 6)))); + + // offset = ((st32) sextract64(((ut64) ((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff)))), 0x0, 0x7)); + RzILOpPure *op_RSHIFT_26 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_28 = LOGAND(op_RSHIFT_26, SN(64, 0xffffffff)); + RzILOpEffect *op_ASSIGN_37 = SETL("offset", CAST(32, MSB(SEXTRACT64(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(op_AND_28), DUP(op_AND_28))), CAST(32, MSB(DUP(op_AND_28)), DUP(op_AND_28)))), SN(32, 0), SN(32, 7))), SEXTRACT64(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(DUP(op_AND_28)), DUP(op_AND_28))), CAST(32, MSB(DUP(op_AND_28)), DUP(op_AND_28)))), SN(32, 0), SN(32, 7)))); + + // mask = ((ut64) (0x1 << width) - ((st64) 0x1)); + RzILOpPure *op_LSHIFT_40 = SHIFTL0(SN(64, 1), VARL("width")); + RzILOpPure *op_SUB_43 = SUB(op_LSHIFT_40, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpEffect *op_ASSIGN_45 = SETL("mask", CAST(64, IL_FALSE, op_SUB_43)); + + // Rx = 0x0; + RzILOpEffect *op_ASSIGN_51 = WRITE_REG(bundle, Rx_op, SN(32, 0)); + + // Rx = (Rx & ((st32) (~(mask << offset)))); + RzILOpPure *op_LSHIFT_52 = SHIFTL0(VARL("mask"), VARL("offset")); + RzILOpPure *op_NOT_53 = LOGNOT(op_LSHIFT_52); + RzILOpPure *op_AND_55 = LOGAND(READ_REG(pkt, Rx_op, false), CAST(32, IL_FALSE, op_NOT_53)); + RzILOpEffect *op_ASSIGN_AND_56 = WRITE_REG(bundle, Rx_op, op_AND_55); + + // Rx = (Rx | ((st32) ((((ut64) Rs) & mask) << offset))); + RzILOpPure *op_AND_59 = LOGAND(CAST(64, IL_FALSE, Rs), VARL("mask")); + RzILOpPure *op_LSHIFT_60 = SHIFTL0(op_AND_59, VARL("offset")); + RzILOpPure *op_OR_62 = LOGOR(READ_REG(pkt, Rx_op, false), CAST(32, IL_FALSE, op_LSHIFT_60)); + RzILOpEffect 
*op_ASSIGN_OR_63 = WRITE_REG(bundle, Rx_op, op_OR_62); + + // seq(Rx = 0x0); + RzILOpEffect *seq_then_64 = op_ASSIGN_51; + + // seq(Rx = (Rx & ((st32) (~(mask << offset)))); Rx = (Rx | ((st32) ...; + RzILOpEffect *seq_else_65 = SEQN(2, op_ASSIGN_AND_56, op_ASSIGN_OR_63); + + // if ((offset < 0x0)) {seq(Rx = 0x0)} else {seq(Rx = (Rx & ((st32) (~(mask << offset)))); Rx = (Rx | ((st32) ...}; + RzILOpPure *op_LT_48 = SLT(VARL("offset"), SN(32, 0)); + RzILOpEffect *branch_66 = BRANCH(op_LT_48, seq_then_64, seq_else_65); + + RzILOpEffect *instruction_sequence = SEQN(4, op_ASSIGN_18, op_ASSIGN_37, op_ASSIGN_45, branch_66); + return instruction_sequence; +} + +// Rxx = insert(Rss,Ii,II) +RzILOpEffect *hex_il_op_s2_insertp(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: st32 width; + RzILOpPure *U = UN(32, (ut32)ISA2IMM(hi, 'U')); + // Declare: st32 offset; + const HexOp *Rxx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // width = ((st32) u); + RzILOpEffect *op_ASSIGN_3 = SETL("width", CAST(32, IL_FALSE, VARL("u"))); + + // U = U; + RzILOpEffect *imm_assign_5 = SETL("U", U); + + // offset = ((st32) U); + RzILOpEffect *op_ASSIGN_8 = SETL("offset", CAST(32, IL_FALSE, VARL("U"))); + + // Rxx = (Rxx & (~((0x1 << width) - ((st64) 0x1) << offset))); + RzILOpPure *op_LSHIFT_12 = SHIFTL0(SN(64, 1), VARL("width")); + RzILOpPure *op_SUB_15 = SUB(op_LSHIFT_12, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *op_LSHIFT_16 = SHIFTL0(op_SUB_15, VARL("offset")); + RzILOpPure *op_NOT_17 = LOGNOT(op_LSHIFT_16); + RzILOpPure *op_AND_18 = LOGAND(READ_REG(pkt, Rxx_op, false), op_NOT_17); + RzILOpEffect *op_ASSIGN_AND_19 = WRITE_REG(bundle, Rxx_op, op_AND_18); + + // Rxx = (Rxx | ((Rss & (0x1 << width) - ((st64) 0x1)) << 
offset)); + RzILOpPure *op_LSHIFT_22 = SHIFTL0(SN(64, 1), VARL("width")); + RzILOpPure *op_SUB_25 = SUB(op_LSHIFT_22, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *op_AND_26 = LOGAND(Rss, op_SUB_25); + RzILOpPure *op_LSHIFT_27 = SHIFTL0(op_AND_26, VARL("offset")); + RzILOpPure *op_OR_28 = LOGOR(READ_REG(pkt, Rxx_op, false), op_LSHIFT_27); + RzILOpEffect *op_ASSIGN_OR_29 = WRITE_REG(bundle, Rxx_op, op_OR_28); + + RzILOpEffect *instruction_sequence = SEQN(6, imm_assign_0, imm_assign_5, op_ASSIGN_3, op_ASSIGN_8, op_ASSIGN_AND_19, op_ASSIGN_OR_29); + return instruction_sequence; +} + +// Rxx = insert(Rss,Rtt) +RzILOpEffect *hex_il_op_s2_insertp_rp(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + // Declare: st32 width; + // Declare: st32 offset; + // Declare: ut64 mask; + const HexOp *Rxx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + + // width = ((st32) extract64(((ut64) ((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff)))), 0x0, 0x6)); + RzILOpPure *op_RSHIFT_7 = SHIFTRA(Rtt, SN(32, 0x20)); + RzILOpPure *op_AND_9 = LOGAND(op_RSHIFT_7, SN(64, 0xffffffff)); + RzILOpEffect *op_ASSIGN_18 = SETL("width", CAST(32, IL_FALSE, EXTRACT64(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(op_AND_9), DUP(op_AND_9))), CAST(32, MSB(DUP(op_AND_9)), DUP(op_AND_9)))), SN(32, 0), SN(32, 6)))); + + // offset = ((st32) sextract64(((ut64) ((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff)))), 0x0, 0x7)); + RzILOpPure *op_RSHIFT_26 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_28 = LOGAND(op_RSHIFT_26, SN(64, 0xffffffff)); + RzILOpEffect *op_ASSIGN_37 = SETL("offset", CAST(32, MSB(SEXTRACT64(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(op_AND_28), DUP(op_AND_28))), CAST(32, MSB(DUP(op_AND_28)), DUP(op_AND_28)))), SN(32, 0), SN(32, 7))), SEXTRACT64(CAST(64, 
IL_FALSE, CAST(64, MSB(CAST(32, MSB(DUP(op_AND_28)), DUP(op_AND_28))), CAST(32, MSB(DUP(op_AND_28)), DUP(op_AND_28)))), SN(32, 0), SN(32, 7)))); + + // mask = ((ut64) (0x1 << width) - ((st64) 0x1)); + RzILOpPure *op_LSHIFT_40 = SHIFTL0(SN(64, 1), VARL("width")); + RzILOpPure *op_SUB_43 = SUB(op_LSHIFT_40, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpEffect *op_ASSIGN_45 = SETL("mask", CAST(64, IL_FALSE, op_SUB_43)); + + // Rxx = ((st64) 0x0); + RzILOpEffect *op_ASSIGN_52 = WRITE_REG(bundle, Rxx_op, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + + // Rxx = (Rxx & ((st64) (~(mask << offset)))); + RzILOpPure *op_LSHIFT_53 = SHIFTL0(VARL("mask"), VARL("offset")); + RzILOpPure *op_NOT_54 = LOGNOT(op_LSHIFT_53); + RzILOpPure *op_AND_56 = LOGAND(READ_REG(pkt, Rxx_op, false), CAST(64, IL_FALSE, op_NOT_54)); + RzILOpEffect *op_ASSIGN_AND_57 = WRITE_REG(bundle, Rxx_op, op_AND_56); + + // Rxx = (Rxx | ((st64) ((((ut64) Rss) & mask) << offset))); + RzILOpPure *op_AND_60 = LOGAND(CAST(64, IL_FALSE, Rss), VARL("mask")); + RzILOpPure *op_LSHIFT_61 = SHIFTL0(op_AND_60, VARL("offset")); + RzILOpPure *op_OR_63 = LOGOR(READ_REG(pkt, Rxx_op, false), CAST(64, IL_FALSE, op_LSHIFT_61)); + RzILOpEffect *op_ASSIGN_OR_64 = WRITE_REG(bundle, Rxx_op, op_OR_63); + + // seq(Rxx = ((st64) 0x0)); + RzILOpEffect *seq_then_65 = op_ASSIGN_52; + + // seq(Rxx = (Rxx & ((st64) (~(mask << offset)))); Rxx = (Rxx | ((s ...; + RzILOpEffect *seq_else_66 = SEQN(2, op_ASSIGN_AND_57, op_ASSIGN_OR_64); + + // if ((offset < 0x0)) {seq(Rxx = ((st64) 0x0))} else {seq(Rxx = (Rxx & ((st64) (~(mask << offset)))); Rxx = (Rxx | ((s ...}; + RzILOpPure *op_LT_48 = SLT(VARL("offset"), SN(32, 0)); + RzILOpEffect *branch_67 = BRANCH(op_LT_48, seq_then_65, seq_else_66); + + RzILOpEffect *instruction_sequence = SEQN(4, op_ASSIGN_18, op_ASSIGN_37, op_ASSIGN_45, branch_67); + return instruction_sequence; +} + +// Rdd = interleave(Rss) +RzILOpEffect *hex_il_op_s2_interleave(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Rdd = 
lfs(Rss,Rtt) +RzILOpEffect *hex_il_op_s2_lfsp(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Rdd = lsl(Rss,Rt) +RzILOpEffect *hex_il_op_s2_lsl_r_p(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + // Declare: st32 shamt; + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + + // shamt = ((st32) sextract64(((ut64) Rt), 0x0, 0x7)); + RzILOpEffect *op_ASSIGN_10 = SETL("shamt", CAST(32, MSB(SEXTRACT64(CAST(64, IL_FALSE, Rt), SN(32, 0), SN(32, 7))), SEXTRACT64(CAST(64, IL_FALSE, DUP(Rt)), SN(32, 0), SN(32, 7)))); + + // Rdd = ((st64) ((shamt < 0x0) ? ((((ut64) Rss) >> (-shamt) - 0x1) >> 0x1) : (((ut64) Rss) << shamt))); + RzILOpPure *op_LT_14 = SLT(VARL("shamt"), SN(32, 0)); + RzILOpPure *op_NEG_17 = NEG(VARL("shamt")); + RzILOpPure *op_SUB_19 = SUB(op_NEG_17, SN(32, 1)); + RzILOpPure *op_RSHIFT_20 = SHIFTR0(CAST(64, IL_FALSE, Rss), op_SUB_19); + RzILOpPure *op_RSHIFT_22 = SHIFTR0(op_RSHIFT_20, SN(32, 1)); + RzILOpPure *op_LSHIFT_24 = SHIFTL0(CAST(64, IL_FALSE, DUP(Rss)), VARL("shamt")); + RzILOpPure *cond_25 = ITE(op_LT_14, op_RSHIFT_22, op_LSHIFT_24); + RzILOpEffect *op_ASSIGN_27 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, cond_25)); + + RzILOpEffect *instruction_sequence = SEQN(2, op_ASSIGN_10, op_ASSIGN_27); + return instruction_sequence; +} + +// Rxx += lsl(Rss,Rt) +RzILOpEffect *hex_il_op_s2_lsl_r_p_acc(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + // Declare: st32 shamt; + const HexOp *Rxx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + + // shamt = ((st32) 
sextract64(((ut64) Rt), 0x0, 0x7)); + RzILOpEffect *op_ASSIGN_10 = SETL("shamt", CAST(32, MSB(SEXTRACT64(CAST(64, IL_FALSE, Rt), SN(32, 0), SN(32, 7))), SEXTRACT64(CAST(64, IL_FALSE, DUP(Rt)), SN(32, 0), SN(32, 7)))); + + // Rxx = ((st64) ((ut64) Rxx) + ((shamt < 0x0) ? ((((ut64) Rss) >> (-shamt) - 0x1) >> 0x1) : (((ut64) Rss) << shamt))); + RzILOpPure *op_LT_14 = SLT(VARL("shamt"), SN(32, 0)); + RzILOpPure *op_NEG_17 = NEG(VARL("shamt")); + RzILOpPure *op_SUB_19 = SUB(op_NEG_17, SN(32, 1)); + RzILOpPure *op_RSHIFT_20 = SHIFTR0(CAST(64, IL_FALSE, Rss), op_SUB_19); + RzILOpPure *op_RSHIFT_22 = SHIFTR0(op_RSHIFT_20, SN(32, 1)); + RzILOpPure *op_LSHIFT_24 = SHIFTL0(CAST(64, IL_FALSE, DUP(Rss)), VARL("shamt")); + RzILOpPure *cond_25 = ITE(op_LT_14, op_RSHIFT_22, op_LSHIFT_24); + RzILOpPure *op_ADD_27 = ADD(CAST(64, IL_FALSE, READ_REG(pkt, Rxx_op, false)), cond_25); + RzILOpEffect *op_ASSIGN_29 = WRITE_REG(bundle, Rxx_op, CAST(64, IL_FALSE, op_ADD_27)); + + RzILOpEffect *instruction_sequence = SEQN(2, op_ASSIGN_10, op_ASSIGN_29); + return instruction_sequence; +} + +// Rxx &= lsl(Rss,Rt) +RzILOpEffect *hex_il_op_s2_lsl_r_p_and(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + // Declare: st32 shamt; + const HexOp *Rxx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + + // shamt = ((st32) sextract64(((ut64) Rt), 0x0, 0x7)); + RzILOpEffect *op_ASSIGN_10 = SETL("shamt", CAST(32, MSB(SEXTRACT64(CAST(64, IL_FALSE, Rt), SN(32, 0), SN(32, 7))), SEXTRACT64(CAST(64, IL_FALSE, DUP(Rt)), SN(32, 0), SN(32, 7)))); + + // Rxx = ((st64) (((ut64) Rxx) & ((shamt < 0x0) ? 
((((ut64) Rss) >> (-shamt) - 0x1) >> 0x1) : (((ut64) Rss) << shamt)))); + RzILOpPure *op_LT_14 = SLT(VARL("shamt"), SN(32, 0)); + RzILOpPure *op_NEG_17 = NEG(VARL("shamt")); + RzILOpPure *op_SUB_19 = SUB(op_NEG_17, SN(32, 1)); + RzILOpPure *op_RSHIFT_20 = SHIFTR0(CAST(64, IL_FALSE, Rss), op_SUB_19); + RzILOpPure *op_RSHIFT_22 = SHIFTR0(op_RSHIFT_20, SN(32, 1)); + RzILOpPure *op_LSHIFT_24 = SHIFTL0(CAST(64, IL_FALSE, DUP(Rss)), VARL("shamt")); + RzILOpPure *cond_25 = ITE(op_LT_14, op_RSHIFT_22, op_LSHIFT_24); + RzILOpPure *op_AND_27 = LOGAND(CAST(64, IL_FALSE, READ_REG(pkt, Rxx_op, false)), cond_25); + RzILOpEffect *op_ASSIGN_29 = WRITE_REG(bundle, Rxx_op, CAST(64, IL_FALSE, op_AND_27)); + + RzILOpEffect *instruction_sequence = SEQN(2, op_ASSIGN_10, op_ASSIGN_29); + return instruction_sequence; +} + +// Rxx -= lsl(Rss,Rt) +RzILOpEffect *hex_il_op_s2_lsl_r_p_nac(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + // Declare: st32 shamt; + const HexOp *Rxx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + + // shamt = ((st32) sextract64(((ut64) Rt), 0x0, 0x7)); + RzILOpEffect *op_ASSIGN_10 = SETL("shamt", CAST(32, MSB(SEXTRACT64(CAST(64, IL_FALSE, Rt), SN(32, 0), SN(32, 7))), SEXTRACT64(CAST(64, IL_FALSE, DUP(Rt)), SN(32, 0), SN(32, 7)))); + + // Rxx = ((st64) ((ut64) Rxx) - ((shamt < 0x0) ? 
((((ut64) Rss) >> (-shamt) - 0x1) >> 0x1) : (((ut64) Rss) << shamt))); + RzILOpPure *op_LT_14 = SLT(VARL("shamt"), SN(32, 0)); + RzILOpPure *op_NEG_17 = NEG(VARL("shamt")); + RzILOpPure *op_SUB_19 = SUB(op_NEG_17, SN(32, 1)); + RzILOpPure *op_RSHIFT_20 = SHIFTR0(CAST(64, IL_FALSE, Rss), op_SUB_19); + RzILOpPure *op_RSHIFT_22 = SHIFTR0(op_RSHIFT_20, SN(32, 1)); + RzILOpPure *op_LSHIFT_24 = SHIFTL0(CAST(64, IL_FALSE, DUP(Rss)), VARL("shamt")); + RzILOpPure *cond_25 = ITE(op_LT_14, op_RSHIFT_22, op_LSHIFT_24); + RzILOpPure *op_SUB_27 = SUB(CAST(64, IL_FALSE, READ_REG(pkt, Rxx_op, false)), cond_25); + RzILOpEffect *op_ASSIGN_29 = WRITE_REG(bundle, Rxx_op, CAST(64, IL_FALSE, op_SUB_27)); + + RzILOpEffect *instruction_sequence = SEQN(2, op_ASSIGN_10, op_ASSIGN_29); + return instruction_sequence; +} + +// Rxx |= lsl(Rss,Rt) +RzILOpEffect *hex_il_op_s2_lsl_r_p_or(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + // Declare: st32 shamt; + const HexOp *Rxx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + + // shamt = ((st32) sextract64(((ut64) Rt), 0x0, 0x7)); + RzILOpEffect *op_ASSIGN_10 = SETL("shamt", CAST(32, MSB(SEXTRACT64(CAST(64, IL_FALSE, Rt), SN(32, 0), SN(32, 7))), SEXTRACT64(CAST(64, IL_FALSE, DUP(Rt)), SN(32, 0), SN(32, 7)))); + + // Rxx = ((st64) (((ut64) Rxx) | ((shamt < 0x0) ? 
((((ut64) Rss) >> (-shamt) - 0x1) >> 0x1) : (((ut64) Rss) << shamt)))); + RzILOpPure *op_LT_14 = SLT(VARL("shamt"), SN(32, 0)); + RzILOpPure *op_NEG_17 = NEG(VARL("shamt")); + RzILOpPure *op_SUB_19 = SUB(op_NEG_17, SN(32, 1)); + RzILOpPure *op_RSHIFT_20 = SHIFTR0(CAST(64, IL_FALSE, Rss), op_SUB_19); + RzILOpPure *op_RSHIFT_22 = SHIFTR0(op_RSHIFT_20, SN(32, 1)); + RzILOpPure *op_LSHIFT_24 = SHIFTL0(CAST(64, IL_FALSE, DUP(Rss)), VARL("shamt")); + RzILOpPure *cond_25 = ITE(op_LT_14, op_RSHIFT_22, op_LSHIFT_24); + RzILOpPure *op_OR_27 = LOGOR(CAST(64, IL_FALSE, READ_REG(pkt, Rxx_op, false)), cond_25); + RzILOpEffect *op_ASSIGN_29 = WRITE_REG(bundle, Rxx_op, CAST(64, IL_FALSE, op_OR_27)); + + RzILOpEffect *instruction_sequence = SEQN(2, op_ASSIGN_10, op_ASSIGN_29); + return instruction_sequence; +} + +// Rxx ^= lsl(Rss,Rt) +RzILOpEffect *hex_il_op_s2_lsl_r_p_xor(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + // Declare: st32 shamt; + const HexOp *Rxx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + + // shamt = ((st32) sextract64(((ut64) Rt), 0x0, 0x7)); + RzILOpEffect *op_ASSIGN_10 = SETL("shamt", CAST(32, MSB(SEXTRACT64(CAST(64, IL_FALSE, Rt), SN(32, 0), SN(32, 7))), SEXTRACT64(CAST(64, IL_FALSE, DUP(Rt)), SN(32, 0), SN(32, 7)))); + + // Rxx = ((st64) (((ut64) Rxx) ^ ((shamt < 0x0) ? 
((((ut64) Rss) >> (-shamt) - 0x1) >> 0x1) : (((ut64) Rss) << shamt)))); + RzILOpPure *op_LT_14 = SLT(VARL("shamt"), SN(32, 0)); + RzILOpPure *op_NEG_17 = NEG(VARL("shamt")); + RzILOpPure *op_SUB_19 = SUB(op_NEG_17, SN(32, 1)); + RzILOpPure *op_RSHIFT_20 = SHIFTR0(CAST(64, IL_FALSE, Rss), op_SUB_19); + RzILOpPure *op_RSHIFT_22 = SHIFTR0(op_RSHIFT_20, SN(32, 1)); + RzILOpPure *op_LSHIFT_24 = SHIFTL0(CAST(64, IL_FALSE, DUP(Rss)), VARL("shamt")); + RzILOpPure *cond_25 = ITE(op_LT_14, op_RSHIFT_22, op_LSHIFT_24); + RzILOpPure *op_XOR_27 = LOGXOR(CAST(64, IL_FALSE, READ_REG(pkt, Rxx_op, false)), cond_25); + RzILOpEffect *op_ASSIGN_29 = WRITE_REG(bundle, Rxx_op, CAST(64, IL_FALSE, op_XOR_27)); + + RzILOpEffect *instruction_sequence = SEQN(2, op_ASSIGN_10, op_ASSIGN_29); + return instruction_sequence; +} + +// Rd = lsl(Rs,Rt) +RzILOpEffect *hex_il_op_s2_lsl_r_r(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + // Declare: st32 shamt; + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // shamt = ((st32) sextract64(((ut64) Rt), 0x0, 0x7)); + RzILOpEffect *op_ASSIGN_10 = SETL("shamt", CAST(32, MSB(SEXTRACT64(CAST(64, IL_FALSE, Rt), SN(32, 0), SN(32, 7))), SEXTRACT64(CAST(64, IL_FALSE, DUP(Rt)), SN(32, 0), SN(32, 7)))); + + // Rd = ((st32) ((shamt < 0x0) ? 
((((ut64) ((ut32) Rs)) >> (-shamt) - 0x1) >> 0x1) : (((ut64) ((ut32) Rs)) << shamt))); + RzILOpPure *op_LT_14 = SLT(VARL("shamt"), SN(32, 0)); + RzILOpPure *op_NEG_18 = NEG(VARL("shamt")); + RzILOpPure *op_SUB_20 = SUB(op_NEG_18, SN(32, 1)); + RzILOpPure *op_RSHIFT_21 = SHIFTR0(CAST(64, IL_FALSE, CAST(32, IL_FALSE, Rs)), op_SUB_20); + RzILOpPure *op_RSHIFT_23 = SHIFTR0(op_RSHIFT_21, SN(32, 1)); + RzILOpPure *op_LSHIFT_26 = SHIFTL0(CAST(64, IL_FALSE, CAST(32, IL_FALSE, DUP(Rs))), VARL("shamt")); + RzILOpPure *cond_27 = ITE(op_LT_14, op_RSHIFT_23, op_LSHIFT_26); + RzILOpEffect *op_ASSIGN_29 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, cond_27)); + + RzILOpEffect *instruction_sequence = SEQN(2, op_ASSIGN_10, op_ASSIGN_29); + return instruction_sequence; +} + +// Rx += lsl(Rs,Rt) +RzILOpEffect *hex_il_op_s2_lsl_r_r_acc(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + // Declare: st32 shamt; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // shamt = ((st32) sextract64(((ut64) Rt), 0x0, 0x7)); + RzILOpEffect *op_ASSIGN_10 = SETL("shamt", CAST(32, MSB(SEXTRACT64(CAST(64, IL_FALSE, Rt), SN(32, 0), SN(32, 7))), SEXTRACT64(CAST(64, IL_FALSE, DUP(Rt)), SN(32, 0), SN(32, 7)))); + + // Rx = ((st32) ((ut64) Rx) + ((shamt < 0x0) ? 
((((ut64) ((ut32) Rs)) >> (-shamt) - 0x1) >> 0x1) : (((ut64) ((ut32) Rs)) << shamt))); + RzILOpPure *op_LT_14 = SLT(VARL("shamt"), SN(32, 0)); + RzILOpPure *op_NEG_18 = NEG(VARL("shamt")); + RzILOpPure *op_SUB_20 = SUB(op_NEG_18, SN(32, 1)); + RzILOpPure *op_RSHIFT_21 = SHIFTR0(CAST(64, IL_FALSE, CAST(32, IL_FALSE, Rs)), op_SUB_20); + RzILOpPure *op_RSHIFT_23 = SHIFTR0(op_RSHIFT_21, SN(32, 1)); + RzILOpPure *op_LSHIFT_26 = SHIFTL0(CAST(64, IL_FALSE, CAST(32, IL_FALSE, DUP(Rs))), VARL("shamt")); + RzILOpPure *cond_27 = ITE(op_LT_14, op_RSHIFT_23, op_LSHIFT_26); + RzILOpPure *op_ADD_29 = ADD(CAST(64, IL_FALSE, READ_REG(pkt, Rx_op, false)), cond_27); + RzILOpEffect *op_ASSIGN_31 = WRITE_REG(bundle, Rx_op, CAST(32, IL_FALSE, op_ADD_29)); + + RzILOpEffect *instruction_sequence = SEQN(2, op_ASSIGN_10, op_ASSIGN_31); + return instruction_sequence; +} + +// Rx &= lsl(Rs,Rt) +RzILOpEffect *hex_il_op_s2_lsl_r_r_and(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + // Declare: st32 shamt; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // shamt = ((st32) sextract64(((ut64) Rt), 0x0, 0x7)); + RzILOpEffect *op_ASSIGN_10 = SETL("shamt", CAST(32, MSB(SEXTRACT64(CAST(64, IL_FALSE, Rt), SN(32, 0), SN(32, 7))), SEXTRACT64(CAST(64, IL_FALSE, DUP(Rt)), SN(32, 0), SN(32, 7)))); + + // Rx = ((st32) (((ut64) Rx) & ((shamt < 0x0) ? 
((((ut64) ((ut32) Rs)) >> (-shamt) - 0x1) >> 0x1) : (((ut64) ((ut32) Rs)) << shamt)))); + RzILOpPure *op_LT_14 = SLT(VARL("shamt"), SN(32, 0)); + RzILOpPure *op_NEG_18 = NEG(VARL("shamt")); + RzILOpPure *op_SUB_20 = SUB(op_NEG_18, SN(32, 1)); + RzILOpPure *op_RSHIFT_21 = SHIFTR0(CAST(64, IL_FALSE, CAST(32, IL_FALSE, Rs)), op_SUB_20); + RzILOpPure *op_RSHIFT_23 = SHIFTR0(op_RSHIFT_21, SN(32, 1)); + RzILOpPure *op_LSHIFT_26 = SHIFTL0(CAST(64, IL_FALSE, CAST(32, IL_FALSE, DUP(Rs))), VARL("shamt")); + RzILOpPure *cond_27 = ITE(op_LT_14, op_RSHIFT_23, op_LSHIFT_26); + RzILOpPure *op_AND_29 = LOGAND(CAST(64, IL_FALSE, READ_REG(pkt, Rx_op, false)), cond_27); + RzILOpEffect *op_ASSIGN_31 = WRITE_REG(bundle, Rx_op, CAST(32, IL_FALSE, op_AND_29)); + + RzILOpEffect *instruction_sequence = SEQN(2, op_ASSIGN_10, op_ASSIGN_31); + return instruction_sequence; +} + +// Rx -= lsl(Rs,Rt) +RzILOpEffect *hex_il_op_s2_lsl_r_r_nac(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + // Declare: st32 shamt; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // shamt = ((st32) sextract64(((ut64) Rt), 0x0, 0x7)); + RzILOpEffect *op_ASSIGN_10 = SETL("shamt", CAST(32, MSB(SEXTRACT64(CAST(64, IL_FALSE, Rt), SN(32, 0), SN(32, 7))), SEXTRACT64(CAST(64, IL_FALSE, DUP(Rt)), SN(32, 0), SN(32, 7)))); + + // Rx = ((st32) ((ut64) Rx) - ((shamt < 0x0) ? 
((((ut64) ((ut32) Rs)) >> (-shamt) - 0x1) >> 0x1) : (((ut64) ((ut32) Rs)) << shamt))); + RzILOpPure *op_LT_14 = SLT(VARL("shamt"), SN(32, 0)); + RzILOpPure *op_NEG_18 = NEG(VARL("shamt")); + RzILOpPure *op_SUB_20 = SUB(op_NEG_18, SN(32, 1)); + RzILOpPure *op_RSHIFT_21 = SHIFTR0(CAST(64, IL_FALSE, CAST(32, IL_FALSE, Rs)), op_SUB_20); + RzILOpPure *op_RSHIFT_23 = SHIFTR0(op_RSHIFT_21, SN(32, 1)); + RzILOpPure *op_LSHIFT_26 = SHIFTL0(CAST(64, IL_FALSE, CAST(32, IL_FALSE, DUP(Rs))), VARL("shamt")); + RzILOpPure *cond_27 = ITE(op_LT_14, op_RSHIFT_23, op_LSHIFT_26); + RzILOpPure *op_SUB_29 = SUB(CAST(64, IL_FALSE, READ_REG(pkt, Rx_op, false)), cond_27); + RzILOpEffect *op_ASSIGN_31 = WRITE_REG(bundle, Rx_op, CAST(32, IL_FALSE, op_SUB_29)); + + RzILOpEffect *instruction_sequence = SEQN(2, op_ASSIGN_10, op_ASSIGN_31); + return instruction_sequence; +} + +// Rx |= lsl(Rs,Rt) +RzILOpEffect *hex_il_op_s2_lsl_r_r_or(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + // Declare: st32 shamt; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // shamt = ((st32) sextract64(((ut64) Rt), 0x0, 0x7)); + RzILOpEffect *op_ASSIGN_10 = SETL("shamt", CAST(32, MSB(SEXTRACT64(CAST(64, IL_FALSE, Rt), SN(32, 0), SN(32, 7))), SEXTRACT64(CAST(64, IL_FALSE, DUP(Rt)), SN(32, 0), SN(32, 7)))); + + // Rx = ((st32) (((ut64) Rx) | ((shamt < 0x0) ? 
((((ut64) ((ut32) Rs)) >> (-shamt) - 0x1) >> 0x1) : (((ut64) ((ut32) Rs)) << shamt)))); + RzILOpPure *op_LT_14 = SLT(VARL("shamt"), SN(32, 0)); + RzILOpPure *op_NEG_18 = NEG(VARL("shamt")); + RzILOpPure *op_SUB_20 = SUB(op_NEG_18, SN(32, 1)); + RzILOpPure *op_RSHIFT_21 = SHIFTR0(CAST(64, IL_FALSE, CAST(32, IL_FALSE, Rs)), op_SUB_20); + RzILOpPure *op_RSHIFT_23 = SHIFTR0(op_RSHIFT_21, SN(32, 1)); + RzILOpPure *op_LSHIFT_26 = SHIFTL0(CAST(64, IL_FALSE, CAST(32, IL_FALSE, DUP(Rs))), VARL("shamt")); + RzILOpPure *cond_27 = ITE(op_LT_14, op_RSHIFT_23, op_LSHIFT_26); + RzILOpPure *op_OR_29 = LOGOR(CAST(64, IL_FALSE, READ_REG(pkt, Rx_op, false)), cond_27); + RzILOpEffect *op_ASSIGN_31 = WRITE_REG(bundle, Rx_op, CAST(32, IL_FALSE, op_OR_29)); + + RzILOpEffect *instruction_sequence = SEQN(2, op_ASSIGN_10, op_ASSIGN_31); + return instruction_sequence; +} + +// Rdd = vlslh(Rss,Rt) +RzILOpEffect *hex_il_op_s2_lsl_r_vh(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: st32 i; + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_2 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_5 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp496 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_7 = SETL("h_tmp496", VARL("i")); + + // seq(h_tmp496 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_8 = SEQN(2, op_ASSIGN_hybrid_tmp_7, op_INC_5); + + // Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * 0x10)))) | ((((sextract64(((ut64) Rt), 0x0, 0x7) < ((st64) 0x0)) ? 
((((ut64) ((ut16) ((Rss >> i * 0x10) & ((st64) 0xffff)))) >> (-sextract64(((ut64) Rt), 0x0, 0x7)) - ((st64) 0x1)) >> 0x1) : (((ut64) ((ut16) ((Rss >> i * 0x10) & ((st64) 0xffff)))) << sextract64(((ut64) Rt), 0x0, 0x7))) & ((ut64) 0xffff)) << i * 0x10))); + RzILOpPure *op_MUL_12 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_13 = SHIFTL0(SN(64, 0xffff), op_MUL_12); + RzILOpPure *op_NOT_14 = LOGNOT(op_LSHIFT_13); + RzILOpPure *op_AND_15 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_14); + RzILOpPure *op_LT_27 = SLT(SEXTRACT64(CAST(64, IL_FALSE, Rt), SN(32, 0), SN(32, 7)), CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_MUL_30 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_31 = SHIFTRA(Rss, op_MUL_30); + RzILOpPure *op_AND_34 = LOGAND(op_RSHIFT_31, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_NEG_45 = NEG(SEXTRACT64(CAST(64, IL_FALSE, DUP(Rt)), SN(32, 0), SN(32, 7))); + RzILOpPure *op_SUB_48 = SUB(op_NEG_45, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *op_RSHIFT_49 = SHIFTR0(CAST(64, IL_FALSE, CAST(16, IL_FALSE, op_AND_34)), op_SUB_48); + RzILOpPure *op_RSHIFT_51 = SHIFTR0(op_RSHIFT_49, SN(32, 1)); + RzILOpPure *op_MUL_53 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_54 = SHIFTRA(DUP(Rss), op_MUL_53); + RzILOpPure *op_AND_57 = LOGAND(op_RSHIFT_54, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_LSHIFT_68 = SHIFTL0(CAST(64, IL_FALSE, CAST(16, IL_FALSE, op_AND_57)), SEXTRACT64(CAST(64, IL_FALSE, DUP(Rt)), SN(32, 0), SN(32, 7))); + RzILOpPure *cond_69 = ITE(op_LT_27, op_RSHIFT_51, op_LSHIFT_68); + RzILOpPure *op_AND_72 = LOGAND(cond_69, CAST(64, IL_FALSE, SN(32, 0xffff))); + RzILOpPure *op_MUL_74 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_75 = SHIFTL0(op_AND_72, op_MUL_74); + RzILOpPure *op_OR_77 = LOGOR(CAST(64, IL_FALSE, op_AND_15), op_LSHIFT_75); + RzILOpEffect *op_ASSIGN_79 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_77)); + + // seq(h_tmp496; Rdd = ((st64) (((ut64) (Rdd & 
(~(0xffff << i * 0x1 ...; + RzILOpEffect *seq_81 = op_ASSIGN_79; + + // seq(seq(h_tmp496; Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * ...; + RzILOpEffect *seq_82 = SEQN(2, seq_81, seq_8); + + // while ((i < 0x4)) { seq(seq(h_tmp496; Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * ... }; + RzILOpPure *op_LT_4 = SLT(VARL("i"), SN(32, 4)); + RzILOpEffect *for_83 = REPEAT(op_LT_4, seq_82); + + // seq(i = 0x0; while ((i < 0x4)) { seq(seq(h_tmp496; Rdd = ((st64) ...; + RzILOpEffect *seq_84 = SEQN(2, op_ASSIGN_2, for_83); + + RzILOpEffect *instruction_sequence = seq_84; + return instruction_sequence; +} + +// Rdd = vlslw(Rss,Rt) +RzILOpEffect *hex_il_op_s2_lsl_r_vw(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: st32 i; + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_2 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_5 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp497 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_7 = SETL("h_tmp497", VARL("i")); + + // seq(h_tmp497 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_8 = SEQN(2, op_ASSIGN_hybrid_tmp_7, op_INC_5); + + // Rdd = ((st64) (((ut64) (Rdd & (~(0xffffffff << i * 0x20)))) | ((((sextract64(((ut64) Rt), 0x0, 0x7) < ((st64) 0x0)) ? 
((((ut64) ((ut32) ((ut64) ((ut32) ((Rss >> i * 0x20) & 0xffffffff))))) >> (-sextract64(((ut64) Rt), 0x0, 0x7)) - ((st64) 0x1)) >> 0x1) : (((ut64) ((ut32) ((ut64) ((ut32) ((Rss >> i * 0x20) & 0xffffffff))))) << sextract64(((ut64) Rt), 0x0, 0x7))) & ((ut64) 0xffffffff)) << i * 0x20))); + RzILOpPure *op_MUL_12 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_LSHIFT_13 = SHIFTL0(SN(64, 0xffffffff), op_MUL_12); + RzILOpPure *op_NOT_14 = LOGNOT(op_LSHIFT_13); + RzILOpPure *op_AND_15 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_14); + RzILOpPure *op_LT_27 = SLT(SEXTRACT64(CAST(64, IL_FALSE, Rt), SN(32, 0), SN(32, 7)), CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_MUL_30 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_RSHIFT_31 = SHIFTRA(Rss, op_MUL_30); + RzILOpPure *op_AND_33 = LOGAND(op_RSHIFT_31, SN(64, 0xffffffff)); + RzILOpPure *op_NEG_46 = NEG(SEXTRACT64(CAST(64, IL_FALSE, DUP(Rt)), SN(32, 0), SN(32, 7))); + RzILOpPure *op_SUB_49 = SUB(op_NEG_46, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *op_RSHIFT_50 = SHIFTR0(CAST(64, IL_FALSE, CAST(32, IL_FALSE, CAST(64, IL_FALSE, CAST(32, IL_FALSE, op_AND_33)))), op_SUB_49); + RzILOpPure *op_RSHIFT_52 = SHIFTR0(op_RSHIFT_50, SN(32, 1)); + RzILOpPure *op_MUL_54 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_RSHIFT_55 = SHIFTRA(DUP(Rss), op_MUL_54); + RzILOpPure *op_AND_57 = LOGAND(op_RSHIFT_55, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_70 = SHIFTL0(CAST(64, IL_FALSE, CAST(32, IL_FALSE, CAST(64, IL_FALSE, CAST(32, IL_FALSE, op_AND_57)))), SEXTRACT64(CAST(64, IL_FALSE, DUP(Rt)), SN(32, 0), SN(32, 7))); + RzILOpPure *cond_71 = ITE(op_LT_27, op_RSHIFT_52, op_LSHIFT_70); + RzILOpPure *op_AND_74 = LOGAND(cond_71, CAST(64, IL_FALSE, SN(64, 0xffffffff))); + RzILOpPure *op_MUL_76 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_LSHIFT_77 = SHIFTL0(op_AND_74, op_MUL_76); + RzILOpPure *op_OR_79 = LOGOR(CAST(64, IL_FALSE, op_AND_15), op_LSHIFT_77); + RzILOpEffect *op_ASSIGN_81 = WRITE_REG(bundle, Rdd_op, CAST(64, 
IL_FALSE, op_OR_79)); + + // seq(h_tmp497; Rdd = ((st64) (((ut64) (Rdd & (~(0xffffffff << i * ...; + RzILOpEffect *seq_83 = op_ASSIGN_81; + + // seq(seq(h_tmp497; Rdd = ((st64) (((ut64) (Rdd & (~(0xffffffff << ...; + RzILOpEffect *seq_84 = SEQN(2, seq_83, seq_8); + + // while ((i < 0x2)) { seq(seq(h_tmp497; Rdd = ((st64) (((ut64) (Rdd & (~(0xffffffff << ... }; + RzILOpPure *op_LT_4 = SLT(VARL("i"), SN(32, 2)); + RzILOpEffect *for_85 = REPEAT(op_LT_4, seq_84); + + // seq(i = 0x0; while ((i < 0x2)) { seq(seq(h_tmp497; Rdd = ((st64) ...; + RzILOpEffect *seq_86 = SEQN(2, op_ASSIGN_2, for_85); + + RzILOpEffect *instruction_sequence = seq_86; + return instruction_sequence; +} + +// Rdd = lsr(Rss,Ii) +RzILOpEffect *hex_il_op_s2_lsr_i_p(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + + // u = u; + RzILOpEffect *imm_assign_1 = SETL("u", u); + + // Rdd = ((st64) ((u >= ((ut32) 0x40)) ? 
((ut64) 0x0) : (((ut64) Rss) >> u))); + RzILOpPure *op_GE_8 = UGE(VARL("u"), CAST(32, IL_FALSE, SN(32, 0x40))); + RzILOpPure *op_RSHIFT_11 = SHIFTR0(CAST(64, IL_FALSE, Rss), VARL("u")); + RzILOpPure *cond_13 = ITE(op_GE_8, CAST(64, IL_FALSE, SN(32, 0)), op_RSHIFT_11); + RzILOpEffect *op_ASSIGN_15 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, cond_13)); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_1, op_ASSIGN_15); + return instruction_sequence; +} + +// Rxx += lsr(Rss,Ii) +RzILOpEffect *hex_il_op_s2_lsr_i_p_acc(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rxx_op = ISA2REG(hi, 'x', false); + + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + + // u = u; + RzILOpEffect *imm_assign_1 = SETL("u", u); + + // Rxx = ((st64) ((ut64) Rxx) + ((u >= ((ut32) 0x40)) ? ((ut64) 0x0) : (((ut64) Rss) >> u))); + RzILOpPure *op_GE_8 = UGE(VARL("u"), CAST(32, IL_FALSE, SN(32, 0x40))); + RzILOpPure *op_RSHIFT_11 = SHIFTR0(CAST(64, IL_FALSE, Rss), VARL("u")); + RzILOpPure *cond_13 = ITE(op_GE_8, CAST(64, IL_FALSE, SN(32, 0)), op_RSHIFT_11); + RzILOpPure *op_ADD_15 = ADD(CAST(64, IL_FALSE, READ_REG(pkt, Rxx_op, false)), cond_13); + RzILOpEffect *op_ASSIGN_17 = WRITE_REG(bundle, Rxx_op, CAST(64, IL_FALSE, op_ADD_15)); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_1, op_ASSIGN_17); + return instruction_sequence; +} + +// Rxx &= lsr(Rss,Ii) +RzILOpEffect *hex_il_op_s2_lsr_i_p_and(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rxx_op = ISA2REG(hi, 'x', false); + + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + + // u = u; + RzILOpEffect *imm_assign_1 = SETL("u", u); + + // Rxx = ((st64) (((ut64) Rxx) & ((u >= ((ut32) 
0x40)) ? ((ut64) 0x0) : (((ut64) Rss) >> u)))); + RzILOpPure *op_GE_8 = UGE(VARL("u"), CAST(32, IL_FALSE, SN(32, 0x40))); + RzILOpPure *op_RSHIFT_11 = SHIFTR0(CAST(64, IL_FALSE, Rss), VARL("u")); + RzILOpPure *cond_13 = ITE(op_GE_8, CAST(64, IL_FALSE, SN(32, 0)), op_RSHIFT_11); + RzILOpPure *op_AND_15 = LOGAND(CAST(64, IL_FALSE, READ_REG(pkt, Rxx_op, false)), cond_13); + RzILOpEffect *op_ASSIGN_17 = WRITE_REG(bundle, Rxx_op, CAST(64, IL_FALSE, op_AND_15)); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_1, op_ASSIGN_17); + return instruction_sequence; +} + +// Rxx -= lsr(Rss,Ii) +RzILOpEffect *hex_il_op_s2_lsr_i_p_nac(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rxx_op = ISA2REG(hi, 'x', false); + + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + + // u = u; + RzILOpEffect *imm_assign_1 = SETL("u", u); + + // Rxx = ((st64) ((ut64) Rxx) - ((u >= ((ut32) 0x40)) ? 
((ut64) 0x0) : (((ut64) Rss) >> u))); + RzILOpPure *op_GE_8 = UGE(VARL("u"), CAST(32, IL_FALSE, SN(32, 0x40))); + RzILOpPure *op_RSHIFT_11 = SHIFTR0(CAST(64, IL_FALSE, Rss), VARL("u")); + RzILOpPure *cond_13 = ITE(op_GE_8, CAST(64, IL_FALSE, SN(32, 0)), op_RSHIFT_11); + RzILOpPure *op_SUB_15 = SUB(CAST(64, IL_FALSE, READ_REG(pkt, Rxx_op, false)), cond_13); + RzILOpEffect *op_ASSIGN_17 = WRITE_REG(bundle, Rxx_op, CAST(64, IL_FALSE, op_SUB_15)); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_1, op_ASSIGN_17); + return instruction_sequence; +} + +// Rxx |= lsr(Rss,Ii) +RzILOpEffect *hex_il_op_s2_lsr_i_p_or(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rxx_op = ISA2REG(hi, 'x', false); + + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + + // u = u; + RzILOpEffect *imm_assign_1 = SETL("u", u); + + // Rxx = ((st64) (((ut64) Rxx) | ((u >= ((ut32) 0x40)) ? 
((ut64) 0x0) : (((ut64) Rss) >> u)))); + RzILOpPure *op_GE_8 = UGE(VARL("u"), CAST(32, IL_FALSE, SN(32, 0x40))); + RzILOpPure *op_RSHIFT_11 = SHIFTR0(CAST(64, IL_FALSE, Rss), VARL("u")); + RzILOpPure *cond_13 = ITE(op_GE_8, CAST(64, IL_FALSE, SN(32, 0)), op_RSHIFT_11); + RzILOpPure *op_OR_15 = LOGOR(CAST(64, IL_FALSE, READ_REG(pkt, Rxx_op, false)), cond_13); + RzILOpEffect *op_ASSIGN_17 = WRITE_REG(bundle, Rxx_op, CAST(64, IL_FALSE, op_OR_15)); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_1, op_ASSIGN_17); + return instruction_sequence; +} + +// Rxx ^= lsr(Rss,Ii) +RzILOpEffect *hex_il_op_s2_lsr_i_p_xacc(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rxx_op = ISA2REG(hi, 'x', false); + + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + + // u = u; + RzILOpEffect *imm_assign_1 = SETL("u", u); + + // Rxx = ((st64) (((ut64) Rxx) ^ ((u >= ((ut32) 0x40)) ? 
((ut64) 0x0) : (((ut64) Rss) >> u)))); + RzILOpPure *op_GE_8 = UGE(VARL("u"), CAST(32, IL_FALSE, SN(32, 0x40))); + RzILOpPure *op_RSHIFT_11 = SHIFTR0(CAST(64, IL_FALSE, Rss), VARL("u")); + RzILOpPure *cond_13 = ITE(op_GE_8, CAST(64, IL_FALSE, SN(32, 0)), op_RSHIFT_11); + RzILOpPure *op_XOR_15 = LOGXOR(CAST(64, IL_FALSE, READ_REG(pkt, Rxx_op, false)), cond_13); + RzILOpEffect *op_ASSIGN_17 = WRITE_REG(bundle, Rxx_op, CAST(64, IL_FALSE, op_XOR_15)); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_1, op_ASSIGN_17); + return instruction_sequence; +} + +// Rd = lsr(Rs,Ii) +RzILOpEffect *hex_il_op_s2_lsr_i_r(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // u = u; + RzILOpEffect *imm_assign_1 = SETL("u", u); + + // Rd = ((st32) ((u >= ((ut32) 0x20)) ? ((ut32) 0x0) : (((ut32) Rs) >> u))); + RzILOpPure *op_GE_8 = UGE(VARL("u"), CAST(32, IL_FALSE, SN(32, 0x20))); + RzILOpPure *op_RSHIFT_11 = SHIFTR0(CAST(32, IL_FALSE, Rs), VARL("u")); + RzILOpPure *cond_13 = ITE(op_GE_8, CAST(32, IL_FALSE, SN(32, 0)), op_RSHIFT_11); + RzILOpEffect *op_ASSIGN_15 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, cond_13)); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_1, op_ASSIGN_15); + return instruction_sequence; +} + +// Rx += lsr(Rs,Ii) +RzILOpEffect *hex_il_op_s2_lsr_i_r_acc(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // u = u; + RzILOpEffect *imm_assign_1 = SETL("u", u); + + // Rx = ((st32) ((ut32) Rx) + ((u >= ((ut32) 0x20)) ? 
((ut32) 0x0) : (((ut32) Rs) >> u))); + RzILOpPure *op_GE_8 = UGE(VARL("u"), CAST(32, IL_FALSE, SN(32, 0x20))); + RzILOpPure *op_RSHIFT_11 = SHIFTR0(CAST(32, IL_FALSE, Rs), VARL("u")); + RzILOpPure *cond_13 = ITE(op_GE_8, CAST(32, IL_FALSE, SN(32, 0)), op_RSHIFT_11); + RzILOpPure *op_ADD_15 = ADD(CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false)), cond_13); + RzILOpEffect *op_ASSIGN_17 = WRITE_REG(bundle, Rx_op, CAST(32, IL_FALSE, op_ADD_15)); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_1, op_ASSIGN_17); + return instruction_sequence; +} + +// Rx &= lsr(Rs,Ii) +RzILOpEffect *hex_il_op_s2_lsr_i_r_and(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // u = u; + RzILOpEffect *imm_assign_1 = SETL("u", u); + + // Rx = ((st32) (((ut32) Rx) & ((u >= ((ut32) 0x20)) ? 
((ut32) 0x0) : (((ut32) Rs) >> u)))); + RzILOpPure *op_GE_8 = UGE(VARL("u"), CAST(32, IL_FALSE, SN(32, 0x20))); + RzILOpPure *op_RSHIFT_11 = SHIFTR0(CAST(32, IL_FALSE, Rs), VARL("u")); + RzILOpPure *cond_13 = ITE(op_GE_8, CAST(32, IL_FALSE, SN(32, 0)), op_RSHIFT_11); + RzILOpPure *op_AND_15 = LOGAND(CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false)), cond_13); + RzILOpEffect *op_ASSIGN_17 = WRITE_REG(bundle, Rx_op, CAST(32, IL_FALSE, op_AND_15)); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_1, op_ASSIGN_17); + return instruction_sequence; +} + +// Rx -= lsr(Rs,Ii) +RzILOpEffect *hex_il_op_s2_lsr_i_r_nac(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // u = u; + RzILOpEffect *imm_assign_1 = SETL("u", u); + + // Rx = ((st32) ((ut32) Rx) - ((u >= ((ut32) 0x20)) ? 
((ut32) 0x0) : (((ut32) Rs) >> u))); + RzILOpPure *op_GE_8 = UGE(VARL("u"), CAST(32, IL_FALSE, SN(32, 0x20))); + RzILOpPure *op_RSHIFT_11 = SHIFTR0(CAST(32, IL_FALSE, Rs), VARL("u")); + RzILOpPure *cond_13 = ITE(op_GE_8, CAST(32, IL_FALSE, SN(32, 0)), op_RSHIFT_11); + RzILOpPure *op_SUB_15 = SUB(CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false)), cond_13); + RzILOpEffect *op_ASSIGN_17 = WRITE_REG(bundle, Rx_op, CAST(32, IL_FALSE, op_SUB_15)); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_1, op_ASSIGN_17); + return instruction_sequence; +} + +// Rx |= lsr(Rs,Ii) +RzILOpEffect *hex_il_op_s2_lsr_i_r_or(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // u = u; + RzILOpEffect *imm_assign_1 = SETL("u", u); + + // Rx = ((st32) (((ut32) Rx) | ((u >= ((ut32) 0x20)) ? 
((ut32) 0x0) : (((ut32) Rs) >> u)))); + RzILOpPure *op_GE_8 = UGE(VARL("u"), CAST(32, IL_FALSE, SN(32, 0x20))); + RzILOpPure *op_RSHIFT_11 = SHIFTR0(CAST(32, IL_FALSE, Rs), VARL("u")); + RzILOpPure *cond_13 = ITE(op_GE_8, CAST(32, IL_FALSE, SN(32, 0)), op_RSHIFT_11); + RzILOpPure *op_OR_15 = LOGOR(CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false)), cond_13); + RzILOpEffect *op_ASSIGN_17 = WRITE_REG(bundle, Rx_op, CAST(32, IL_FALSE, op_OR_15)); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_1, op_ASSIGN_17); + return instruction_sequence; +} + +// Rx ^= lsr(Rs,Ii) +RzILOpEffect *hex_il_op_s2_lsr_i_r_xacc(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // u = u; + RzILOpEffect *imm_assign_1 = SETL("u", u); + + // Rx = ((st32) (((ut32) Rx) ^ ((u >= ((ut32) 0x20)) ? 
((ut32) 0x0) : (((ut32) Rs) >> u)))); + RzILOpPure *op_GE_8 = UGE(VARL("u"), CAST(32, IL_FALSE, SN(32, 0x20))); + RzILOpPure *op_RSHIFT_11 = SHIFTR0(CAST(32, IL_FALSE, Rs), VARL("u")); + RzILOpPure *cond_13 = ITE(op_GE_8, CAST(32, IL_FALSE, SN(32, 0)), op_RSHIFT_11); + RzILOpPure *op_XOR_15 = LOGXOR(CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false)), cond_13); + RzILOpEffect *op_ASSIGN_17 = WRITE_REG(bundle, Rx_op, CAST(32, IL_FALSE, op_XOR_15)); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_1, op_ASSIGN_17); + return instruction_sequence; +} + +// Rdd = vlsrh(Rss,Ii) +RzILOpEffect *hex_il_op_s2_lsr_i_vh(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: st32 i; + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_2 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_5 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp498 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_7 = SETL("h_tmp498", VARL("i")); + + // seq(h_tmp498 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_8 = SEQN(2, op_ASSIGN_hybrid_tmp_7, op_INC_5); + + // u = u; + RzILOpEffect *imm_assign_24 = SETL("u", u); + + // Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * 0x10)))) | (((ut64) (((st32) (((ut16) ((Rss >> i * 0x10) & ((st64) 0xffff))) >> u)) & 0xffff)) << i * 0x10))); + RzILOpPure *op_MUL_12 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_13 = SHIFTL0(SN(64, 0xffff), op_MUL_12); + RzILOpPure *op_NOT_14 = LOGNOT(op_LSHIFT_13); + RzILOpPure *op_AND_15 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_14); + RzILOpPure *op_MUL_18 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_19 = SHIFTRA(Rss, op_MUL_18); + RzILOpPure *op_AND_22 = LOGAND(op_RSHIFT_19, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_26 = 
SHIFTR0(CAST(16, IL_FALSE, op_AND_22), VARL("u")); + RzILOpPure *op_AND_29 = LOGAND(CAST(32, IL_FALSE, op_RSHIFT_26), SN(32, 0xffff)); + RzILOpPure *op_MUL_32 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_33 = SHIFTL0(CAST(64, IL_FALSE, op_AND_29), op_MUL_32); + RzILOpPure *op_OR_35 = LOGOR(CAST(64, IL_FALSE, op_AND_15), op_LSHIFT_33); + RzILOpEffect *op_ASSIGN_37 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_35)); + + // seq(h_tmp498; Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * 0x1 ...; + RzILOpEffect *seq_39 = op_ASSIGN_37; + + // seq(seq(h_tmp498; Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * ...; + RzILOpEffect *seq_40 = SEQN(2, seq_39, seq_8); + + // while ((i < 0x4)) { seq(seq(h_tmp498; Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * ... }; + RzILOpPure *op_LT_4 = SLT(VARL("i"), SN(32, 4)); + RzILOpEffect *for_41 = REPEAT(op_LT_4, seq_40); + + // seq(i = 0x0; while ((i < 0x4)) { seq(seq(h_tmp498; Rdd = ((st64) ...; + RzILOpEffect *seq_42 = SEQN(2, op_ASSIGN_2, for_41); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_24, seq_42); + return instruction_sequence; +} + +// Rdd = vlsrw(Rss,Ii) +RzILOpEffect *hex_il_op_s2_lsr_i_vw(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: st32 i; + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_2 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_5 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp499 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_7 = SETL("h_tmp499", VARL("i")); + + // seq(h_tmp499 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_8 = SEQN(2, op_ASSIGN_hybrid_tmp_7, op_INC_5); + + // u = u; + RzILOpEffect *imm_assign_24 = SETL("u", u); + + // Rdd = ((st64) (((ut64) (Rdd & (~(0xffffffff << i * 0x20)))) | (((((ut64) ((ut32) 
((Rss >> i * 0x20) & 0xffffffff))) >> u) & ((ut64) 0xffffffff)) << i * 0x20))); + RzILOpPure *op_MUL_12 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_LSHIFT_13 = SHIFTL0(SN(64, 0xffffffff), op_MUL_12); + RzILOpPure *op_NOT_14 = LOGNOT(op_LSHIFT_13); + RzILOpPure *op_AND_15 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_14); + RzILOpPure *op_MUL_18 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_RSHIFT_19 = SHIFTRA(Rss, op_MUL_18); + RzILOpPure *op_AND_21 = LOGAND(op_RSHIFT_19, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_26 = SHIFTR0(CAST(64, IL_FALSE, CAST(32, IL_FALSE, op_AND_21)), VARL("u")); + RzILOpPure *op_AND_29 = LOGAND(op_RSHIFT_26, CAST(64, IL_FALSE, SN(64, 0xffffffff))); + RzILOpPure *op_MUL_31 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_LSHIFT_32 = SHIFTL0(op_AND_29, op_MUL_31); + RzILOpPure *op_OR_34 = LOGOR(CAST(64, IL_FALSE, op_AND_15), op_LSHIFT_32); + RzILOpEffect *op_ASSIGN_36 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_34)); + + // seq(h_tmp499; Rdd = ((st64) (((ut64) (Rdd & (~(0xffffffff << i * ...; + RzILOpEffect *seq_38 = op_ASSIGN_36; + + // seq(seq(h_tmp499; Rdd = ((st64) (((ut64) (Rdd & (~(0xffffffff << ...; + RzILOpEffect *seq_39 = SEQN(2, seq_38, seq_8); + + // while ((i < 0x2)) { seq(seq(h_tmp499; Rdd = ((st64) (((ut64) (Rdd & (~(0xffffffff << ... 
}; + RzILOpPure *op_LT_4 = SLT(VARL("i"), SN(32, 2)); + RzILOpEffect *for_40 = REPEAT(op_LT_4, seq_39); + + // seq(i = 0x0; while ((i < 0x2)) { seq(seq(h_tmp499; Rdd = ((st64) ...; + RzILOpEffect *seq_41 = SEQN(2, op_ASSIGN_2, for_40); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_24, seq_41); + return instruction_sequence; +} + +// Rdd = lsr(Rss,Rt) +RzILOpEffect *hex_il_op_s2_lsr_r_p(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + // Declare: st32 shamt; + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + + // shamt = ((st32) sextract64(((ut64) Rt), 0x0, 0x7)); + RzILOpEffect *op_ASSIGN_10 = SETL("shamt", CAST(32, MSB(SEXTRACT64(CAST(64, IL_FALSE, Rt), SN(32, 0), SN(32, 7))), SEXTRACT64(CAST(64, IL_FALSE, DUP(Rt)), SN(32, 0), SN(32, 7)))); + + // Rdd = ((st64) ((shamt < 0x0) ? 
((((ut64) Rss) << (-shamt) - 0x1) << 0x1) : (((ut64) Rss) >> shamt))); + RzILOpPure *op_LT_14 = SLT(VARL("shamt"), SN(32, 0)); + RzILOpPure *op_NEG_17 = NEG(VARL("shamt")); + RzILOpPure *op_SUB_19 = SUB(op_NEG_17, SN(32, 1)); + RzILOpPure *op_LSHIFT_20 = SHIFTL0(CAST(64, IL_FALSE, Rss), op_SUB_19); + RzILOpPure *op_LSHIFT_22 = SHIFTL0(op_LSHIFT_20, SN(32, 1)); + RzILOpPure *op_RSHIFT_24 = SHIFTR0(CAST(64, IL_FALSE, DUP(Rss)), VARL("shamt")); + RzILOpPure *cond_25 = ITE(op_LT_14, op_LSHIFT_22, op_RSHIFT_24); + RzILOpEffect *op_ASSIGN_27 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, cond_25)); + + RzILOpEffect *instruction_sequence = SEQN(2, op_ASSIGN_10, op_ASSIGN_27); + return instruction_sequence; +} + +// Rxx += lsr(Rss,Rt) +RzILOpEffect *hex_il_op_s2_lsr_r_p_acc(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + // Declare: st32 shamt; + const HexOp *Rxx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + + // shamt = ((st32) sextract64(((ut64) Rt), 0x0, 0x7)); + RzILOpEffect *op_ASSIGN_10 = SETL("shamt", CAST(32, MSB(SEXTRACT64(CAST(64, IL_FALSE, Rt), SN(32, 0), SN(32, 7))), SEXTRACT64(CAST(64, IL_FALSE, DUP(Rt)), SN(32, 0), SN(32, 7)))); + + // Rxx = ((st64) ((ut64) Rxx) + ((shamt < 0x0) ? 
((((ut64) Rss) << (-shamt) - 0x1) << 0x1) : (((ut64) Rss) >> shamt))); + RzILOpPure *op_LT_14 = SLT(VARL("shamt"), SN(32, 0)); + RzILOpPure *op_NEG_17 = NEG(VARL("shamt")); + RzILOpPure *op_SUB_19 = SUB(op_NEG_17, SN(32, 1)); + RzILOpPure *op_LSHIFT_20 = SHIFTL0(CAST(64, IL_FALSE, Rss), op_SUB_19); + RzILOpPure *op_LSHIFT_22 = SHIFTL0(op_LSHIFT_20, SN(32, 1)); + RzILOpPure *op_RSHIFT_24 = SHIFTR0(CAST(64, IL_FALSE, DUP(Rss)), VARL("shamt")); + RzILOpPure *cond_25 = ITE(op_LT_14, op_LSHIFT_22, op_RSHIFT_24); + RzILOpPure *op_ADD_27 = ADD(CAST(64, IL_FALSE, READ_REG(pkt, Rxx_op, false)), cond_25); + RzILOpEffect *op_ASSIGN_29 = WRITE_REG(bundle, Rxx_op, CAST(64, IL_FALSE, op_ADD_27)); + + RzILOpEffect *instruction_sequence = SEQN(2, op_ASSIGN_10, op_ASSIGN_29); + return instruction_sequence; +} + +// Rxx &= lsr(Rss,Rt) +RzILOpEffect *hex_il_op_s2_lsr_r_p_and(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + // Declare: st32 shamt; + const HexOp *Rxx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + + // shamt = ((st32) sextract64(((ut64) Rt), 0x0, 0x7)); + RzILOpEffect *op_ASSIGN_10 = SETL("shamt", CAST(32, MSB(SEXTRACT64(CAST(64, IL_FALSE, Rt), SN(32, 0), SN(32, 7))), SEXTRACT64(CAST(64, IL_FALSE, DUP(Rt)), SN(32, 0), SN(32, 7)))); + + // Rxx = ((st64) (((ut64) Rxx) & ((shamt < 0x0) ? 
((((ut64) Rss) << (-shamt) - 0x1) << 0x1) : (((ut64) Rss) >> shamt)))); + RzILOpPure *op_LT_14 = SLT(VARL("shamt"), SN(32, 0)); + RzILOpPure *op_NEG_17 = NEG(VARL("shamt")); + RzILOpPure *op_SUB_19 = SUB(op_NEG_17, SN(32, 1)); + RzILOpPure *op_LSHIFT_20 = SHIFTL0(CAST(64, IL_FALSE, Rss), op_SUB_19); + RzILOpPure *op_LSHIFT_22 = SHIFTL0(op_LSHIFT_20, SN(32, 1)); + RzILOpPure *op_RSHIFT_24 = SHIFTR0(CAST(64, IL_FALSE, DUP(Rss)), VARL("shamt")); + RzILOpPure *cond_25 = ITE(op_LT_14, op_LSHIFT_22, op_RSHIFT_24); + RzILOpPure *op_AND_27 = LOGAND(CAST(64, IL_FALSE, READ_REG(pkt, Rxx_op, false)), cond_25); + RzILOpEffect *op_ASSIGN_29 = WRITE_REG(bundle, Rxx_op, CAST(64, IL_FALSE, op_AND_27)); + + RzILOpEffect *instruction_sequence = SEQN(2, op_ASSIGN_10, op_ASSIGN_29); + return instruction_sequence; +} + +// Rxx -= lsr(Rss,Rt) +RzILOpEffect *hex_il_op_s2_lsr_r_p_nac(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + // Declare: st32 shamt; + const HexOp *Rxx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + + // shamt = ((st32) sextract64(((ut64) Rt), 0x0, 0x7)); + RzILOpEffect *op_ASSIGN_10 = SETL("shamt", CAST(32, MSB(SEXTRACT64(CAST(64, IL_FALSE, Rt), SN(32, 0), SN(32, 7))), SEXTRACT64(CAST(64, IL_FALSE, DUP(Rt)), SN(32, 0), SN(32, 7)))); + + // Rxx = ((st64) ((ut64) Rxx) - ((shamt < 0x0) ? 
((((ut64) Rss) << (-shamt) - 0x1) << 0x1) : (((ut64) Rss) >> shamt))); + RzILOpPure *op_LT_14 = SLT(VARL("shamt"), SN(32, 0)); + RzILOpPure *op_NEG_17 = NEG(VARL("shamt")); + RzILOpPure *op_SUB_19 = SUB(op_NEG_17, SN(32, 1)); + RzILOpPure *op_LSHIFT_20 = SHIFTL0(CAST(64, IL_FALSE, Rss), op_SUB_19); + RzILOpPure *op_LSHIFT_22 = SHIFTL0(op_LSHIFT_20, SN(32, 1)); + RzILOpPure *op_RSHIFT_24 = SHIFTR0(CAST(64, IL_FALSE, DUP(Rss)), VARL("shamt")); + RzILOpPure *cond_25 = ITE(op_LT_14, op_LSHIFT_22, op_RSHIFT_24); + RzILOpPure *op_SUB_27 = SUB(CAST(64, IL_FALSE, READ_REG(pkt, Rxx_op, false)), cond_25); + RzILOpEffect *op_ASSIGN_29 = WRITE_REG(bundle, Rxx_op, CAST(64, IL_FALSE, op_SUB_27)); + + RzILOpEffect *instruction_sequence = SEQN(2, op_ASSIGN_10, op_ASSIGN_29); + return instruction_sequence; +} + +// Rxx |= lsr(Rss,Rt) +RzILOpEffect *hex_il_op_s2_lsr_r_p_or(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + // Declare: st32 shamt; + const HexOp *Rxx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + + // shamt = ((st32) sextract64(((ut64) Rt), 0x0, 0x7)); + RzILOpEffect *op_ASSIGN_10 = SETL("shamt", CAST(32, MSB(SEXTRACT64(CAST(64, IL_FALSE, Rt), SN(32, 0), SN(32, 7))), SEXTRACT64(CAST(64, IL_FALSE, DUP(Rt)), SN(32, 0), SN(32, 7)))); + + // Rxx = ((st64) (((ut64) Rxx) | ((shamt < 0x0) ? 
((((ut64) Rss) << (-shamt) - 0x1) << 0x1) : (((ut64) Rss) >> shamt)))); + RzILOpPure *op_LT_14 = SLT(VARL("shamt"), SN(32, 0)); + RzILOpPure *op_NEG_17 = NEG(VARL("shamt")); + RzILOpPure *op_SUB_19 = SUB(op_NEG_17, SN(32, 1)); + RzILOpPure *op_LSHIFT_20 = SHIFTL0(CAST(64, IL_FALSE, Rss), op_SUB_19); + RzILOpPure *op_LSHIFT_22 = SHIFTL0(op_LSHIFT_20, SN(32, 1)); + RzILOpPure *op_RSHIFT_24 = SHIFTR0(CAST(64, IL_FALSE, DUP(Rss)), VARL("shamt")); + RzILOpPure *cond_25 = ITE(op_LT_14, op_LSHIFT_22, op_RSHIFT_24); + RzILOpPure *op_OR_27 = LOGOR(CAST(64, IL_FALSE, READ_REG(pkt, Rxx_op, false)), cond_25); + RzILOpEffect *op_ASSIGN_29 = WRITE_REG(bundle, Rxx_op, CAST(64, IL_FALSE, op_OR_27)); + + RzILOpEffect *instruction_sequence = SEQN(2, op_ASSIGN_10, op_ASSIGN_29); + return instruction_sequence; +} + +// Rxx ^= lsr(Rss,Rt) +RzILOpEffect *hex_il_op_s2_lsr_r_p_xor(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + // Declare: st32 shamt; + const HexOp *Rxx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + + // shamt = ((st32) sextract64(((ut64) Rt), 0x0, 0x7)); + RzILOpEffect *op_ASSIGN_10 = SETL("shamt", CAST(32, MSB(SEXTRACT64(CAST(64, IL_FALSE, Rt), SN(32, 0), SN(32, 7))), SEXTRACT64(CAST(64, IL_FALSE, DUP(Rt)), SN(32, 0), SN(32, 7)))); + + // Rxx = ((st64) (((ut64) Rxx) ^ ((shamt < 0x0) ? 
((((ut64) Rss) << (-shamt) - 0x1) << 0x1) : (((ut64) Rss) >> shamt)))); + RzILOpPure *op_LT_14 = SLT(VARL("shamt"), SN(32, 0)); + RzILOpPure *op_NEG_17 = NEG(VARL("shamt")); + RzILOpPure *op_SUB_19 = SUB(op_NEG_17, SN(32, 1)); + RzILOpPure *op_LSHIFT_20 = SHIFTL0(CAST(64, IL_FALSE, Rss), op_SUB_19); + RzILOpPure *op_LSHIFT_22 = SHIFTL0(op_LSHIFT_20, SN(32, 1)); + RzILOpPure *op_RSHIFT_24 = SHIFTR0(CAST(64, IL_FALSE, DUP(Rss)), VARL("shamt")); + RzILOpPure *cond_25 = ITE(op_LT_14, op_LSHIFT_22, op_RSHIFT_24); + RzILOpPure *op_XOR_27 = LOGXOR(CAST(64, IL_FALSE, READ_REG(pkt, Rxx_op, false)), cond_25); + RzILOpEffect *op_ASSIGN_29 = WRITE_REG(bundle, Rxx_op, CAST(64, IL_FALSE, op_XOR_27)); + + RzILOpEffect *instruction_sequence = SEQN(2, op_ASSIGN_10, op_ASSIGN_29); + return instruction_sequence; +} + +// Rd = lsr(Rs,Rt) +RzILOpEffect *hex_il_op_s2_lsr_r_r(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + // Declare: st32 shamt; + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // shamt = ((st32) sextract64(((ut64) Rt), 0x0, 0x7)); + RzILOpEffect *op_ASSIGN_10 = SETL("shamt", CAST(32, MSB(SEXTRACT64(CAST(64, IL_FALSE, Rt), SN(32, 0), SN(32, 7))), SEXTRACT64(CAST(64, IL_FALSE, DUP(Rt)), SN(32, 0), SN(32, 7)))); + + // Rd = ((st32) ((shamt < 0x0) ? 
((((ut64) ((ut32) Rs)) << (-shamt) - 0x1) << 0x1) : (((ut64) ((ut32) Rs)) >> shamt))); + RzILOpPure *op_LT_14 = SLT(VARL("shamt"), SN(32, 0)); + RzILOpPure *op_NEG_18 = NEG(VARL("shamt")); + RzILOpPure *op_SUB_20 = SUB(op_NEG_18, SN(32, 1)); + RzILOpPure *op_LSHIFT_21 = SHIFTL0(CAST(64, IL_FALSE, CAST(32, IL_FALSE, Rs)), op_SUB_20); + RzILOpPure *op_LSHIFT_23 = SHIFTL0(op_LSHIFT_21, SN(32, 1)); + RzILOpPure *op_RSHIFT_26 = SHIFTR0(CAST(64, IL_FALSE, CAST(32, IL_FALSE, DUP(Rs))), VARL("shamt")); + RzILOpPure *cond_27 = ITE(op_LT_14, op_LSHIFT_23, op_RSHIFT_26); + RzILOpEffect *op_ASSIGN_29 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, cond_27)); + + RzILOpEffect *instruction_sequence = SEQN(2, op_ASSIGN_10, op_ASSIGN_29); + return instruction_sequence; +} + +// Rx += lsr(Rs,Rt) +RzILOpEffect *hex_il_op_s2_lsr_r_r_acc(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + // Declare: st32 shamt; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // shamt = ((st32) sextract64(((ut64) Rt), 0x0, 0x7)); + RzILOpEffect *op_ASSIGN_10 = SETL("shamt", CAST(32, MSB(SEXTRACT64(CAST(64, IL_FALSE, Rt), SN(32, 0), SN(32, 7))), SEXTRACT64(CAST(64, IL_FALSE, DUP(Rt)), SN(32, 0), SN(32, 7)))); + + // Rx = ((st32) ((ut64) Rx) + ((shamt < 0x0) ? 
((((ut64) ((ut32) Rs)) << (-shamt) - 0x1) << 0x1) : (((ut64) ((ut32) Rs)) >> shamt))); + RzILOpPure *op_LT_14 = SLT(VARL("shamt"), SN(32, 0)); + RzILOpPure *op_NEG_18 = NEG(VARL("shamt")); + RzILOpPure *op_SUB_20 = SUB(op_NEG_18, SN(32, 1)); + RzILOpPure *op_LSHIFT_21 = SHIFTL0(CAST(64, IL_FALSE, CAST(32, IL_FALSE, Rs)), op_SUB_20); + RzILOpPure *op_LSHIFT_23 = SHIFTL0(op_LSHIFT_21, SN(32, 1)); + RzILOpPure *op_RSHIFT_26 = SHIFTR0(CAST(64, IL_FALSE, CAST(32, IL_FALSE, DUP(Rs))), VARL("shamt")); + RzILOpPure *cond_27 = ITE(op_LT_14, op_LSHIFT_23, op_RSHIFT_26); + RzILOpPure *op_ADD_29 = ADD(CAST(64, IL_FALSE, READ_REG(pkt, Rx_op, false)), cond_27); + RzILOpEffect *op_ASSIGN_31 = WRITE_REG(bundle, Rx_op, CAST(32, IL_FALSE, op_ADD_29)); + + RzILOpEffect *instruction_sequence = SEQN(2, op_ASSIGN_10, op_ASSIGN_31); + return instruction_sequence; +} + +// Rx &= lsr(Rs,Rt) +RzILOpEffect *hex_il_op_s2_lsr_r_r_and(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + // Declare: st32 shamt; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // shamt = ((st32) sextract64(((ut64) Rt), 0x0, 0x7)); + RzILOpEffect *op_ASSIGN_10 = SETL("shamt", CAST(32, MSB(SEXTRACT64(CAST(64, IL_FALSE, Rt), SN(32, 0), SN(32, 7))), SEXTRACT64(CAST(64, IL_FALSE, DUP(Rt)), SN(32, 0), SN(32, 7)))); + + // Rx = ((st32) (((ut64) Rx) & ((shamt < 0x0) ? 
((((ut64) ((ut32) Rs)) << (-shamt) - 0x1) << 0x1) : (((ut64) ((ut32) Rs)) >> shamt)))); + RzILOpPure *op_LT_14 = SLT(VARL("shamt"), SN(32, 0)); + RzILOpPure *op_NEG_18 = NEG(VARL("shamt")); + RzILOpPure *op_SUB_20 = SUB(op_NEG_18, SN(32, 1)); + RzILOpPure *op_LSHIFT_21 = SHIFTL0(CAST(64, IL_FALSE, CAST(32, IL_FALSE, Rs)), op_SUB_20); + RzILOpPure *op_LSHIFT_23 = SHIFTL0(op_LSHIFT_21, SN(32, 1)); + RzILOpPure *op_RSHIFT_26 = SHIFTR0(CAST(64, IL_FALSE, CAST(32, IL_FALSE, DUP(Rs))), VARL("shamt")); + RzILOpPure *cond_27 = ITE(op_LT_14, op_LSHIFT_23, op_RSHIFT_26); + RzILOpPure *op_AND_29 = LOGAND(CAST(64, IL_FALSE, READ_REG(pkt, Rx_op, false)), cond_27); + RzILOpEffect *op_ASSIGN_31 = WRITE_REG(bundle, Rx_op, CAST(32, IL_FALSE, op_AND_29)); + + RzILOpEffect *instruction_sequence = SEQN(2, op_ASSIGN_10, op_ASSIGN_31); + return instruction_sequence; +} + +// Rx -= lsr(Rs,Rt) +RzILOpEffect *hex_il_op_s2_lsr_r_r_nac(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + // Declare: st32 shamt; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // shamt = ((st32) sextract64(((ut64) Rt), 0x0, 0x7)); + RzILOpEffect *op_ASSIGN_10 = SETL("shamt", CAST(32, MSB(SEXTRACT64(CAST(64, IL_FALSE, Rt), SN(32, 0), SN(32, 7))), SEXTRACT64(CAST(64, IL_FALSE, DUP(Rt)), SN(32, 0), SN(32, 7)))); + + // Rx = ((st32) ((ut64) Rx) - ((shamt < 0x0) ? 
((((ut64) ((ut32) Rs)) << (-shamt) - 0x1) << 0x1) : (((ut64) ((ut32) Rs)) >> shamt))); + RzILOpPure *op_LT_14 = SLT(VARL("shamt"), SN(32, 0)); + RzILOpPure *op_NEG_18 = NEG(VARL("shamt")); + RzILOpPure *op_SUB_20 = SUB(op_NEG_18, SN(32, 1)); + RzILOpPure *op_LSHIFT_21 = SHIFTL0(CAST(64, IL_FALSE, CAST(32, IL_FALSE, Rs)), op_SUB_20); + RzILOpPure *op_LSHIFT_23 = SHIFTL0(op_LSHIFT_21, SN(32, 1)); + RzILOpPure *op_RSHIFT_26 = SHIFTR0(CAST(64, IL_FALSE, CAST(32, IL_FALSE, DUP(Rs))), VARL("shamt")); + RzILOpPure *cond_27 = ITE(op_LT_14, op_LSHIFT_23, op_RSHIFT_26); + RzILOpPure *op_SUB_29 = SUB(CAST(64, IL_FALSE, READ_REG(pkt, Rx_op, false)), cond_27); + RzILOpEffect *op_ASSIGN_31 = WRITE_REG(bundle, Rx_op, CAST(32, IL_FALSE, op_SUB_29)); + + RzILOpEffect *instruction_sequence = SEQN(2, op_ASSIGN_10, op_ASSIGN_31); + return instruction_sequence; +} + +// Rx |= lsr(Rs,Rt) +RzILOpEffect *hex_il_op_s2_lsr_r_r_or(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + // Declare: st32 shamt; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // shamt = ((st32) sextract64(((ut64) Rt), 0x0, 0x7)); + RzILOpEffect *op_ASSIGN_10 = SETL("shamt", CAST(32, MSB(SEXTRACT64(CAST(64, IL_FALSE, Rt), SN(32, 0), SN(32, 7))), SEXTRACT64(CAST(64, IL_FALSE, DUP(Rt)), SN(32, 0), SN(32, 7)))); + + // Rx = ((st32) (((ut64) Rx) | ((shamt < 0x0) ? 
((((ut64) ((ut32) Rs)) << (-shamt) - 0x1) << 0x1) : (((ut64) ((ut32) Rs)) >> shamt)))); + RzILOpPure *op_LT_14 = SLT(VARL("shamt"), SN(32, 0)); + RzILOpPure *op_NEG_18 = NEG(VARL("shamt")); + RzILOpPure *op_SUB_20 = SUB(op_NEG_18, SN(32, 1)); + RzILOpPure *op_LSHIFT_21 = SHIFTL0(CAST(64, IL_FALSE, CAST(32, IL_FALSE, Rs)), op_SUB_20); + RzILOpPure *op_LSHIFT_23 = SHIFTL0(op_LSHIFT_21, SN(32, 1)); + RzILOpPure *op_RSHIFT_26 = SHIFTR0(CAST(64, IL_FALSE, CAST(32, IL_FALSE, DUP(Rs))), VARL("shamt")); + RzILOpPure *cond_27 = ITE(op_LT_14, op_LSHIFT_23, op_RSHIFT_26); + RzILOpPure *op_OR_29 = LOGOR(CAST(64, IL_FALSE, READ_REG(pkt, Rx_op, false)), cond_27); + RzILOpEffect *op_ASSIGN_31 = WRITE_REG(bundle, Rx_op, CAST(32, IL_FALSE, op_OR_29)); + + RzILOpEffect *instruction_sequence = SEQN(2, op_ASSIGN_10, op_ASSIGN_31); + return instruction_sequence; +} + +// Rdd = vlsrh(Rss,Rt) +RzILOpEffect *hex_il_op_s2_lsr_r_vh(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: st32 i; + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_2 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_5 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp500 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_7 = SETL("h_tmp500", VARL("i")); + + // seq(h_tmp500 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_8 = SEQN(2, op_ASSIGN_hybrid_tmp_7, op_INC_5); + + // Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * 0x10)))) | ((((sextract64(((ut64) Rt), 0x0, 0x7) < ((st64) 0x0)) ? 
((((ut64) ((ut16) ((Rss >> i * 0x10) & ((st64) 0xffff)))) << (-sextract64(((ut64) Rt), 0x0, 0x7)) - ((st64) 0x1)) << 0x1) : (((ut64) ((ut16) ((Rss >> i * 0x10) & ((st64) 0xffff)))) >> sextract64(((ut64) Rt), 0x0, 0x7))) & ((ut64) 0xffff)) << i * 0x10))); + RzILOpPure *op_MUL_12 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_13 = SHIFTL0(SN(64, 0xffff), op_MUL_12); + RzILOpPure *op_NOT_14 = LOGNOT(op_LSHIFT_13); + RzILOpPure *op_AND_15 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_14); + RzILOpPure *op_LT_27 = SLT(SEXTRACT64(CAST(64, IL_FALSE, Rt), SN(32, 0), SN(32, 7)), CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_MUL_30 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_31 = SHIFTRA(Rss, op_MUL_30); + RzILOpPure *op_AND_34 = LOGAND(op_RSHIFT_31, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_NEG_45 = NEG(SEXTRACT64(CAST(64, IL_FALSE, DUP(Rt)), SN(32, 0), SN(32, 7))); + RzILOpPure *op_SUB_48 = SUB(op_NEG_45, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *op_LSHIFT_49 = SHIFTL0(CAST(64, IL_FALSE, CAST(16, IL_FALSE, op_AND_34)), op_SUB_48); + RzILOpPure *op_LSHIFT_51 = SHIFTL0(op_LSHIFT_49, SN(32, 1)); + RzILOpPure *op_MUL_53 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_54 = SHIFTRA(DUP(Rss), op_MUL_53); + RzILOpPure *op_AND_57 = LOGAND(op_RSHIFT_54, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_68 = SHIFTR0(CAST(64, IL_FALSE, CAST(16, IL_FALSE, op_AND_57)), SEXTRACT64(CAST(64, IL_FALSE, DUP(Rt)), SN(32, 0), SN(32, 7))); + RzILOpPure *cond_69 = ITE(op_LT_27, op_LSHIFT_51, op_RSHIFT_68); + RzILOpPure *op_AND_72 = LOGAND(cond_69, CAST(64, IL_FALSE, SN(32, 0xffff))); + RzILOpPure *op_MUL_74 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_75 = SHIFTL0(op_AND_72, op_MUL_74); + RzILOpPure *op_OR_77 = LOGOR(CAST(64, IL_FALSE, op_AND_15), op_LSHIFT_75); + RzILOpEffect *op_ASSIGN_79 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_77)); + + // seq(h_tmp500; Rdd = ((st64) (((ut64) (Rdd & 
(~(0xffff << i * 0x1 ...; + RzILOpEffect *seq_81 = op_ASSIGN_79; + + // seq(seq(h_tmp500; Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * ...; + RzILOpEffect *seq_82 = SEQN(2, seq_81, seq_8); + + // while ((i < 0x4)) { seq(seq(h_tmp500; Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * ... }; + RzILOpPure *op_LT_4 = SLT(VARL("i"), SN(32, 4)); + RzILOpEffect *for_83 = REPEAT(op_LT_4, seq_82); + + // seq(i = 0x0; while ((i < 0x4)) { seq(seq(h_tmp500; Rdd = ((st64) ...; + RzILOpEffect *seq_84 = SEQN(2, op_ASSIGN_2, for_83); + + RzILOpEffect *instruction_sequence = seq_84; + return instruction_sequence; +} + +// Rdd = vlsrw(Rss,Rt) +RzILOpEffect *hex_il_op_s2_lsr_r_vw(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: st32 i; + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_2 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_5 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp501 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_7 = SETL("h_tmp501", VARL("i")); + + // seq(h_tmp501 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_8 = SEQN(2, op_ASSIGN_hybrid_tmp_7, op_INC_5); + + // Rdd = ((st64) (((ut64) (Rdd & (~(0xffffffff << i * 0x20)))) | ((((sextract64(((ut64) Rt), 0x0, 0x7) < ((st64) 0x0)) ? 
((((ut64) ((ut32) ((ut64) ((ut32) ((Rss >> i * 0x20) & 0xffffffff))))) << (-sextract64(((ut64) Rt), 0x0, 0x7)) - ((st64) 0x1)) << 0x1) : (((ut64) ((ut32) ((ut64) ((ut32) ((Rss >> i * 0x20) & 0xffffffff))))) >> sextract64(((ut64) Rt), 0x0, 0x7))) & ((ut64) 0xffffffff)) << i * 0x20))); + RzILOpPure *op_MUL_12 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_LSHIFT_13 = SHIFTL0(SN(64, 0xffffffff), op_MUL_12); + RzILOpPure *op_NOT_14 = LOGNOT(op_LSHIFT_13); + RzILOpPure *op_AND_15 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_14); + RzILOpPure *op_LT_27 = SLT(SEXTRACT64(CAST(64, IL_FALSE, Rt), SN(32, 0), SN(32, 7)), CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_MUL_30 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_RSHIFT_31 = SHIFTRA(Rss, op_MUL_30); + RzILOpPure *op_AND_33 = LOGAND(op_RSHIFT_31, SN(64, 0xffffffff)); + RzILOpPure *op_NEG_46 = NEG(SEXTRACT64(CAST(64, IL_FALSE, DUP(Rt)), SN(32, 0), SN(32, 7))); + RzILOpPure *op_SUB_49 = SUB(op_NEG_46, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *op_LSHIFT_50 = SHIFTL0(CAST(64, IL_FALSE, CAST(32, IL_FALSE, CAST(64, IL_FALSE, CAST(32, IL_FALSE, op_AND_33)))), op_SUB_49); + RzILOpPure *op_LSHIFT_52 = SHIFTL0(op_LSHIFT_50, SN(32, 1)); + RzILOpPure *op_MUL_54 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_RSHIFT_55 = SHIFTRA(DUP(Rss), op_MUL_54); + RzILOpPure *op_AND_57 = LOGAND(op_RSHIFT_55, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_70 = SHIFTR0(CAST(64, IL_FALSE, CAST(32, IL_FALSE, CAST(64, IL_FALSE, CAST(32, IL_FALSE, op_AND_57)))), SEXTRACT64(CAST(64, IL_FALSE, DUP(Rt)), SN(32, 0), SN(32, 7))); + RzILOpPure *cond_71 = ITE(op_LT_27, op_LSHIFT_52, op_RSHIFT_70); + RzILOpPure *op_AND_74 = LOGAND(cond_71, CAST(64, IL_FALSE, SN(64, 0xffffffff))); + RzILOpPure *op_MUL_76 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_LSHIFT_77 = SHIFTL0(op_AND_74, op_MUL_76); + RzILOpPure *op_OR_79 = LOGOR(CAST(64, IL_FALSE, op_AND_15), op_LSHIFT_77); + RzILOpEffect *op_ASSIGN_81 = WRITE_REG(bundle, Rdd_op, CAST(64, 
IL_FALSE, op_OR_79)); + + // seq(h_tmp501; Rdd = ((st64) (((ut64) (Rdd & (~(0xffffffff << i * ...; + RzILOpEffect *seq_83 = op_ASSIGN_81; + + // seq(seq(h_tmp501; Rdd = ((st64) (((ut64) (Rdd & (~(0xffffffff << ...; + RzILOpEffect *seq_84 = SEQN(2, seq_83, seq_8); + + // while ((i < 0x2)) { seq(seq(h_tmp501; Rdd = ((st64) (((ut64) (Rdd & (~(0xffffffff << ... }; + RzILOpPure *op_LT_4 = SLT(VARL("i"), SN(32, 2)); + RzILOpEffect *for_85 = REPEAT(op_LT_4, seq_84); + + // seq(i = 0x0; while ((i < 0x2)) { seq(seq(h_tmp501; Rdd = ((st64) ...; + RzILOpEffect *seq_86 = SEQN(2, op_ASSIGN_2, for_85); + + RzILOpEffect *instruction_sequence = seq_86; + return instruction_sequence; +} + +// Rd = mask(Ii,II) +RzILOpEffect *hex_il_op_s2_mask(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + RzILOpPure *U = UN(32, (ut32)ISA2IMM(hi, 'U')); + + // u = u; + RzILOpEffect *imm_assign_2 = SETL("u", u); + + // U = U; + RzILOpEffect *imm_assign_7 = SETL("U", U); + + // Rd = ((0x1 << u) - 0x1 << U); + RzILOpPure *op_LSHIFT_4 = SHIFTL0(SN(32, 1), VARL("u")); + RzILOpPure *op_SUB_6 = SUB(op_LSHIFT_4, SN(32, 1)); + RzILOpPure *op_LSHIFT_9 = SHIFTL0(op_SUB_6, VARL("U")); + RzILOpEffect *op_ASSIGN_10 = WRITE_REG(bundle, Rd_op, op_LSHIFT_9); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_2, imm_assign_7, op_ASSIGN_10); + return instruction_sequence; +} + +// Rdd = packhl(Rs,Rt) +RzILOpEffect *hex_il_op_s2_packhl(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << 0x0)))) | (((ut64) (((st32) ((st16) ((Rt >> 0x0) & 0xffff))) & 
0xffff)) << 0x0))); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(SN(64, 0xffff), SN(32, 0)); + RzILOpPure *op_NOT_6 = LOGNOT(op_LSHIFT_5); + RzILOpPure *op_AND_7 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_6); + RzILOpPure *op_RSHIFT_12 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_14 = LOGAND(op_RSHIFT_12, SN(32, 0xffff)); + RzILOpPure *op_AND_18 = LOGAND(CAST(32, MSB(CAST(16, MSB(op_AND_14), DUP(op_AND_14))), CAST(16, MSB(DUP(op_AND_14)), DUP(op_AND_14))), SN(32, 0xffff)); + RzILOpPure *op_LSHIFT_23 = SHIFTL0(CAST(64, IL_FALSE, op_AND_18), SN(32, 0)); + RzILOpPure *op_OR_25 = LOGOR(CAST(64, IL_FALSE, op_AND_7), op_LSHIFT_23); + RzILOpEffect *op_ASSIGN_27 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_25)); + + // Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << 0x10)))) | (((ut64) (((st32) ((st16) ((Rs >> 0x0) & 0xffff))) & 0xffff)) << 0x10))); + RzILOpPure *op_LSHIFT_33 = SHIFTL0(SN(64, 0xffff), SN(32, 16)); + RzILOpPure *op_NOT_34 = LOGNOT(op_LSHIFT_33); + RzILOpPure *op_AND_35 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_34); + RzILOpPure *op_RSHIFT_40 = SHIFTRA(Rs, SN(32, 0)); + RzILOpPure *op_AND_42 = LOGAND(op_RSHIFT_40, SN(32, 0xffff)); + RzILOpPure *op_AND_46 = LOGAND(CAST(32, MSB(CAST(16, MSB(op_AND_42), DUP(op_AND_42))), CAST(16, MSB(DUP(op_AND_42)), DUP(op_AND_42))), SN(32, 0xffff)); + RzILOpPure *op_LSHIFT_51 = SHIFTL0(CAST(64, IL_FALSE, op_AND_46), SN(32, 16)); + RzILOpPure *op_OR_53 = LOGOR(CAST(64, IL_FALSE, op_AND_35), op_LSHIFT_51); + RzILOpEffect *op_ASSIGN_55 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_53)); + + // Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << 0x20)))) | (((ut64) (((st32) ((st16) ((Rt >> 0x10) & 0xffff))) & 0xffff)) << 0x20))); + RzILOpPure *op_LSHIFT_61 = SHIFTL0(SN(64, 0xffff), SN(32, 0x20)); + RzILOpPure *op_NOT_62 = LOGNOT(op_LSHIFT_61); + RzILOpPure *op_AND_63 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_62); + RzILOpPure *op_RSHIFT_67 = SHIFTRA(DUP(Rt), SN(32, 16)); + RzILOpPure *op_AND_69 = LOGAND(op_RSHIFT_67, SN(32, 
0xffff)); + RzILOpPure *op_AND_73 = LOGAND(CAST(32, MSB(CAST(16, MSB(op_AND_69), DUP(op_AND_69))), CAST(16, MSB(DUP(op_AND_69)), DUP(op_AND_69))), SN(32, 0xffff)); + RzILOpPure *op_LSHIFT_78 = SHIFTL0(CAST(64, IL_FALSE, op_AND_73), SN(32, 0x20)); + RzILOpPure *op_OR_80 = LOGOR(CAST(64, IL_FALSE, op_AND_63), op_LSHIFT_78); + RzILOpEffect *op_ASSIGN_82 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_80)); + + // Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << 0x30)))) | (((ut64) (((st32) ((st16) ((Rs >> 0x10) & 0xffff))) & 0xffff)) << 0x30))); + RzILOpPure *op_LSHIFT_88 = SHIFTL0(SN(64, 0xffff), SN(32, 0x30)); + RzILOpPure *op_NOT_89 = LOGNOT(op_LSHIFT_88); + RzILOpPure *op_AND_90 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_89); + RzILOpPure *op_RSHIFT_94 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_96 = LOGAND(op_RSHIFT_94, SN(32, 0xffff)); + RzILOpPure *op_AND_100 = LOGAND(CAST(32, MSB(CAST(16, MSB(op_AND_96), DUP(op_AND_96))), CAST(16, MSB(DUP(op_AND_96)), DUP(op_AND_96))), SN(32, 0xffff)); + RzILOpPure *op_LSHIFT_105 = SHIFTL0(CAST(64, IL_FALSE, op_AND_100), SN(32, 0x30)); + RzILOpPure *op_OR_107 = LOGOR(CAST(64, IL_FALSE, op_AND_90), op_LSHIFT_105); + RzILOpEffect *op_ASSIGN_109 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_107)); + + RzILOpEffect *instruction_sequence = SEQN(4, op_ASSIGN_27, op_ASSIGN_55, op_ASSIGN_82, op_ASSIGN_109); + return instruction_sequence; +} + +// Rd = parity(Rss,Rtt) +RzILOpEffect *hex_il_op_s2_parityp(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// if (!Pv) memb(Rs+Ii) = Rt +RzILOpEffect *hex_il_op_s2_pstorerbf_io(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Pv_op = ISA2REG(hi, 'v', false); + RzILOpPure *Pv = READ_REG(pkt, Pv_op, false); + const HexOp *Rt_op = ISA2REG(hi, 
't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = ((ut32) Rs) + u; + RzILOpPure *op_ADD_5 = ADD(CAST(32, IL_FALSE, Rs), VARL("u")); + RzILOpEffect *op_ASSIGN_6 = SETL("EA", op_ADD_5); + + // mem_store_ut8(EA, ((ut8) ((st8) ((Rt >> 0x0) & 0xff)))); + RzILOpPure *op_RSHIFT_17 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_19 = LOGAND(op_RSHIFT_17, SN(32, 0xff)); + RzILOpEffect *ms_cast_ut8_21_22 = STOREW(VARL("EA"), CAST(8, IL_FALSE, CAST(8, MSB(op_AND_19), DUP(op_AND_19)))); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_23 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(mem_store_ut8(EA, ((ut8) ((st8) ((Rt >> 0x0) & 0xff))))); + RzILOpEffect *seq_then_24 = ms_cast_ut8_21_22; + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_25 = c_call_23; + + // if (! (((st32) Pv) & 0x1)) {seq(mem_store_ut8(EA, ((ut8) ((st8) ((Rt >> 0x0) & 0xff)))))} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_11 = LOGAND(CAST(32, MSB(Pv), DUP(Pv)), SN(32, 1)); + RzILOpPure *op_INV_12 = INV(NON_ZERO(op_AND_11)); + RzILOpEffect *branch_26 = BRANCH(op_INV_12, seq_then_24, seq_else_25); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_6, branch_26); + return instruction_sequence; +} + +// if (!Pv) memb(Rx++Ii) = Rt +RzILOpEffect *hex_il_op_s2_pstorerbf_pi(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Pv_op = ISA2REG(hi, 'v', false); + RzILOpPure *Pv = READ_REG(pkt, Pv_op, false); + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // EA = ((ut32) Rx); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // s = s; + RzILOpEffect *imm_assign_10 = SETL("s", s); + + // Rx = Rx + s; + 
RzILOpPure *op_ADD_12 = ADD(READ_REG(pkt, Rx_op, false), VARL("s")); + RzILOpEffect *op_ASSIGN_13 = WRITE_REG(bundle, Rx_op, op_ADD_12); + + // mem_store_ut8(EA, ((ut8) ((st8) ((Rt >> 0x0) & 0xff)))); + RzILOpPure *op_RSHIFT_19 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_21 = LOGAND(op_RSHIFT_19, SN(32, 0xff)); + RzILOpEffect *ms_cast_ut8_23_24 = STOREW(VARL("EA"), CAST(8, IL_FALSE, CAST(8, MSB(op_AND_21), DUP(op_AND_21)))); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_25 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(Rx = Rx + s; mem_store_ut8(EA, ((ut8) ((st8) ((Rt >> 0x0) & ...; + RzILOpEffect *seq_then_26 = SEQN(2, op_ASSIGN_13, ms_cast_ut8_23_24); + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_27 = c_call_25; + + // if (! (((st32) Pv) & 0x1)) {seq(Rx = Rx + s; mem_store_ut8(EA, ((ut8) ((st8) ((Rt >> 0x0) & ...} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_8 = LOGAND(CAST(32, MSB(Pv), DUP(Pv)), SN(32, 1)); + RzILOpPure *op_INV_9 = INV(NON_ZERO(op_AND_8)); + RzILOpEffect *branch_28 = BRANCH(op_INV_9, seq_then_26, seq_else_27); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_10, op_ASSIGN_3, branch_28); + return instruction_sequence; +} + +// if (!Pv.new) memb(Rx++Ii) = Rt +RzILOpEffect *hex_il_op_s2_pstorerbfnew_pi(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Pv_new_op = ISA2REG(hi, 'v', true); + RzILOpPure *Pv_new = READ_REG(pkt, Pv_new_op, true); + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // EA = ((ut32) Rx); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // s = s; + RzILOpEffect *imm_assign_10 = SETL("s", s); + + // Rx = Rx + s; + RzILOpPure *op_ADD_12 = ADD(READ_REG(pkt, Rx_op, false), VARL("s")); + RzILOpEffect 
*op_ASSIGN_13 = WRITE_REG(bundle, Rx_op, op_ADD_12); + + // mem_store_ut8(EA, ((ut8) ((st8) ((Rt >> 0x0) & 0xff)))); + RzILOpPure *op_RSHIFT_19 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_21 = LOGAND(op_RSHIFT_19, SN(32, 0xff)); + RzILOpEffect *ms_cast_ut8_23_24 = STOREW(VARL("EA"), CAST(8, IL_FALSE, CAST(8, MSB(op_AND_21), DUP(op_AND_21)))); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_25 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(Rx = Rx + s; mem_store_ut8(EA, ((ut8) ((st8) ((Rt >> 0x0) & ...; + RzILOpEffect *seq_then_26 = SEQN(2, op_ASSIGN_13, ms_cast_ut8_23_24); + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_27 = c_call_25; + + // if (! (((st32) Pv_new) & 0x1)) {seq(Rx = Rx + s; mem_store_ut8(EA, ((ut8) ((st8) ((Rt >> 0x0) & ...} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_8 = LOGAND(CAST(32, MSB(Pv_new), DUP(Pv_new)), SN(32, 1)); + RzILOpPure *op_INV_9 = INV(NON_ZERO(op_AND_8)); + RzILOpEffect *branch_28 = BRANCH(op_INV_9, seq_then_26, seq_else_27); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_10, op_ASSIGN_3, branch_28); + return instruction_sequence; +} + +// if (!Pv) memb(Rs+Ii) = Nt.new +RzILOpEffect *hex_il_op_s2_pstorerbnewf_io(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Pv_op = ISA2REG(hi, 'v', false); + RzILOpPure *Pv = READ_REG(pkt, Pv_op, false); + const HexOp Nt_new_op = NREG2OP(bundle, 't'); + RzILOpPure *Nt_new = READ_REG(pkt, &Nt_new_op, true); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = ((ut32) Rs) + u; + RzILOpPure *op_ADD_5 = ADD(CAST(32, IL_FALSE, Rs), VARL("u")); + RzILOpEffect *op_ASSIGN_6 = SETL("EA", op_ADD_5); + + // mem_store_ut8(EA, ((ut8) ((st8) ((Nt_new >> 0x0) & 0xff)))); + RzILOpPure *op_RSHIFT_17 = 
SHIFTRA(Nt_new, SN(32, 0)); + RzILOpPure *op_AND_19 = LOGAND(op_RSHIFT_17, SN(32, 0xff)); + RzILOpEffect *ms_cast_ut8_21_22 = STOREW(VARL("EA"), CAST(8, IL_FALSE, CAST(8, MSB(op_AND_19), DUP(op_AND_19)))); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_23 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(mem_store_ut8(EA, ((ut8) ((st8) ((Nt_new >> 0x0) & 0xff))))); + RzILOpEffect *seq_then_24 = ms_cast_ut8_21_22; + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_25 = c_call_23; + + // if (! (((st32) Pv) & 0x1)) {seq(mem_store_ut8(EA, ((ut8) ((st8) ((Nt_new >> 0x0) & 0xff)))))} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_11 = LOGAND(CAST(32, MSB(Pv), DUP(Pv)), SN(32, 1)); + RzILOpPure *op_INV_12 = INV(NON_ZERO(op_AND_11)); + RzILOpEffect *branch_26 = BRANCH(op_INV_12, seq_then_24, seq_else_25); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_6, branch_26); + return instruction_sequence; +} + +// if (!Pv) memb(Rx++Ii) = Nt.new +RzILOpEffect *hex_il_op_s2_pstorerbnewf_pi(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Pv_op = ISA2REG(hi, 'v', false); + RzILOpPure *Pv = READ_REG(pkt, Pv_op, false); + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + const HexOp Nt_new_op = NREG2OP(bundle, 't'); + RzILOpPure *Nt_new = READ_REG(pkt, &Nt_new_op, true); + + // EA = ((ut32) Rx); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // s = s; + RzILOpEffect *imm_assign_10 = SETL("s", s); + + // Rx = Rx + s; + RzILOpPure *op_ADD_12 = ADD(READ_REG(pkt, Rx_op, false), VARL("s")); + RzILOpEffect *op_ASSIGN_13 = WRITE_REG(bundle, Rx_op, op_ADD_12); + + // mem_store_ut8(EA, ((ut8) ((st8) ((Nt_new >> 0x0) & 0xff)))); + RzILOpPure *op_RSHIFT_19 = SHIFTRA(Nt_new, SN(32, 0)); + RzILOpPure *op_AND_21 = LOGAND(op_RSHIFT_19, SN(32, 0xff)); + RzILOpEffect 
*ms_cast_ut8_23_24 = STOREW(VARL("EA"), CAST(8, IL_FALSE, CAST(8, MSB(op_AND_21), DUP(op_AND_21)))); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_25 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(Rx = Rx + s; mem_store_ut8(EA, ((ut8) ((st8) ((Nt_new >> 0x0 ...; + RzILOpEffect *seq_then_26 = SEQN(2, op_ASSIGN_13, ms_cast_ut8_23_24); + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_27 = c_call_25; + + // if (! (((st32) Pv) & 0x1)) {seq(Rx = Rx + s; mem_store_ut8(EA, ((ut8) ((st8) ((Nt_new >> 0x0 ...} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_8 = LOGAND(CAST(32, MSB(Pv), DUP(Pv)), SN(32, 1)); + RzILOpPure *op_INV_9 = INV(NON_ZERO(op_AND_8)); + RzILOpEffect *branch_28 = BRANCH(op_INV_9, seq_then_26, seq_else_27); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_10, op_ASSIGN_3, branch_28); + return instruction_sequence; +} + +// if (!Pv.new) memb(Rx++Ii) = Nt.new +RzILOpEffect *hex_il_op_s2_pstorerbnewfnew_pi(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Pv_new_op = ISA2REG(hi, 'v', true); + RzILOpPure *Pv_new = READ_REG(pkt, Pv_new_op, true); + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + const HexOp Nt_new_op = NREG2OP(bundle, 't'); + RzILOpPure *Nt_new = READ_REG(pkt, &Nt_new_op, true); + + // EA = ((ut32) Rx); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // s = s; + RzILOpEffect *imm_assign_10 = SETL("s", s); + + // Rx = Rx + s; + RzILOpPure *op_ADD_12 = ADD(READ_REG(pkt, Rx_op, false), VARL("s")); + RzILOpEffect *op_ASSIGN_13 = WRITE_REG(bundle, Rx_op, op_ADD_12); + + // mem_store_ut8(EA, ((ut8) ((st8) ((Nt_new >> 0x0) & 0xff)))); + RzILOpPure *op_RSHIFT_19 = SHIFTRA(Nt_new, SN(32, 0)); + RzILOpPure *op_AND_21 = LOGAND(op_RSHIFT_19, SN(32, 0xff)); + RzILOpEffect *ms_cast_ut8_23_24 = STOREW(VARL("EA"), CAST(8, IL_FALSE, 
CAST(8, MSB(op_AND_21), DUP(op_AND_21)))); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_25 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(Rx = Rx + s; mem_store_ut8(EA, ((ut8) ((st8) ((Nt_new >> 0x0 ...; + RzILOpEffect *seq_then_26 = SEQN(2, op_ASSIGN_13, ms_cast_ut8_23_24); + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_27 = c_call_25; + + // if (! (((st32) Pv_new) & 0x1)) {seq(Rx = Rx + s; mem_store_ut8(EA, ((ut8) ((st8) ((Nt_new >> 0x0 ...} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_8 = LOGAND(CAST(32, MSB(Pv_new), DUP(Pv_new)), SN(32, 1)); + RzILOpPure *op_INV_9 = INV(NON_ZERO(op_AND_8)); + RzILOpEffect *branch_28 = BRANCH(op_INV_9, seq_then_26, seq_else_27); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_10, op_ASSIGN_3, branch_28); + return instruction_sequence; +} + +// if (Pv) memb(Rs+Ii) = Nt.new +RzILOpEffect *hex_il_op_s2_pstorerbnewt_io(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Pv_op = ISA2REG(hi, 'v', false); + RzILOpPure *Pv = READ_REG(pkt, Pv_op, false); + const HexOp Nt_new_op = NREG2OP(bundle, 't'); + RzILOpPure *Nt_new = READ_REG(pkt, &Nt_new_op, true); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = ((ut32) Rs) + u; + RzILOpPure *op_ADD_5 = ADD(CAST(32, IL_FALSE, Rs), VARL("u")); + RzILOpEffect *op_ASSIGN_6 = SETL("EA", op_ADD_5); + + // mem_store_ut8(EA, ((ut8) ((st8) ((Nt_new >> 0x0) & 0xff)))); + RzILOpPure *op_RSHIFT_16 = SHIFTRA(Nt_new, SN(32, 0)); + RzILOpPure *op_AND_18 = LOGAND(op_RSHIFT_16, SN(32, 0xff)); + RzILOpEffect *ms_cast_ut8_20_21 = STOREW(VARL("EA"), CAST(8, IL_FALSE, CAST(8, MSB(op_AND_18), DUP(op_AND_18)))); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_22 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // 
seq(mem_store_ut8(EA, ((ut8) ((st8) ((Nt_new >> 0x0) & 0xff))))); + RzILOpEffect *seq_then_23 = ms_cast_ut8_20_21; + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_24 = c_call_22; + + // if ((((st32) Pv) & 0x1)) {seq(mem_store_ut8(EA, ((ut8) ((st8) ((Nt_new >> 0x0) & 0xff)))))} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_11 = LOGAND(CAST(32, MSB(Pv), DUP(Pv)), SN(32, 1)); + RzILOpEffect *branch_25 = BRANCH(NON_ZERO(op_AND_11), seq_then_23, seq_else_24); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_6, branch_25); + return instruction_sequence; +} + +// if (Pv) memb(Rx++Ii) = Nt.new +RzILOpEffect *hex_il_op_s2_pstorerbnewt_pi(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Pv_op = ISA2REG(hi, 'v', false); + RzILOpPure *Pv = READ_REG(pkt, Pv_op, false); + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + const HexOp Nt_new_op = NREG2OP(bundle, 't'); + RzILOpPure *Nt_new = READ_REG(pkt, &Nt_new_op, true); + + // EA = ((ut32) Rx); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // s = s; + RzILOpEffect *imm_assign_9 = SETL("s", s); + + // Rx = Rx + s; + RzILOpPure *op_ADD_11 = ADD(READ_REG(pkt, Rx_op, false), VARL("s")); + RzILOpEffect *op_ASSIGN_12 = WRITE_REG(bundle, Rx_op, op_ADD_11); + + // mem_store_ut8(EA, ((ut8) ((st8) ((Nt_new >> 0x0) & 0xff)))); + RzILOpPure *op_RSHIFT_18 = SHIFTRA(Nt_new, SN(32, 0)); + RzILOpPure *op_AND_20 = LOGAND(op_RSHIFT_18, SN(32, 0xff)); + RzILOpEffect *ms_cast_ut8_22_23 = STOREW(VARL("EA"), CAST(8, IL_FALSE, CAST(8, MSB(op_AND_20), DUP(op_AND_20)))); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_24 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(Rx = Rx + s; mem_store_ut8(EA, ((ut8) ((st8) ((Nt_new >> 0x0 ...; + RzILOpEffect *seq_then_25 = SEQN(2, op_ASSIGN_12, ms_cast_ut8_22_23); + + // 
seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_26 = c_call_24; + + // if ((((st32) Pv) & 0x1)) {seq(Rx = Rx + s; mem_store_ut8(EA, ((ut8) ((st8) ((Nt_new >> 0x0 ...} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_8 = LOGAND(CAST(32, MSB(Pv), DUP(Pv)), SN(32, 1)); + RzILOpEffect *branch_27 = BRANCH(NON_ZERO(op_AND_8), seq_then_25, seq_else_26); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_9, op_ASSIGN_3, branch_27); + return instruction_sequence; +} + +// if (Pv.new) memb(Rx++Ii) = Nt.new +RzILOpEffect *hex_il_op_s2_pstorerbnewtnew_pi(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Pv_new_op = ISA2REG(hi, 'v', true); + RzILOpPure *Pv_new = READ_REG(pkt, Pv_new_op, true); + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + const HexOp Nt_new_op = NREG2OP(bundle, 't'); + RzILOpPure *Nt_new = READ_REG(pkt, &Nt_new_op, true); + + // EA = ((ut32) Rx); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // s = s; + RzILOpEffect *imm_assign_9 = SETL("s", s); + + // Rx = Rx + s; + RzILOpPure *op_ADD_11 = ADD(READ_REG(pkt, Rx_op, false), VARL("s")); + RzILOpEffect *op_ASSIGN_12 = WRITE_REG(bundle, Rx_op, op_ADD_11); + + // mem_store_ut8(EA, ((ut8) ((st8) ((Nt_new >> 0x0) & 0xff)))); + RzILOpPure *op_RSHIFT_18 = SHIFTRA(Nt_new, SN(32, 0)); + RzILOpPure *op_AND_20 = LOGAND(op_RSHIFT_18, SN(32, 0xff)); + RzILOpEffect *ms_cast_ut8_22_23 = STOREW(VARL("EA"), CAST(8, IL_FALSE, CAST(8, MSB(op_AND_20), DUP(op_AND_20)))); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_24 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(Rx = Rx + s; mem_store_ut8(EA, ((ut8) ((st8) ((Nt_new >> 0x0 ...; + RzILOpEffect *seq_then_25 = SEQN(2, op_ASSIGN_12, ms_cast_ut8_22_23); + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_26 = c_call_24; + + // if ((((st32) Pv_new) & 0x1)) 
{seq(Rx = Rx + s; mem_store_ut8(EA, ((ut8) ((st8) ((Nt_new >> 0x0 ...} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_8 = LOGAND(CAST(32, MSB(Pv_new), DUP(Pv_new)), SN(32, 1)); + RzILOpEffect *branch_27 = BRANCH(NON_ZERO(op_AND_8), seq_then_25, seq_else_26); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_9, op_ASSIGN_3, branch_27); + return instruction_sequence; +} + +// if (Pv) memb(Rs+Ii) = Rt +RzILOpEffect *hex_il_op_s2_pstorerbt_io(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Pv_op = ISA2REG(hi, 'v', false); + RzILOpPure *Pv = READ_REG(pkt, Pv_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = ((ut32) Rs) + u; + RzILOpPure *op_ADD_5 = ADD(CAST(32, IL_FALSE, Rs), VARL("u")); + RzILOpEffect *op_ASSIGN_6 = SETL("EA", op_ADD_5); + + // mem_store_ut8(EA, ((ut8) ((st8) ((Rt >> 0x0) & 0xff)))); + RzILOpPure *op_RSHIFT_16 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_18 = LOGAND(op_RSHIFT_16, SN(32, 0xff)); + RzILOpEffect *ms_cast_ut8_20_21 = STOREW(VARL("EA"), CAST(8, IL_FALSE, CAST(8, MSB(op_AND_18), DUP(op_AND_18)))); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_22 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(mem_store_ut8(EA, ((ut8) ((st8) ((Rt >> 0x0) & 0xff))))); + RzILOpEffect *seq_then_23 = ms_cast_ut8_20_21; + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_24 = c_call_22; + + // if ((((st32) Pv) & 0x1)) {seq(mem_store_ut8(EA, ((ut8) ((st8) ((Rt >> 0x0) & 0xff)))))} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_11 = LOGAND(CAST(32, MSB(Pv), DUP(Pv)), SN(32, 1)); + RzILOpEffect *branch_25 = BRANCH(NON_ZERO(op_AND_11), seq_then_23, seq_else_24); + 
+ RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_6, branch_25); + return instruction_sequence; +} + +// if (Pv) memb(Rx++Ii) = Rt +RzILOpEffect *hex_il_op_s2_pstorerbt_pi(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Pv_op = ISA2REG(hi, 'v', false); + RzILOpPure *Pv = READ_REG(pkt, Pv_op, false); + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // EA = ((ut32) Rx); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // s = s; + RzILOpEffect *imm_assign_9 = SETL("s", s); + + // Rx = Rx + s; + RzILOpPure *op_ADD_11 = ADD(READ_REG(pkt, Rx_op, false), VARL("s")); + RzILOpEffect *op_ASSIGN_12 = WRITE_REG(bundle, Rx_op, op_ADD_11); + + // mem_store_ut8(EA, ((ut8) ((st8) ((Rt >> 0x0) & 0xff)))); + RzILOpPure *op_RSHIFT_18 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_20 = LOGAND(op_RSHIFT_18, SN(32, 0xff)); + RzILOpEffect *ms_cast_ut8_22_23 = STOREW(VARL("EA"), CAST(8, IL_FALSE, CAST(8, MSB(op_AND_20), DUP(op_AND_20)))); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_24 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(Rx = Rx + s; mem_store_ut8(EA, ((ut8) ((st8) ((Rt >> 0x0) & ...; + RzILOpEffect *seq_then_25 = SEQN(2, op_ASSIGN_12, ms_cast_ut8_22_23); + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_26 = c_call_24; + + // if ((((st32) Pv) & 0x1)) {seq(Rx = Rx + s; mem_store_ut8(EA, ((ut8) ((st8) ((Rt >> 0x0) & ...} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_8 = LOGAND(CAST(32, MSB(Pv), DUP(Pv)), SN(32, 1)); + RzILOpEffect *branch_27 = BRANCH(NON_ZERO(op_AND_8), seq_then_25, seq_else_26); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_9, op_ASSIGN_3, branch_27); + return instruction_sequence; +} + +// if (Pv.new) 
memb(Rx++Ii) = Rt +RzILOpEffect *hex_il_op_s2_pstorerbtnew_pi(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Pv_new_op = ISA2REG(hi, 'v', true); + RzILOpPure *Pv_new = READ_REG(pkt, Pv_new_op, true); + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // EA = ((ut32) Rx); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // s = s; + RzILOpEffect *imm_assign_9 = SETL("s", s); + + // Rx = Rx + s; + RzILOpPure *op_ADD_11 = ADD(READ_REG(pkt, Rx_op, false), VARL("s")); + RzILOpEffect *op_ASSIGN_12 = WRITE_REG(bundle, Rx_op, op_ADD_11); + + // mem_store_ut8(EA, ((ut8) ((st8) ((Rt >> 0x0) & 0xff)))); + RzILOpPure *op_RSHIFT_18 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_20 = LOGAND(op_RSHIFT_18, SN(32, 0xff)); + RzILOpEffect *ms_cast_ut8_22_23 = STOREW(VARL("EA"), CAST(8, IL_FALSE, CAST(8, MSB(op_AND_20), DUP(op_AND_20)))); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_24 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(Rx = Rx + s; mem_store_ut8(EA, ((ut8) ((st8) ((Rt >> 0x0) & ...; + RzILOpEffect *seq_then_25 = SEQN(2, op_ASSIGN_12, ms_cast_ut8_22_23); + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_26 = c_call_24; + + // if ((((st32) Pv_new) & 0x1)) {seq(Rx = Rx + s; mem_store_ut8(EA, ((ut8) ((st8) ((Rt >> 0x0) & ...} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_8 = LOGAND(CAST(32, MSB(Pv_new), DUP(Pv_new)), SN(32, 1)); + RzILOpEffect *branch_27 = BRANCH(NON_ZERO(op_AND_8), seq_then_25, seq_else_26); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_9, op_ASSIGN_3, branch_27); + return instruction_sequence; +} + +// if (!Pv) memd(Rs+Ii) = Rtt +RzILOpEffect *hex_il_op_s2_pstorerdf_io(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; 
+ HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Pv_op = ISA2REG(hi, 'v', false); + RzILOpPure *Pv = READ_REG(pkt, Pv_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = ((ut32) Rs) + u; + RzILOpPure *op_ADD_5 = ADD(CAST(32, IL_FALSE, Rs), VARL("u")); + RzILOpEffect *op_ASSIGN_6 = SETL("EA", op_ADD_5); + + // mem_store_ut64(EA, ((ut64) Rtt)); + RzILOpEffect *ms_cast_ut64_14_15 = STOREW(VARL("EA"), CAST(64, IL_FALSE, Rtt)); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_16 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(mem_store_ut64(EA, ((ut64) Rtt))); + RzILOpEffect *seq_then_17 = ms_cast_ut64_14_15; + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_18 = c_call_16; + + // if (! 
(((st32) Pv) & 0x1)) {seq(mem_store_ut64(EA, ((ut64) Rtt)))} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_11 = LOGAND(CAST(32, MSB(Pv), DUP(Pv)), SN(32, 1)); + RzILOpPure *op_INV_12 = INV(NON_ZERO(op_AND_11)); + RzILOpEffect *branch_19 = BRANCH(op_INV_12, seq_then_17, seq_else_18); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_6, branch_19); + return instruction_sequence; +} + +// if (!Pv) memd(Rx++Ii) = Rtt +RzILOpEffect *hex_il_op_s2_pstorerdf_pi(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Pv_op = ISA2REG(hi, 'v', false); + RzILOpPure *Pv = READ_REG(pkt, Pv_op, false); + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // EA = ((ut32) Rx); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // s = s; + RzILOpEffect *imm_assign_10 = SETL("s", s); + + // Rx = Rx + s; + RzILOpPure *op_ADD_12 = ADD(READ_REG(pkt, Rx_op, false), VARL("s")); + RzILOpEffect *op_ASSIGN_13 = WRITE_REG(bundle, Rx_op, op_ADD_12); + + // mem_store_ut64(EA, ((ut64) Rtt)); + RzILOpEffect *ms_cast_ut64_16_17 = STOREW(VARL("EA"), CAST(64, IL_FALSE, Rtt)); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_18 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(Rx = Rx + s; mem_store_ut64(EA, ((ut64) Rtt))); + RzILOpEffect *seq_then_19 = SEQN(2, op_ASSIGN_13, ms_cast_ut64_16_17); + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_20 = c_call_18; + + // if (! 
(((st32) Pv) & 0x1)) {seq(Rx = Rx + s; mem_store_ut64(EA, ((ut64) Rtt)))} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_8 = LOGAND(CAST(32, MSB(Pv), DUP(Pv)), SN(32, 1)); + RzILOpPure *op_INV_9 = INV(NON_ZERO(op_AND_8)); + RzILOpEffect *branch_21 = BRANCH(op_INV_9, seq_then_19, seq_else_20); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_10, op_ASSIGN_3, branch_21); + return instruction_sequence; +} + +// if (!Pv.new) memd(Rx++Ii) = Rtt +RzILOpEffect *hex_il_op_s2_pstorerdfnew_pi(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Pv_new_op = ISA2REG(hi, 'v', true); + RzILOpPure *Pv_new = READ_REG(pkt, Pv_new_op, true); + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // EA = ((ut32) Rx); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // s = s; + RzILOpEffect *imm_assign_10 = SETL("s", s); + + // Rx = Rx + s; + RzILOpPure *op_ADD_12 = ADD(READ_REG(pkt, Rx_op, false), VARL("s")); + RzILOpEffect *op_ASSIGN_13 = WRITE_REG(bundle, Rx_op, op_ADD_12); + + // mem_store_ut64(EA, ((ut64) Rtt)); + RzILOpEffect *ms_cast_ut64_16_17 = STOREW(VARL("EA"), CAST(64, IL_FALSE, Rtt)); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_18 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(Rx = Rx + s; mem_store_ut64(EA, ((ut64) Rtt))); + RzILOpEffect *seq_then_19 = SEQN(2, op_ASSIGN_13, ms_cast_ut64_16_17); + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_20 = c_call_18; + + // if (! 
(((st32) Pv_new) & 0x1)) {seq(Rx = Rx + s; mem_store_ut64(EA, ((ut64) Rtt)))} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_8 = LOGAND(CAST(32, MSB(Pv_new), DUP(Pv_new)), SN(32, 1)); + RzILOpPure *op_INV_9 = INV(NON_ZERO(op_AND_8)); + RzILOpEffect *branch_21 = BRANCH(op_INV_9, seq_then_19, seq_else_20); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_10, op_ASSIGN_3, branch_21); + return instruction_sequence; +} + +// if (Pv) memd(Rs+Ii) = Rtt +RzILOpEffect *hex_il_op_s2_pstorerdt_io(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Pv_op = ISA2REG(hi, 'v', false); + RzILOpPure *Pv = READ_REG(pkt, Pv_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = ((ut32) Rs) + u; + RzILOpPure *op_ADD_5 = ADD(CAST(32, IL_FALSE, Rs), VARL("u")); + RzILOpEffect *op_ASSIGN_6 = SETL("EA", op_ADD_5); + + // mem_store_ut64(EA, ((ut64) Rtt)); + RzILOpEffect *ms_cast_ut64_13_14 = STOREW(VARL("EA"), CAST(64, IL_FALSE, Rtt)); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_15 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(mem_store_ut64(EA, ((ut64) Rtt))); + RzILOpEffect *seq_then_16 = ms_cast_ut64_13_14; + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_17 = c_call_15; + + // if ((((st32) Pv) & 0x1)) {seq(mem_store_ut64(EA, ((ut64) Rtt)))} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_11 = LOGAND(CAST(32, MSB(Pv), DUP(Pv)), SN(32, 1)); + RzILOpEffect *branch_18 = BRANCH(NON_ZERO(op_AND_11), seq_then_16, seq_else_17); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_6, branch_18); + return instruction_sequence; +} + +// if (Pv) memd(Rx++Ii) = Rtt 
+RzILOpEffect *hex_il_op_s2_pstorerdt_pi(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Pv_op = ISA2REG(hi, 'v', false); + RzILOpPure *Pv = READ_REG(pkt, Pv_op, false); + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // EA = ((ut32) Rx); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // s = s; + RzILOpEffect *imm_assign_9 = SETL("s", s); + + // Rx = Rx + s; + RzILOpPure *op_ADD_11 = ADD(READ_REG(pkt, Rx_op, false), VARL("s")); + RzILOpEffect *op_ASSIGN_12 = WRITE_REG(bundle, Rx_op, op_ADD_11); + + // mem_store_ut64(EA, ((ut64) Rtt)); + RzILOpEffect *ms_cast_ut64_15_16 = STOREW(VARL("EA"), CAST(64, IL_FALSE, Rtt)); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_17 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(Rx = Rx + s; mem_store_ut64(EA, ((ut64) Rtt))); + RzILOpEffect *seq_then_18 = SEQN(2, op_ASSIGN_12, ms_cast_ut64_15_16); + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_19 = c_call_17; + + // if ((((st32) Pv) & 0x1)) {seq(Rx = Rx + s; mem_store_ut64(EA, ((ut64) Rtt)))} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_8 = LOGAND(CAST(32, MSB(Pv), DUP(Pv)), SN(32, 1)); + RzILOpEffect *branch_20 = BRANCH(NON_ZERO(op_AND_8), seq_then_18, seq_else_19); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_9, op_ASSIGN_3, branch_20); + return instruction_sequence; +} + +// if (Pv.new) memd(Rx++Ii) = Rtt +RzILOpEffect *hex_il_op_s2_pstorerdtnew_pi(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Pv_new_op = ISA2REG(hi, 'v', true); + RzILOpPure *Pv_new = READ_REG(pkt, Pv_new_op, true); + RzILOpPure *s = 
SN(32, (st32)ISA2IMM(hi, 's')); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // EA = ((ut32) Rx); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // s = s; + RzILOpEffect *imm_assign_9 = SETL("s", s); + + // Rx = Rx + s; + RzILOpPure *op_ADD_11 = ADD(READ_REG(pkt, Rx_op, false), VARL("s")); + RzILOpEffect *op_ASSIGN_12 = WRITE_REG(bundle, Rx_op, op_ADD_11); + + // mem_store_ut64(EA, ((ut64) Rtt)); + RzILOpEffect *ms_cast_ut64_15_16 = STOREW(VARL("EA"), CAST(64, IL_FALSE, Rtt)); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_17 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(Rx = Rx + s; mem_store_ut64(EA, ((ut64) Rtt))); + RzILOpEffect *seq_then_18 = SEQN(2, op_ASSIGN_12, ms_cast_ut64_15_16); + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_19 = c_call_17; + + // if ((((st32) Pv_new) & 0x1)) {seq(Rx = Rx + s; mem_store_ut64(EA, ((ut64) Rtt)))} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_8 = LOGAND(CAST(32, MSB(Pv_new), DUP(Pv_new)), SN(32, 1)); + RzILOpEffect *branch_20 = BRANCH(NON_ZERO(op_AND_8), seq_then_18, seq_else_19); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_9, op_ASSIGN_3, branch_20); + return instruction_sequence; +} + +// if (!Pv) memh(Rs+Ii) = Rt.h +RzILOpEffect *hex_il_op_s2_pstorerff_io(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Pv_op = ISA2REG(hi, 'v', false); + RzILOpPure *Pv = READ_REG(pkt, Pv_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = ((ut32) Rs) + u; + RzILOpPure *op_ADD_5 = ADD(CAST(32, IL_FALSE, Rs), VARL("u")); + 
RzILOpEffect *op_ASSIGN_6 = SETL("EA", op_ADD_5); + + // mem_store_ut16(EA, ((ut16) ((st16) ((Rt >> 0x10) & 0xffff)))); + RzILOpPure *op_RSHIFT_17 = SHIFTRA(Rt, SN(32, 16)); + RzILOpPure *op_AND_19 = LOGAND(op_RSHIFT_17, SN(32, 0xffff)); + RzILOpEffect *ms_cast_ut16_21_22 = STOREW(VARL("EA"), CAST(16, IL_FALSE, CAST(16, MSB(op_AND_19), DUP(op_AND_19)))); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_23 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(mem_store_ut16(EA, ((ut16) ((st16) ((Rt >> 0x10) & 0xffff))) ...; + RzILOpEffect *seq_then_24 = ms_cast_ut16_21_22; + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_25 = c_call_23; + + // if (! (((st32) Pv) & 0x1)) {seq(mem_store_ut16(EA, ((ut16) ((st16) ((Rt >> 0x10) & 0xffff))) ...} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_11 = LOGAND(CAST(32, MSB(Pv), DUP(Pv)), SN(32, 1)); + RzILOpPure *op_INV_12 = INV(NON_ZERO(op_AND_11)); + RzILOpEffect *branch_26 = BRANCH(op_INV_12, seq_then_24, seq_else_25); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_6, branch_26); + return instruction_sequence; +} + +// if (!Pv) memh(Rx++Ii) = Rt.h +RzILOpEffect *hex_il_op_s2_pstorerff_pi(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Pv_op = ISA2REG(hi, 'v', false); + RzILOpPure *Pv = READ_REG(pkt, Pv_op, false); + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // EA = ((ut32) Rx); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // s = s; + RzILOpEffect *imm_assign_10 = SETL("s", s); + + // Rx = Rx + s; + RzILOpPure *op_ADD_12 = ADD(READ_REG(pkt, Rx_op, false), VARL("s")); + RzILOpEffect *op_ASSIGN_13 = WRITE_REG(bundle, Rx_op, op_ADD_12); + + // mem_store_ut16(EA, ((ut16) ((st16) ((Rt >> 0x10) & 
0xffff)))); + RzILOpPure *op_RSHIFT_19 = SHIFTRA(Rt, SN(32, 16)); + RzILOpPure *op_AND_21 = LOGAND(op_RSHIFT_19, SN(32, 0xffff)); + RzILOpEffect *ms_cast_ut16_23_24 = STOREW(VARL("EA"), CAST(16, IL_FALSE, CAST(16, MSB(op_AND_21), DUP(op_AND_21)))); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_25 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(Rx = Rx + s; mem_store_ut16(EA, ((ut16) ((st16) ((Rt >> 0x10 ...; + RzILOpEffect *seq_then_26 = SEQN(2, op_ASSIGN_13, ms_cast_ut16_23_24); + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_27 = c_call_25; + + // if (! (((st32) Pv) & 0x1)) {seq(Rx = Rx + s; mem_store_ut16(EA, ((ut16) ((st16) ((Rt >> 0x10 ...} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_8 = LOGAND(CAST(32, MSB(Pv), DUP(Pv)), SN(32, 1)); + RzILOpPure *op_INV_9 = INV(NON_ZERO(op_AND_8)); + RzILOpEffect *branch_28 = BRANCH(op_INV_9, seq_then_26, seq_else_27); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_10, op_ASSIGN_3, branch_28); + return instruction_sequence; +} + +// if (!Pv.new) memh(Rx++Ii) = Rt.h +RzILOpEffect *hex_il_op_s2_pstorerffnew_pi(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Pv_new_op = ISA2REG(hi, 'v', true); + RzILOpPure *Pv_new = READ_REG(pkt, Pv_new_op, true); + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // EA = ((ut32) Rx); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // s = s; + RzILOpEffect *imm_assign_10 = SETL("s", s); + + // Rx = Rx + s; + RzILOpPure *op_ADD_12 = ADD(READ_REG(pkt, Rx_op, false), VARL("s")); + RzILOpEffect *op_ASSIGN_13 = WRITE_REG(bundle, Rx_op, op_ADD_12); + + // mem_store_ut16(EA, ((ut16) ((st16) ((Rt >> 0x10) & 0xffff)))); + RzILOpPure *op_RSHIFT_19 = SHIFTRA(Rt, SN(32, 16)); + 
RzILOpPure *op_AND_21 = LOGAND(op_RSHIFT_19, SN(32, 0xffff)); + RzILOpEffect *ms_cast_ut16_23_24 = STOREW(VARL("EA"), CAST(16, IL_FALSE, CAST(16, MSB(op_AND_21), DUP(op_AND_21)))); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_25 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(Rx = Rx + s; mem_store_ut16(EA, ((ut16) ((st16) ((Rt >> 0x10 ...; + RzILOpEffect *seq_then_26 = SEQN(2, op_ASSIGN_13, ms_cast_ut16_23_24); + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_27 = c_call_25; + + // if (! (((st32) Pv_new) & 0x1)) {seq(Rx = Rx + s; mem_store_ut16(EA, ((ut16) ((st16) ((Rt >> 0x10 ...} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_8 = LOGAND(CAST(32, MSB(Pv_new), DUP(Pv_new)), SN(32, 1)); + RzILOpPure *op_INV_9 = INV(NON_ZERO(op_AND_8)); + RzILOpEffect *branch_28 = BRANCH(op_INV_9, seq_then_26, seq_else_27); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_10, op_ASSIGN_3, branch_28); + return instruction_sequence; +} + +// if (Pv) memh(Rs+Ii) = Rt.h +RzILOpEffect *hex_il_op_s2_pstorerft_io(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Pv_op = ISA2REG(hi, 'v', false); + RzILOpPure *Pv = READ_REG(pkt, Pv_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = ((ut32) Rs) + u; + RzILOpPure *op_ADD_5 = ADD(CAST(32, IL_FALSE, Rs), VARL("u")); + RzILOpEffect *op_ASSIGN_6 = SETL("EA", op_ADD_5); + + // mem_store_ut16(EA, ((ut16) ((st16) ((Rt >> 0x10) & 0xffff)))); + RzILOpPure *op_RSHIFT_16 = SHIFTRA(Rt, SN(32, 16)); + RzILOpPure *op_AND_18 = LOGAND(op_RSHIFT_16, SN(32, 0xffff)); + RzILOpEffect *ms_cast_ut16_20_21 = STOREW(VARL("EA"), CAST(16, IL_FALSE, CAST(16, MSB(op_AND_18), 
DUP(op_AND_18)))); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_22 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(mem_store_ut16(EA, ((ut16) ((st16) ((Rt >> 0x10) & 0xffff))) ...; + RzILOpEffect *seq_then_23 = ms_cast_ut16_20_21; + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_24 = c_call_22; + + // if ((((st32) Pv) & 0x1)) {seq(mem_store_ut16(EA, ((ut16) ((st16) ((Rt >> 0x10) & 0xffff))) ...} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_11 = LOGAND(CAST(32, MSB(Pv), DUP(Pv)), SN(32, 1)); + RzILOpEffect *branch_25 = BRANCH(NON_ZERO(op_AND_11), seq_then_23, seq_else_24); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_6, branch_25); + return instruction_sequence; +} + +// if (Pv) memh(Rx++Ii) = Rt.h +RzILOpEffect *hex_il_op_s2_pstorerft_pi(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Pv_op = ISA2REG(hi, 'v', false); + RzILOpPure *Pv = READ_REG(pkt, Pv_op, false); + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // EA = ((ut32) Rx); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // s = s; + RzILOpEffect *imm_assign_9 = SETL("s", s); + + // Rx = Rx + s; + RzILOpPure *op_ADD_11 = ADD(READ_REG(pkt, Rx_op, false), VARL("s")); + RzILOpEffect *op_ASSIGN_12 = WRITE_REG(bundle, Rx_op, op_ADD_11); + + // mem_store_ut16(EA, ((ut16) ((st16) ((Rt >> 0x10) & 0xffff)))); + RzILOpPure *op_RSHIFT_18 = SHIFTRA(Rt, SN(32, 16)); + RzILOpPure *op_AND_20 = LOGAND(op_RSHIFT_18, SN(32, 0xffff)); + RzILOpEffect *ms_cast_ut16_22_23 = STOREW(VARL("EA"), CAST(16, IL_FALSE, CAST(16, MSB(op_AND_20), DUP(op_AND_20)))); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_24 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(Rx = Rx + s; 
mem_store_ut16(EA, ((ut16) ((st16) ((Rt >> 0x10 ...; + RzILOpEffect *seq_then_25 = SEQN(2, op_ASSIGN_12, ms_cast_ut16_22_23); + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_26 = c_call_24; + + // if ((((st32) Pv) & 0x1)) {seq(Rx = Rx + s; mem_store_ut16(EA, ((ut16) ((st16) ((Rt >> 0x10 ...} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_8 = LOGAND(CAST(32, MSB(Pv), DUP(Pv)), SN(32, 1)); + RzILOpEffect *branch_27 = BRANCH(NON_ZERO(op_AND_8), seq_then_25, seq_else_26); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_9, op_ASSIGN_3, branch_27); + return instruction_sequence; +} + +// if (Pv.new) memh(Rx++Ii) = Rt.h +RzILOpEffect *hex_il_op_s2_pstorerftnew_pi(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Pv_new_op = ISA2REG(hi, 'v', true); + RzILOpPure *Pv_new = READ_REG(pkt, Pv_new_op, true); + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // EA = ((ut32) Rx); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // s = s; + RzILOpEffect *imm_assign_9 = SETL("s", s); + + // Rx = Rx + s; + RzILOpPure *op_ADD_11 = ADD(READ_REG(pkt, Rx_op, false), VARL("s")); + RzILOpEffect *op_ASSIGN_12 = WRITE_REG(bundle, Rx_op, op_ADD_11); + + // mem_store_ut16(EA, ((ut16) ((st16) ((Rt >> 0x10) & 0xffff)))); + RzILOpPure *op_RSHIFT_18 = SHIFTRA(Rt, SN(32, 16)); + RzILOpPure *op_AND_20 = LOGAND(op_RSHIFT_18, SN(32, 0xffff)); + RzILOpEffect *ms_cast_ut16_22_23 = STOREW(VARL("EA"), CAST(16, IL_FALSE, CAST(16, MSB(op_AND_20), DUP(op_AND_20)))); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_24 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(Rx = Rx + s; mem_store_ut16(EA, ((ut16) ((st16) ((Rt >> 0x10 ...; + RzILOpEffect *seq_then_25 = SEQN(2, op_ASSIGN_12, 
ms_cast_ut16_22_23); + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_26 = c_call_24; + + // if ((((st32) Pv_new) & 0x1)) {seq(Rx = Rx + s; mem_store_ut16(EA, ((ut16) ((st16) ((Rt >> 0x10 ...} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_8 = LOGAND(CAST(32, MSB(Pv_new), DUP(Pv_new)), SN(32, 1)); + RzILOpEffect *branch_27 = BRANCH(NON_ZERO(op_AND_8), seq_then_25, seq_else_26); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_9, op_ASSIGN_3, branch_27); + return instruction_sequence; +} + +// if (!Pv) memh(Rs+Ii) = Rt +RzILOpEffect *hex_il_op_s2_pstorerhf_io(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Pv_op = ISA2REG(hi, 'v', false); + RzILOpPure *Pv = READ_REG(pkt, Pv_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = ((ut32) Rs) + u; + RzILOpPure *op_ADD_5 = ADD(CAST(32, IL_FALSE, Rs), VARL("u")); + RzILOpEffect *op_ASSIGN_6 = SETL("EA", op_ADD_5); + + // mem_store_ut16(EA, ((ut16) ((st16) ((Rt >> 0x0) & 0xffff)))); + RzILOpPure *op_RSHIFT_17 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_19 = LOGAND(op_RSHIFT_17, SN(32, 0xffff)); + RzILOpEffect *ms_cast_ut16_21_22 = STOREW(VARL("EA"), CAST(16, IL_FALSE, CAST(16, MSB(op_AND_19), DUP(op_AND_19)))); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_23 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(mem_store_ut16(EA, ((ut16) ((st16) ((Rt >> 0x0) & 0xffff))))); + RzILOpEffect *seq_then_24 = ms_cast_ut16_21_22; + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_25 = c_call_23; + + // if (! 
(((st32) Pv) & 0x1)) {seq(mem_store_ut16(EA, ((ut16) ((st16) ((Rt >> 0x0) & 0xffff)))))} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_11 = LOGAND(CAST(32, MSB(Pv), DUP(Pv)), SN(32, 1)); + RzILOpPure *op_INV_12 = INV(NON_ZERO(op_AND_11)); + RzILOpEffect *branch_26 = BRANCH(op_INV_12, seq_then_24, seq_else_25); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_6, branch_26); + return instruction_sequence; +} + +// if (!Pv) memh(Rx++Ii) = Rt +RzILOpEffect *hex_il_op_s2_pstorerhf_pi(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Pv_op = ISA2REG(hi, 'v', false); + RzILOpPure *Pv = READ_REG(pkt, Pv_op, false); + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // EA = ((ut32) Rx); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // s = s; + RzILOpEffect *imm_assign_10 = SETL("s", s); + + // Rx = Rx + s; + RzILOpPure *op_ADD_12 = ADD(READ_REG(pkt, Rx_op, false), VARL("s")); + RzILOpEffect *op_ASSIGN_13 = WRITE_REG(bundle, Rx_op, op_ADD_12); + + // mem_store_ut16(EA, ((ut16) ((st16) ((Rt >> 0x0) & 0xffff)))); + RzILOpPure *op_RSHIFT_19 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_21 = LOGAND(op_RSHIFT_19, SN(32, 0xffff)); + RzILOpEffect *ms_cast_ut16_23_24 = STOREW(VARL("EA"), CAST(16, IL_FALSE, CAST(16, MSB(op_AND_21), DUP(op_AND_21)))); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_25 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(Rx = Rx + s; mem_store_ut16(EA, ((ut16) ((st16) ((Rt >> 0x0) ...; + RzILOpEffect *seq_then_26 = SEQN(2, op_ASSIGN_13, ms_cast_ut16_23_24); + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_27 = c_call_25; + + // if (! 
(((st32) Pv) & 0x1)) {seq(Rx = Rx + s; mem_store_ut16(EA, ((ut16) ((st16) ((Rt >> 0x0) ...} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_8 = LOGAND(CAST(32, MSB(Pv), DUP(Pv)), SN(32, 1)); + RzILOpPure *op_INV_9 = INV(NON_ZERO(op_AND_8)); + RzILOpEffect *branch_28 = BRANCH(op_INV_9, seq_then_26, seq_else_27); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_10, op_ASSIGN_3, branch_28); + return instruction_sequence; +} + +// if (!Pv.new) memh(Rx++Ii) = Rt +RzILOpEffect *hex_il_op_s2_pstorerhfnew_pi(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Pv_new_op = ISA2REG(hi, 'v', true); + RzILOpPure *Pv_new = READ_REG(pkt, Pv_new_op, true); + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // EA = ((ut32) Rx); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // s = s; + RzILOpEffect *imm_assign_10 = SETL("s", s); + + // Rx = Rx + s; + RzILOpPure *op_ADD_12 = ADD(READ_REG(pkt, Rx_op, false), VARL("s")); + RzILOpEffect *op_ASSIGN_13 = WRITE_REG(bundle, Rx_op, op_ADD_12); + + // mem_store_ut16(EA, ((ut16) ((st16) ((Rt >> 0x0) & 0xffff)))); + RzILOpPure *op_RSHIFT_19 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_21 = LOGAND(op_RSHIFT_19, SN(32, 0xffff)); + RzILOpEffect *ms_cast_ut16_23_24 = STOREW(VARL("EA"), CAST(16, IL_FALSE, CAST(16, MSB(op_AND_21), DUP(op_AND_21)))); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_25 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(Rx = Rx + s; mem_store_ut16(EA, ((ut16) ((st16) ((Rt >> 0x0) ...; + RzILOpEffect *seq_then_26 = SEQN(2, op_ASSIGN_13, ms_cast_ut16_23_24); + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_27 = c_call_25; + + // if (! 
(((st32) Pv_new) & 0x1)) {seq(Rx = Rx + s; mem_store_ut16(EA, ((ut16) ((st16) ((Rt >> 0x0) ...} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_8 = LOGAND(CAST(32, MSB(Pv_new), DUP(Pv_new)), SN(32, 1)); + RzILOpPure *op_INV_9 = INV(NON_ZERO(op_AND_8)); + RzILOpEffect *branch_28 = BRANCH(op_INV_9, seq_then_26, seq_else_27); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_10, op_ASSIGN_3, branch_28); + return instruction_sequence; +} + +// if (!Pv) memh(Rs+Ii) = Nt.new +RzILOpEffect *hex_il_op_s2_pstorerhnewf_io(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Pv_op = ISA2REG(hi, 'v', false); + RzILOpPure *Pv = READ_REG(pkt, Pv_op, false); + const HexOp Nt_new_op = NREG2OP(bundle, 't'); + RzILOpPure *Nt_new = READ_REG(pkt, &Nt_new_op, true); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = ((ut32) Rs) + u; + RzILOpPure *op_ADD_5 = ADD(CAST(32, IL_FALSE, Rs), VARL("u")); + RzILOpEffect *op_ASSIGN_6 = SETL("EA", op_ADD_5); + + // mem_store_ut16(EA, ((ut16) ((st16) ((Nt_new >> 0x0) & 0xffff)))); + RzILOpPure *op_RSHIFT_17 = SHIFTRA(Nt_new, SN(32, 0)); + RzILOpPure *op_AND_19 = LOGAND(op_RSHIFT_17, SN(32, 0xffff)); + RzILOpEffect *ms_cast_ut16_21_22 = STOREW(VARL("EA"), CAST(16, IL_FALSE, CAST(16, MSB(op_AND_19), DUP(op_AND_19)))); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_23 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(mem_store_ut16(EA, ((ut16) ((st16) ((Nt_new >> 0x0) & 0xffff ...; + RzILOpEffect *seq_then_24 = ms_cast_ut16_21_22; + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_25 = c_call_23; + + // if (! 
(((st32) Pv) & 0x1)) {seq(mem_store_ut16(EA, ((ut16) ((st16) ((Nt_new >> 0x0) & 0xffff ...} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_11 = LOGAND(CAST(32, MSB(Pv), DUP(Pv)), SN(32, 1)); + RzILOpPure *op_INV_12 = INV(NON_ZERO(op_AND_11)); + RzILOpEffect *branch_26 = BRANCH(op_INV_12, seq_then_24, seq_else_25); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_6, branch_26); + return instruction_sequence; +} + +// if (!Pv) memh(Rx++Ii) = Nt.new +RzILOpEffect *hex_il_op_s2_pstorerhnewf_pi(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Pv_op = ISA2REG(hi, 'v', false); + RzILOpPure *Pv = READ_REG(pkt, Pv_op, false); + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + const HexOp Nt_new_op = NREG2OP(bundle, 't'); + RzILOpPure *Nt_new = READ_REG(pkt, &Nt_new_op, true); + + // EA = ((ut32) Rx); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // s = s; + RzILOpEffect *imm_assign_10 = SETL("s", s); + + // Rx = Rx + s; + RzILOpPure *op_ADD_12 = ADD(READ_REG(pkt, Rx_op, false), VARL("s")); + RzILOpEffect *op_ASSIGN_13 = WRITE_REG(bundle, Rx_op, op_ADD_12); + + // mem_store_ut16(EA, ((ut16) ((st16) ((Nt_new >> 0x0) & 0xffff)))); + RzILOpPure *op_RSHIFT_19 = SHIFTRA(Nt_new, SN(32, 0)); + RzILOpPure *op_AND_21 = LOGAND(op_RSHIFT_19, SN(32, 0xffff)); + RzILOpEffect *ms_cast_ut16_23_24 = STOREW(VARL("EA"), CAST(16, IL_FALSE, CAST(16, MSB(op_AND_21), DUP(op_AND_21)))); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_25 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(Rx = Rx + s; mem_store_ut16(EA, ((ut16) ((st16) ((Nt_new >> ...; + RzILOpEffect *seq_then_26 = SEQN(2, op_ASSIGN_13, ms_cast_ut16_23_24); + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_27 = c_call_25; + + // if (! 
(((st32) Pv) & 0x1)) {seq(Rx = Rx + s; mem_store_ut16(EA, ((ut16) ((st16) ((Nt_new >> ...} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_8 = LOGAND(CAST(32, MSB(Pv), DUP(Pv)), SN(32, 1)); + RzILOpPure *op_INV_9 = INV(NON_ZERO(op_AND_8)); + RzILOpEffect *branch_28 = BRANCH(op_INV_9, seq_then_26, seq_else_27); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_10, op_ASSIGN_3, branch_28); + return instruction_sequence; +} + +// if (!Pv.new) memh(Rx++Ii) = Nt.new +RzILOpEffect *hex_il_op_s2_pstorerhnewfnew_pi(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Pv_new_op = ISA2REG(hi, 'v', true); + RzILOpPure *Pv_new = READ_REG(pkt, Pv_new_op, true); + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + const HexOp Nt_new_op = NREG2OP(bundle, 't'); + RzILOpPure *Nt_new = READ_REG(pkt, &Nt_new_op, true); + + // EA = ((ut32) Rx); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // s = s; + RzILOpEffect *imm_assign_10 = SETL("s", s); + + // Rx = Rx + s; + RzILOpPure *op_ADD_12 = ADD(READ_REG(pkt, Rx_op, false), VARL("s")); + RzILOpEffect *op_ASSIGN_13 = WRITE_REG(bundle, Rx_op, op_ADD_12); + + // mem_store_ut16(EA, ((ut16) ((st16) ((Nt_new >> 0x0) & 0xffff)))); + RzILOpPure *op_RSHIFT_19 = SHIFTRA(Nt_new, SN(32, 0)); + RzILOpPure *op_AND_21 = LOGAND(op_RSHIFT_19, SN(32, 0xffff)); + RzILOpEffect *ms_cast_ut16_23_24 = STOREW(VARL("EA"), CAST(16, IL_FALSE, CAST(16, MSB(op_AND_21), DUP(op_AND_21)))); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_25 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(Rx = Rx + s; mem_store_ut16(EA, ((ut16) ((st16) ((Nt_new >> ...; + RzILOpEffect *seq_then_26 = SEQN(2, op_ASSIGN_13, ms_cast_ut16_23_24); + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_27 = c_call_25; + + // if (! 
(((st32) Pv_new) & 0x1)) {seq(Rx = Rx + s; mem_store_ut16(EA, ((ut16) ((st16) ((Nt_new >> ...} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_8 = LOGAND(CAST(32, MSB(Pv_new), DUP(Pv_new)), SN(32, 1)); + RzILOpPure *op_INV_9 = INV(NON_ZERO(op_AND_8)); + RzILOpEffect *branch_28 = BRANCH(op_INV_9, seq_then_26, seq_else_27); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_10, op_ASSIGN_3, branch_28); + return instruction_sequence; +} + +// if (Pv) memh(Rs+Ii) = Nt.new +RzILOpEffect *hex_il_op_s2_pstorerhnewt_io(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Pv_op = ISA2REG(hi, 'v', false); + RzILOpPure *Pv = READ_REG(pkt, Pv_op, false); + const HexOp Nt_new_op = NREG2OP(bundle, 't'); + RzILOpPure *Nt_new = READ_REG(pkt, &Nt_new_op, true); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = ((ut32) Rs) + u; + RzILOpPure *op_ADD_5 = ADD(CAST(32, IL_FALSE, Rs), VARL("u")); + RzILOpEffect *op_ASSIGN_6 = SETL("EA", op_ADD_5); + + // mem_store_ut16(EA, ((ut16) ((st16) ((Nt_new >> 0x0) & 0xffff)))); + RzILOpPure *op_RSHIFT_16 = SHIFTRA(Nt_new, SN(32, 0)); + RzILOpPure *op_AND_18 = LOGAND(op_RSHIFT_16, SN(32, 0xffff)); + RzILOpEffect *ms_cast_ut16_20_21 = STOREW(VARL("EA"), CAST(16, IL_FALSE, CAST(16, MSB(op_AND_18), DUP(op_AND_18)))); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_22 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(mem_store_ut16(EA, ((ut16) ((st16) ((Nt_new >> 0x0) & 0xffff ...; + RzILOpEffect *seq_then_23 = ms_cast_ut16_20_21; + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_24 = c_call_22; + + // if ((((st32) Pv) & 0x1)) {seq(mem_store_ut16(EA, ((ut16) ((st16) ((Nt_new >> 0x0) & 0xffff ...} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_11 = LOGAND(CAST(32, 
MSB(Pv), DUP(Pv)), SN(32, 1)); + RzILOpEffect *branch_25 = BRANCH(NON_ZERO(op_AND_11), seq_then_23, seq_else_24); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_6, branch_25); + return instruction_sequence; +} + +// if (Pv) memh(Rx++Ii) = Nt.new +RzILOpEffect *hex_il_op_s2_pstorerhnewt_pi(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Pv_op = ISA2REG(hi, 'v', false); + RzILOpPure *Pv = READ_REG(pkt, Pv_op, false); + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + const HexOp Nt_new_op = NREG2OP(bundle, 't'); + RzILOpPure *Nt_new = READ_REG(pkt, &Nt_new_op, true); + + // EA = ((ut32) Rx); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // s = s; + RzILOpEffect *imm_assign_9 = SETL("s", s); + + // Rx = Rx + s; + RzILOpPure *op_ADD_11 = ADD(READ_REG(pkt, Rx_op, false), VARL("s")); + RzILOpEffect *op_ASSIGN_12 = WRITE_REG(bundle, Rx_op, op_ADD_11); + + // mem_store_ut16(EA, ((ut16) ((st16) ((Nt_new >> 0x0) & 0xffff)))); + RzILOpPure *op_RSHIFT_18 = SHIFTRA(Nt_new, SN(32, 0)); + RzILOpPure *op_AND_20 = LOGAND(op_RSHIFT_18, SN(32, 0xffff)); + RzILOpEffect *ms_cast_ut16_22_23 = STOREW(VARL("EA"), CAST(16, IL_FALSE, CAST(16, MSB(op_AND_20), DUP(op_AND_20)))); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_24 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(Rx = Rx + s; mem_store_ut16(EA, ((ut16) ((st16) ((Nt_new >> ...; + RzILOpEffect *seq_then_25 = SEQN(2, op_ASSIGN_12, ms_cast_ut16_22_23); + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_26 = c_call_24; + + // if ((((st32) Pv) & 0x1)) {seq(Rx = Rx + s; mem_store_ut16(EA, ((ut16) ((st16) ((Nt_new >> ...} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_8 = LOGAND(CAST(32, MSB(Pv), DUP(Pv)), SN(32, 1)); + RzILOpEffect *branch_27 = BRANCH(NON_ZERO(op_AND_8), seq_then_25, seq_else_26); + 
+ RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_9, op_ASSIGN_3, branch_27); + return instruction_sequence; +} + +// if (Pv.new) memh(Rx++Ii) = Nt.new +RzILOpEffect *hex_il_op_s2_pstorerhnewtnew_pi(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Pv_new_op = ISA2REG(hi, 'v', true); + RzILOpPure *Pv_new = READ_REG(pkt, Pv_new_op, true); + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + const HexOp Nt_new_op = NREG2OP(bundle, 't'); + RzILOpPure *Nt_new = READ_REG(pkt, &Nt_new_op, true); + + // EA = ((ut32) Rx); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // s = s; + RzILOpEffect *imm_assign_9 = SETL("s", s); + + // Rx = Rx + s; + RzILOpPure *op_ADD_11 = ADD(READ_REG(pkt, Rx_op, false), VARL("s")); + RzILOpEffect *op_ASSIGN_12 = WRITE_REG(bundle, Rx_op, op_ADD_11); + + // mem_store_ut16(EA, ((ut16) ((st16) ((Nt_new >> 0x0) & 0xffff)))); + RzILOpPure *op_RSHIFT_18 = SHIFTRA(Nt_new, SN(32, 0)); + RzILOpPure *op_AND_20 = LOGAND(op_RSHIFT_18, SN(32, 0xffff)); + RzILOpEffect *ms_cast_ut16_22_23 = STOREW(VARL("EA"), CAST(16, IL_FALSE, CAST(16, MSB(op_AND_20), DUP(op_AND_20)))); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_24 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(Rx = Rx + s; mem_store_ut16(EA, ((ut16) ((st16) ((Nt_new >> ...; + RzILOpEffect *seq_then_25 = SEQN(2, op_ASSIGN_12, ms_cast_ut16_22_23); + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_26 = c_call_24; + + // if ((((st32) Pv_new) & 0x1)) {seq(Rx = Rx + s; mem_store_ut16(EA, ((ut16) ((st16) ((Nt_new >> ...} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_8 = LOGAND(CAST(32, MSB(Pv_new), DUP(Pv_new)), SN(32, 1)); + RzILOpEffect *branch_27 = BRANCH(NON_ZERO(op_AND_8), seq_then_25, seq_else_26); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_9, op_ASSIGN_3, branch_27); 
+ return instruction_sequence; +} + +// if (Pv) memh(Rs+Ii) = Rt +RzILOpEffect *hex_il_op_s2_pstorerht_io(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Pv_op = ISA2REG(hi, 'v', false); + RzILOpPure *Pv = READ_REG(pkt, Pv_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = ((ut32) Rs) + u; + RzILOpPure *op_ADD_5 = ADD(CAST(32, IL_FALSE, Rs), VARL("u")); + RzILOpEffect *op_ASSIGN_6 = SETL("EA", op_ADD_5); + + // mem_store_ut16(EA, ((ut16) ((st16) ((Rt >> 0x0) & 0xffff)))); + RzILOpPure *op_RSHIFT_16 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_18 = LOGAND(op_RSHIFT_16, SN(32, 0xffff)); + RzILOpEffect *ms_cast_ut16_20_21 = STOREW(VARL("EA"), CAST(16, IL_FALSE, CAST(16, MSB(op_AND_18), DUP(op_AND_18)))); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_22 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(mem_store_ut16(EA, ((ut16) ((st16) ((Rt >> 0x0) & 0xffff))))); + RzILOpEffect *seq_then_23 = ms_cast_ut16_20_21; + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_24 = c_call_22; + + // if ((((st32) Pv) & 0x1)) {seq(mem_store_ut16(EA, ((ut16) ((st16) ((Rt >> 0x0) & 0xffff)))))} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_11 = LOGAND(CAST(32, MSB(Pv), DUP(Pv)), SN(32, 1)); + RzILOpEffect *branch_25 = BRANCH(NON_ZERO(op_AND_11), seq_then_23, seq_else_24); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_6, branch_25); + return instruction_sequence; +} + +// if (Pv) memh(Rx++Ii) = Rt +RzILOpEffect *hex_il_op_s2_pstorerht_pi(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp 
*Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Pv_op = ISA2REG(hi, 'v', false); + RzILOpPure *Pv = READ_REG(pkt, Pv_op, false); + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // EA = ((ut32) Rx); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // s = s; + RzILOpEffect *imm_assign_9 = SETL("s", s); + + // Rx = Rx + s; + RzILOpPure *op_ADD_11 = ADD(READ_REG(pkt, Rx_op, false), VARL("s")); + RzILOpEffect *op_ASSIGN_12 = WRITE_REG(bundle, Rx_op, op_ADD_11); + + // mem_store_ut16(EA, ((ut16) ((st16) ((Rt >> 0x0) & 0xffff)))); + RzILOpPure *op_RSHIFT_18 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_20 = LOGAND(op_RSHIFT_18, SN(32, 0xffff)); + RzILOpEffect *ms_cast_ut16_22_23 = STOREW(VARL("EA"), CAST(16, IL_FALSE, CAST(16, MSB(op_AND_20), DUP(op_AND_20)))); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_24 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(Rx = Rx + s; mem_store_ut16(EA, ((ut16) ((st16) ((Rt >> 0x0) ...; + RzILOpEffect *seq_then_25 = SEQN(2, op_ASSIGN_12, ms_cast_ut16_22_23); + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_26 = c_call_24; + + // if ((((st32) Pv) & 0x1)) {seq(Rx = Rx + s; mem_store_ut16(EA, ((ut16) ((st16) ((Rt >> 0x0) ...} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_8 = LOGAND(CAST(32, MSB(Pv), DUP(Pv)), SN(32, 1)); + RzILOpEffect *branch_27 = BRANCH(NON_ZERO(op_AND_8), seq_then_25, seq_else_26); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_9, op_ASSIGN_3, branch_27); + return instruction_sequence; +} + +// if (Pv.new) memh(Rx++Ii) = Rt +RzILOpEffect *hex_il_op_s2_pstorerhtnew_pi(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Pv_new_op = ISA2REG(hi, 'v', true); + RzILOpPure *Pv_new = READ_REG(pkt, 
Pv_new_op, true); + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // EA = ((ut32) Rx); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // s = s; + RzILOpEffect *imm_assign_9 = SETL("s", s); + + // Rx = Rx + s; + RzILOpPure *op_ADD_11 = ADD(READ_REG(pkt, Rx_op, false), VARL("s")); + RzILOpEffect *op_ASSIGN_12 = WRITE_REG(bundle, Rx_op, op_ADD_11); + + // mem_store_ut16(EA, ((ut16) ((st16) ((Rt >> 0x0) & 0xffff)))); + RzILOpPure *op_RSHIFT_18 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_20 = LOGAND(op_RSHIFT_18, SN(32, 0xffff)); + RzILOpEffect *ms_cast_ut16_22_23 = STOREW(VARL("EA"), CAST(16, IL_FALSE, CAST(16, MSB(op_AND_20), DUP(op_AND_20)))); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_24 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(Rx = Rx + s; mem_store_ut16(EA, ((ut16) ((st16) ((Rt >> 0x0) ...; + RzILOpEffect *seq_then_25 = SEQN(2, op_ASSIGN_12, ms_cast_ut16_22_23); + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_26 = c_call_24; + + // if ((((st32) Pv_new) & 0x1)) {seq(Rx = Rx + s; mem_store_ut16(EA, ((ut16) ((st16) ((Rt >> 0x0) ...} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_8 = LOGAND(CAST(32, MSB(Pv_new), DUP(Pv_new)), SN(32, 1)); + RzILOpEffect *branch_27 = BRANCH(NON_ZERO(op_AND_8), seq_then_25, seq_else_26); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_9, op_ASSIGN_3, branch_27); + return instruction_sequence; +} + +// if (!Pv) memw(Rs+Ii) = Rt +RzILOpEffect *hex_il_op_s2_pstorerif_io(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Pv_op = ISA2REG(hi, 'v', false); + RzILOpPure *Pv = READ_REG(pkt, Pv_op, false); + const 
HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = ((ut32) Rs) + u; + RzILOpPure *op_ADD_5 = ADD(CAST(32, IL_FALSE, Rs), VARL("u")); + RzILOpEffect *op_ASSIGN_6 = SETL("EA", op_ADD_5); + + // mem_store_ut32(EA, ((ut32) Rt)); + RzILOpEffect *ms_cast_ut32_14_15 = STOREW(VARL("EA"), CAST(32, IL_FALSE, Rt)); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_16 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(mem_store_ut32(EA, ((ut32) Rt))); + RzILOpEffect *seq_then_17 = ms_cast_ut32_14_15; + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_18 = c_call_16; + + // if (! (((st32) Pv) & 0x1)) {seq(mem_store_ut32(EA, ((ut32) Rt)))} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_11 = LOGAND(CAST(32, MSB(Pv), DUP(Pv)), SN(32, 1)); + RzILOpPure *op_INV_12 = INV(NON_ZERO(op_AND_11)); + RzILOpEffect *branch_19 = BRANCH(op_INV_12, seq_then_17, seq_else_18); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_6, branch_19); + return instruction_sequence; +} + +// if (!Pv) memw(Rx++Ii) = Rt +RzILOpEffect *hex_il_op_s2_pstorerif_pi(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Pv_op = ISA2REG(hi, 'v', false); + RzILOpPure *Pv = READ_REG(pkt, Pv_op, false); + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // EA = ((ut32) Rx); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // s = s; + RzILOpEffect *imm_assign_10 = SETL("s", s); + + // Rx = Rx + s; + RzILOpPure *op_ADD_12 = ADD(READ_REG(pkt, Rx_op, false), VARL("s")); + RzILOpEffect *op_ASSIGN_13 = WRITE_REG(bundle, Rx_op, op_ADD_12); + + // mem_store_ut32(EA, ((ut32) Rt)); + RzILOpEffect 
*ms_cast_ut32_16_17 = STOREW(VARL("EA"), CAST(32, IL_FALSE, Rt)); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_18 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(Rx = Rx + s; mem_store_ut32(EA, ((ut32) Rt))); + RzILOpEffect *seq_then_19 = SEQN(2, op_ASSIGN_13, ms_cast_ut32_16_17); + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_20 = c_call_18; + + // if (! (((st32) Pv) & 0x1)) {seq(Rx = Rx + s; mem_store_ut32(EA, ((ut32) Rt)))} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_8 = LOGAND(CAST(32, MSB(Pv), DUP(Pv)), SN(32, 1)); + RzILOpPure *op_INV_9 = INV(NON_ZERO(op_AND_8)); + RzILOpEffect *branch_21 = BRANCH(op_INV_9, seq_then_19, seq_else_20); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_10, op_ASSIGN_3, branch_21); + return instruction_sequence; +} + +// if (!Pv.new) memw(Rx++Ii) = Rt +RzILOpEffect *hex_il_op_s2_pstorerifnew_pi(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Pv_new_op = ISA2REG(hi, 'v', true); + RzILOpPure *Pv_new = READ_REG(pkt, Pv_new_op, true); + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // EA = ((ut32) Rx); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // s = s; + RzILOpEffect *imm_assign_10 = SETL("s", s); + + // Rx = Rx + s; + RzILOpPure *op_ADD_12 = ADD(READ_REG(pkt, Rx_op, false), VARL("s")); + RzILOpEffect *op_ASSIGN_13 = WRITE_REG(bundle, Rx_op, op_ADD_12); + + // mem_store_ut32(EA, ((ut32) Rt)); + RzILOpEffect *ms_cast_ut32_16_17 = STOREW(VARL("EA"), CAST(32, IL_FALSE, Rt)); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_18 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(Rx = Rx + s; mem_store_ut32(EA, ((ut32) Rt))); + RzILOpEffect *seq_then_19 = SEQN(2, op_ASSIGN_13, ms_cast_ut32_16_17); + 
+ // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_20 = c_call_18; + + // if (! (((st32) Pv_new) & 0x1)) {seq(Rx = Rx + s; mem_store_ut32(EA, ((ut32) Rt)))} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_8 = LOGAND(CAST(32, MSB(Pv_new), DUP(Pv_new)), SN(32, 1)); + RzILOpPure *op_INV_9 = INV(NON_ZERO(op_AND_8)); + RzILOpEffect *branch_21 = BRANCH(op_INV_9, seq_then_19, seq_else_20); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_10, op_ASSIGN_3, branch_21); + return instruction_sequence; +} + +// if (!Pv) memw(Rs+Ii) = Nt.new +RzILOpEffect *hex_il_op_s2_pstorerinewf_io(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Pv_op = ISA2REG(hi, 'v', false); + RzILOpPure *Pv = READ_REG(pkt, Pv_op, false); + const HexOp Nt_new_op = NREG2OP(bundle, 't'); + RzILOpPure *Nt_new = READ_REG(pkt, &Nt_new_op, true); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = ((ut32) Rs) + u; + RzILOpPure *op_ADD_5 = ADD(CAST(32, IL_FALSE, Rs), VARL("u")); + RzILOpEffect *op_ASSIGN_6 = SETL("EA", op_ADD_5); + + // mem_store_ut32(EA, ((ut32) Nt_new)); + RzILOpEffect *ms_cast_ut32_14_15 = STOREW(VARL("EA"), CAST(32, IL_FALSE, Nt_new)); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_16 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(mem_store_ut32(EA, ((ut32) Nt_new))); + RzILOpEffect *seq_then_17 = ms_cast_ut32_14_15; + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_18 = c_call_16; + + // if (! 
(((st32) Pv) & 0x1)) {seq(mem_store_ut32(EA, ((ut32) Nt_new)))} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_11 = LOGAND(CAST(32, MSB(Pv), DUP(Pv)), SN(32, 1)); + RzILOpPure *op_INV_12 = INV(NON_ZERO(op_AND_11)); + RzILOpEffect *branch_19 = BRANCH(op_INV_12, seq_then_17, seq_else_18); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_6, branch_19); + return instruction_sequence; +} + +// if (!Pv) memw(Rx++Ii) = Nt.new +RzILOpEffect *hex_il_op_s2_pstorerinewf_pi(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Pv_op = ISA2REG(hi, 'v', false); + RzILOpPure *Pv = READ_REG(pkt, Pv_op, false); + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + const HexOp Nt_new_op = NREG2OP(bundle, 't'); + RzILOpPure *Nt_new = READ_REG(pkt, &Nt_new_op, true); + + // EA = ((ut32) Rx); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // s = s; + RzILOpEffect *imm_assign_10 = SETL("s", s); + + // Rx = Rx + s; + RzILOpPure *op_ADD_12 = ADD(READ_REG(pkt, Rx_op, false), VARL("s")); + RzILOpEffect *op_ASSIGN_13 = WRITE_REG(bundle, Rx_op, op_ADD_12); + + // mem_store_ut32(EA, ((ut32) Nt_new)); + RzILOpEffect *ms_cast_ut32_16_17 = STOREW(VARL("EA"), CAST(32, IL_FALSE, Nt_new)); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_18 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(Rx = Rx + s; mem_store_ut32(EA, ((ut32) Nt_new))); + RzILOpEffect *seq_then_19 = SEQN(2, op_ASSIGN_13, ms_cast_ut32_16_17); + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_20 = c_call_18; + + // if (! 
(((st32) Pv) & 0x1)) {seq(Rx = Rx + s; mem_store_ut32(EA, ((ut32) Nt_new)))} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_8 = LOGAND(CAST(32, MSB(Pv), DUP(Pv)), SN(32, 1)); + RzILOpPure *op_INV_9 = INV(NON_ZERO(op_AND_8)); + RzILOpEffect *branch_21 = BRANCH(op_INV_9, seq_then_19, seq_else_20); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_10, op_ASSIGN_3, branch_21); + return instruction_sequence; +} + +// if (!Pv.new) memw(Rx++Ii) = Nt.new +RzILOpEffect *hex_il_op_s2_pstorerinewfnew_pi(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Pv_new_op = ISA2REG(hi, 'v', true); + RzILOpPure *Pv_new = READ_REG(pkt, Pv_new_op, true); + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + const HexOp Nt_new_op = NREG2OP(bundle, 't'); + RzILOpPure *Nt_new = READ_REG(pkt, &Nt_new_op, true); + + // EA = ((ut32) Rx); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // s = s; + RzILOpEffect *imm_assign_10 = SETL("s", s); + + // Rx = Rx + s; + RzILOpPure *op_ADD_12 = ADD(READ_REG(pkt, Rx_op, false), VARL("s")); + RzILOpEffect *op_ASSIGN_13 = WRITE_REG(bundle, Rx_op, op_ADD_12); + + // mem_store_ut32(EA, ((ut32) Nt_new)); + RzILOpEffect *ms_cast_ut32_16_17 = STOREW(VARL("EA"), CAST(32, IL_FALSE, Nt_new)); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_18 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(Rx = Rx + s; mem_store_ut32(EA, ((ut32) Nt_new))); + RzILOpEffect *seq_then_19 = SEQN(2, op_ASSIGN_13, ms_cast_ut32_16_17); + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_20 = c_call_18; + + // if (! 
(((st32) Pv_new) & 0x1)) {seq(Rx = Rx + s; mem_store_ut32(EA, ((ut32) Nt_new)))} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_8 = LOGAND(CAST(32, MSB(Pv_new), DUP(Pv_new)), SN(32, 1)); + RzILOpPure *op_INV_9 = INV(NON_ZERO(op_AND_8)); + RzILOpEffect *branch_21 = BRANCH(op_INV_9, seq_then_19, seq_else_20); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_10, op_ASSIGN_3, branch_21); + return instruction_sequence; +} + +// if (Pv) memw(Rs+Ii) = Nt.new +RzILOpEffect *hex_il_op_s2_pstorerinewt_io(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Pv_op = ISA2REG(hi, 'v', false); + RzILOpPure *Pv = READ_REG(pkt, Pv_op, false); + const HexOp Nt_new_op = NREG2OP(bundle, 't'); + RzILOpPure *Nt_new = READ_REG(pkt, &Nt_new_op, true); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = ((ut32) Rs) + u; + RzILOpPure *op_ADD_5 = ADD(CAST(32, IL_FALSE, Rs), VARL("u")); + RzILOpEffect *op_ASSIGN_6 = SETL("EA", op_ADD_5); + + // mem_store_ut32(EA, ((ut32) Nt_new)); + RzILOpEffect *ms_cast_ut32_13_14 = STOREW(VARL("EA"), CAST(32, IL_FALSE, Nt_new)); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_15 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(mem_store_ut32(EA, ((ut32) Nt_new))); + RzILOpEffect *seq_then_16 = ms_cast_ut32_13_14; + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_17 = c_call_15; + + // if ((((st32) Pv) & 0x1)) {seq(mem_store_ut32(EA, ((ut32) Nt_new)))} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_11 = LOGAND(CAST(32, MSB(Pv), DUP(Pv)), SN(32, 1)); + RzILOpEffect *branch_18 = BRANCH(NON_ZERO(op_AND_11), seq_then_16, seq_else_17); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_6, branch_18); + return instruction_sequence; +} + +// if (Pv) 
memw(Rx++Ii) = Nt.new +RzILOpEffect *hex_il_op_s2_pstorerinewt_pi(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Pv_op = ISA2REG(hi, 'v', false); + RzILOpPure *Pv = READ_REG(pkt, Pv_op, false); + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + const HexOp Nt_new_op = NREG2OP(bundle, 't'); + RzILOpPure *Nt_new = READ_REG(pkt, &Nt_new_op, true); + + // EA = ((ut32) Rx); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // s = s; + RzILOpEffect *imm_assign_9 = SETL("s", s); + + // Rx = Rx + s; + RzILOpPure *op_ADD_11 = ADD(READ_REG(pkt, Rx_op, false), VARL("s")); + RzILOpEffect *op_ASSIGN_12 = WRITE_REG(bundle, Rx_op, op_ADD_11); + + // mem_store_ut32(EA, ((ut32) Nt_new)); + RzILOpEffect *ms_cast_ut32_15_16 = STOREW(VARL("EA"), CAST(32, IL_FALSE, Nt_new)); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_17 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(Rx = Rx + s; mem_store_ut32(EA, ((ut32) Nt_new))); + RzILOpEffect *seq_then_18 = SEQN(2, op_ASSIGN_12, ms_cast_ut32_15_16); + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_19 = c_call_17; + + // if ((((st32) Pv) & 0x1)) {seq(Rx = Rx + s; mem_store_ut32(EA, ((ut32) Nt_new)))} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_8 = LOGAND(CAST(32, MSB(Pv), DUP(Pv)), SN(32, 1)); + RzILOpEffect *branch_20 = BRANCH(NON_ZERO(op_AND_8), seq_then_18, seq_else_19); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_9, op_ASSIGN_3, branch_20); + return instruction_sequence; +} + +// if (Pv.new) memw(Rx++Ii) = Nt.new +RzILOpEffect *hex_il_op_s2_pstorerinewtnew_pi(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Pv_new_op = ISA2REG(hi, 'v', true); + RzILOpPure *Pv_new = 
READ_REG(pkt, Pv_new_op, true); + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + const HexOp Nt_new_op = NREG2OP(bundle, 't'); + RzILOpPure *Nt_new = READ_REG(pkt, &Nt_new_op, true); + + // EA = ((ut32) Rx); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // s = s; + RzILOpEffect *imm_assign_9 = SETL("s", s); + + // Rx = Rx + s; + RzILOpPure *op_ADD_11 = ADD(READ_REG(pkt, Rx_op, false), VARL("s")); + RzILOpEffect *op_ASSIGN_12 = WRITE_REG(bundle, Rx_op, op_ADD_11); + + // mem_store_ut32(EA, ((ut32) Nt_new)); + RzILOpEffect *ms_cast_ut32_15_16 = STOREW(VARL("EA"), CAST(32, IL_FALSE, Nt_new)); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_17 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(Rx = Rx + s; mem_store_ut32(EA, ((ut32) Nt_new))); + RzILOpEffect *seq_then_18 = SEQN(2, op_ASSIGN_12, ms_cast_ut32_15_16); + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_19 = c_call_17; + + // if ((((st32) Pv_new) & 0x1)) {seq(Rx = Rx + s; mem_store_ut32(EA, ((ut32) Nt_new)))} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_8 = LOGAND(CAST(32, MSB(Pv_new), DUP(Pv_new)), SN(32, 1)); + RzILOpEffect *branch_20 = BRANCH(NON_ZERO(op_AND_8), seq_then_18, seq_else_19); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_9, op_ASSIGN_3, branch_20); + return instruction_sequence; +} + +// if (Pv) memw(Rs+Ii) = Rt +RzILOpEffect *hex_il_op_s2_pstorerit_io(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Pv_op = ISA2REG(hi, 'v', false); + RzILOpPure *Pv = READ_REG(pkt, Pv_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = ((ut32) Rs) + u; + 
RzILOpPure *op_ADD_5 = ADD(CAST(32, IL_FALSE, Rs), VARL("u")); + RzILOpEffect *op_ASSIGN_6 = SETL("EA", op_ADD_5); + + // mem_store_ut32(EA, ((ut32) Rt)); + RzILOpEffect *ms_cast_ut32_13_14 = STOREW(VARL("EA"), CAST(32, IL_FALSE, Rt)); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_15 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(mem_store_ut32(EA, ((ut32) Rt))); + RzILOpEffect *seq_then_16 = ms_cast_ut32_13_14; + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_17 = c_call_15; + + // if ((((st32) Pv) & 0x1)) {seq(mem_store_ut32(EA, ((ut32) Rt)))} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_11 = LOGAND(CAST(32, MSB(Pv), DUP(Pv)), SN(32, 1)); + RzILOpEffect *branch_18 = BRANCH(NON_ZERO(op_AND_11), seq_then_16, seq_else_17); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_6, branch_18); + return instruction_sequence; +} + +// if (Pv) memw(Rx++Ii) = Rt +RzILOpEffect *hex_il_op_s2_pstorerit_pi(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Pv_op = ISA2REG(hi, 'v', false); + RzILOpPure *Pv = READ_REG(pkt, Pv_op, false); + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // EA = ((ut32) Rx); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // s = s; + RzILOpEffect *imm_assign_9 = SETL("s", s); + + // Rx = Rx + s; + RzILOpPure *op_ADD_11 = ADD(READ_REG(pkt, Rx_op, false), VARL("s")); + RzILOpEffect *op_ASSIGN_12 = WRITE_REG(bundle, Rx_op, op_ADD_11); + + // mem_store_ut32(EA, ((ut32) Rt)); + RzILOpEffect *ms_cast_ut32_15_16 = STOREW(VARL("EA"), CAST(32, IL_FALSE, Rt)); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_17 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(Rx = Rx + s; mem_store_ut32(EA, ((ut32) Rt))); + RzILOpEffect 
*seq_then_18 = SEQN(2, op_ASSIGN_12, ms_cast_ut32_15_16); + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_19 = c_call_17; + + // if ((((st32) Pv) & 0x1)) {seq(Rx = Rx + s; mem_store_ut32(EA, ((ut32) Rt)))} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_8 = LOGAND(CAST(32, MSB(Pv), DUP(Pv)), SN(32, 1)); + RzILOpEffect *branch_20 = BRANCH(NON_ZERO(op_AND_8), seq_then_18, seq_else_19); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_9, op_ASSIGN_3, branch_20); + return instruction_sequence; +} + +// if (Pv.new) memw(Rx++Ii) = Rt +RzILOpEffect *hex_il_op_s2_pstoreritnew_pi(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Pv_new_op = ISA2REG(hi, 'v', true); + RzILOpPure *Pv_new = READ_REG(pkt, Pv_new_op, true); + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // EA = ((ut32) Rx); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // s = s; + RzILOpEffect *imm_assign_9 = SETL("s", s); + + // Rx = Rx + s; + RzILOpPure *op_ADD_11 = ADD(READ_REG(pkt, Rx_op, false), VARL("s")); + RzILOpEffect *op_ASSIGN_12 = WRITE_REG(bundle, Rx_op, op_ADD_11); + + // mem_store_ut32(EA, ((ut32) Rt)); + RzILOpEffect *ms_cast_ut32_15_16 = STOREW(VARL("EA"), CAST(32, IL_FALSE, Rt)); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_17 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(Rx = Rx + s; mem_store_ut32(EA, ((ut32) Rt))); + RzILOpEffect *seq_then_18 = SEQN(2, op_ASSIGN_12, ms_cast_ut32_15_16); + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_19 = c_call_17; + + // if ((((st32) Pv_new) & 0x1)) {seq(Rx = Rx + s; mem_store_ut32(EA, ((ut32) Rt)))} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_8 = LOGAND(CAST(32, MSB(Pv_new), DUP(Pv_new)), SN(32, 1)); + 
RzILOpEffect *branch_20 = BRANCH(NON_ZERO(op_AND_8), seq_then_18, seq_else_19); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_9, op_ASSIGN_3, branch_20); + return instruction_sequence; +} + +// Rd = setbit(Rs,Ii) +RzILOpEffect *hex_il_op_s2_setbit_i(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + + // u = u; + RzILOpEffect *imm_assign_3 = SETL("u", u); + + // Rd = (Rs | (0x1 << u)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(SN(32, 1), VARL("u")); + RzILOpPure *op_OR_6 = LOGOR(Rs, op_LSHIFT_5); + RzILOpEffect *op_ASSIGN_7 = WRITE_REG(bundle, Rd_op, op_OR_6); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_3, op_ASSIGN_7); + return instruction_sequence; +} + +// Rd = setbit(Rs,Rt) +RzILOpEffect *hex_il_op_s2_setbit_r(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Rd = ((st32) (((ut64) Rs) | ((sextract64(((ut64) Rt), 0x0, 0x7) < ((st64) 0x0)) ? 
((((ut64) ((ut32) 0x1)) >> (-sextract64(((ut64) Rt), 0x0, 0x7)) - ((st64) 0x1)) >> 0x1) : (((ut64) ((ut32) 0x1)) << sextract64(((ut64) Rt), 0x0, 0x7))))); + RzILOpPure *op_LT_13 = SLT(SEXTRACT64(CAST(64, IL_FALSE, Rt), SN(32, 0), SN(32, 7)), CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_NEG_25 = NEG(SEXTRACT64(CAST(64, IL_FALSE, DUP(Rt)), SN(32, 0), SN(32, 7))); + RzILOpPure *op_SUB_28 = SUB(op_NEG_25, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *op_RSHIFT_29 = SHIFTR0(CAST(64, IL_FALSE, CAST(32, IL_FALSE, SN(32, 1))), op_SUB_28); + RzILOpPure *op_RSHIFT_31 = SHIFTR0(op_RSHIFT_29, SN(32, 1)); + RzILOpPure *op_LSHIFT_43 = SHIFTL0(CAST(64, IL_FALSE, CAST(32, IL_FALSE, SN(32, 1))), SEXTRACT64(CAST(64, IL_FALSE, DUP(Rt)), SN(32, 0), SN(32, 7))); + RzILOpPure *cond_44 = ITE(op_LT_13, op_RSHIFT_31, op_LSHIFT_43); + RzILOpPure *op_OR_46 = LOGOR(CAST(64, IL_FALSE, Rs), cond_44); + RzILOpEffect *op_ASSIGN_48 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, op_OR_46)); + + RzILOpEffect *instruction_sequence = op_ASSIGN_48; + return instruction_sequence; +} + +// Rdd = shuffeb(Rss,Rtt) +RzILOpEffect *hex_il_op_s2_shuffeb(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: st32 i; + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_2 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_5 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp502 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_7 = SETL("h_tmp502", VARL("i")); + + // seq(h_tmp502 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_8 = SEQN(2, op_ASSIGN_hybrid_tmp_7, op_INC_5); + + // Rdd = ((st64) (((ut64) (Rdd & (~(0xff << i * 0x2 * 0x8)))) | (((ut64) (((st64) ((st32) ((st8) ((Rtt >> i * 0x2 * 0x8) & ((st64) 
0xff))))) & 0xff)) << i * 0x2 * 0x8))); + RzILOpPure *op_MUL_12 = MUL(VARL("i"), SN(32, 2)); + RzILOpPure *op_MUL_14 = MUL(op_MUL_12, SN(32, 8)); + RzILOpPure *op_LSHIFT_15 = SHIFTL0(SN(64, 0xff), op_MUL_14); + RzILOpPure *op_NOT_16 = LOGNOT(op_LSHIFT_15); + RzILOpPure *op_AND_17 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_16); + RzILOpPure *op_MUL_20 = MUL(VARL("i"), SN(32, 2)); + RzILOpPure *op_MUL_22 = MUL(op_MUL_20, SN(32, 8)); + RzILOpPure *op_RSHIFT_23 = SHIFTRA(Rtt, op_MUL_22); + RzILOpPure *op_AND_26 = LOGAND(op_RSHIFT_23, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_AND_31 = LOGAND(CAST(64, MSB(CAST(32, MSB(CAST(8, MSB(op_AND_26), DUP(op_AND_26))), CAST(8, MSB(DUP(op_AND_26)), DUP(op_AND_26)))), CAST(32, MSB(CAST(8, MSB(DUP(op_AND_26)), DUP(op_AND_26))), CAST(8, MSB(DUP(op_AND_26)), DUP(op_AND_26)))), SN(64, 0xff)); + RzILOpPure *op_MUL_34 = MUL(VARL("i"), SN(32, 2)); + RzILOpPure *op_MUL_36 = MUL(op_MUL_34, SN(32, 8)); + RzILOpPure *op_LSHIFT_37 = SHIFTL0(CAST(64, IL_FALSE, op_AND_31), op_MUL_36); + RzILOpPure *op_OR_39 = LOGOR(CAST(64, IL_FALSE, op_AND_17), op_LSHIFT_37); + RzILOpEffect *op_ASSIGN_41 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_39)); + + // Rdd = ((st64) (((ut64) (Rdd & (~(0xff << i * 0x2 + 0x1 * 0x8)))) | (((ut64) (((st64) ((st32) ((st8) ((Rss >> i * 0x2 * 0x8) & ((st64) 0xff))))) & 0xff)) << i * 0x2 + 0x1 * 0x8))); + RzILOpPure *op_MUL_45 = MUL(VARL("i"), SN(32, 2)); + RzILOpPure *op_ADD_47 = ADD(op_MUL_45, SN(32, 1)); + RzILOpPure *op_MUL_49 = MUL(op_ADD_47, SN(32, 8)); + RzILOpPure *op_LSHIFT_50 = SHIFTL0(SN(64, 0xff), op_MUL_49); + RzILOpPure *op_NOT_51 = LOGNOT(op_LSHIFT_50); + RzILOpPure *op_AND_52 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_51); + RzILOpPure *op_MUL_55 = MUL(VARL("i"), SN(32, 2)); + RzILOpPure *op_MUL_57 = MUL(op_MUL_55, SN(32, 8)); + RzILOpPure *op_RSHIFT_58 = SHIFTRA(Rss, op_MUL_57); + RzILOpPure *op_AND_61 = LOGAND(op_RSHIFT_58, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure 
*op_AND_66 = LOGAND(CAST(64, MSB(CAST(32, MSB(CAST(8, MSB(op_AND_61), DUP(op_AND_61))), CAST(8, MSB(DUP(op_AND_61)), DUP(op_AND_61)))), CAST(32, MSB(CAST(8, MSB(DUP(op_AND_61)), DUP(op_AND_61))), CAST(8, MSB(DUP(op_AND_61)), DUP(op_AND_61)))), SN(64, 0xff)); + RzILOpPure *op_MUL_69 = MUL(VARL("i"), SN(32, 2)); + RzILOpPure *op_ADD_71 = ADD(op_MUL_69, SN(32, 1)); + RzILOpPure *op_MUL_73 = MUL(op_ADD_71, SN(32, 8)); + RzILOpPure *op_LSHIFT_74 = SHIFTL0(CAST(64, IL_FALSE, op_AND_66), op_MUL_73); + RzILOpPure *op_OR_76 = LOGOR(CAST(64, IL_FALSE, op_AND_52), op_LSHIFT_74); + RzILOpEffect *op_ASSIGN_78 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_76)); + + // seq(h_tmp502; Rdd = ((st64) (((ut64) (Rdd & (~(0xff << i * 0x2 * ...; + RzILOpEffect *seq_80 = SEQN(2, op_ASSIGN_41, op_ASSIGN_78); + + // seq(seq(h_tmp502; Rdd = ((st64) (((ut64) (Rdd & (~(0xff << i * 0 ...; + RzILOpEffect *seq_81 = SEQN(2, seq_80, seq_8); + + // while ((i < 0x4)) { seq(seq(h_tmp502; Rdd = ((st64) (((ut64) (Rdd & (~(0xff << i * 0 ... 
}; + RzILOpPure *op_LT_4 = SLT(VARL("i"), SN(32, 4)); + RzILOpEffect *for_82 = REPEAT(op_LT_4, seq_81); + + // seq(i = 0x0; while ((i < 0x4)) { seq(seq(h_tmp502; Rdd = ((st64) ...; + RzILOpEffect *seq_83 = SEQN(2, op_ASSIGN_2, for_82); + + RzILOpEffect *instruction_sequence = seq_83; + return instruction_sequence; +} + +// Rdd = shuffeh(Rss,Rtt) +RzILOpEffect *hex_il_op_s2_shuffeh(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: st32 i; + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_2 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_5 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp503 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_7 = SETL("h_tmp503", VARL("i")); + + // seq(h_tmp503 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_8 = SEQN(2, op_ASSIGN_hybrid_tmp_7, op_INC_5); + + // Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * 0x2 * 0x10)))) | (((ut64) (((st32) ((st16) ((Rtt >> i * 0x2 * 0x10) & ((st64) 0xffff)))) & 0xffff)) << i * 0x2 * 0x10))); + RzILOpPure *op_MUL_12 = MUL(VARL("i"), SN(32, 2)); + RzILOpPure *op_MUL_14 = MUL(op_MUL_12, SN(32, 16)); + RzILOpPure *op_LSHIFT_15 = SHIFTL0(SN(64, 0xffff), op_MUL_14); + RzILOpPure *op_NOT_16 = LOGNOT(op_LSHIFT_15); + RzILOpPure *op_AND_17 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_16); + RzILOpPure *op_MUL_20 = MUL(VARL("i"), SN(32, 2)); + RzILOpPure *op_MUL_22 = MUL(op_MUL_20, SN(32, 16)); + RzILOpPure *op_RSHIFT_23 = SHIFTRA(Rtt, op_MUL_22); + RzILOpPure *op_AND_26 = LOGAND(op_RSHIFT_23, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_AND_30 = LOGAND(CAST(32, MSB(CAST(16, MSB(op_AND_26), DUP(op_AND_26))), CAST(16, MSB(DUP(op_AND_26)), DUP(op_AND_26))), SN(32, 0xffff)); + RzILOpPure 
*op_MUL_33 = MUL(VARL("i"), SN(32, 2)); + RzILOpPure *op_MUL_35 = MUL(op_MUL_33, SN(32, 16)); + RzILOpPure *op_LSHIFT_36 = SHIFTL0(CAST(64, IL_FALSE, op_AND_30), op_MUL_35); + RzILOpPure *op_OR_38 = LOGOR(CAST(64, IL_FALSE, op_AND_17), op_LSHIFT_36); + RzILOpEffect *op_ASSIGN_40 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_38)); + + // Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * 0x2 + 0x1 * 0x10)))) | (((ut64) (((st32) ((st16) ((Rss >> i * 0x2 * 0x10) & ((st64) 0xffff)))) & 0xffff)) << i * 0x2 + 0x1 * 0x10))); + RzILOpPure *op_MUL_44 = MUL(VARL("i"), SN(32, 2)); + RzILOpPure *op_ADD_46 = ADD(op_MUL_44, SN(32, 1)); + RzILOpPure *op_MUL_48 = MUL(op_ADD_46, SN(32, 16)); + RzILOpPure *op_LSHIFT_49 = SHIFTL0(SN(64, 0xffff), op_MUL_48); + RzILOpPure *op_NOT_50 = LOGNOT(op_LSHIFT_49); + RzILOpPure *op_AND_51 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_50); + RzILOpPure *op_MUL_54 = MUL(VARL("i"), SN(32, 2)); + RzILOpPure *op_MUL_56 = MUL(op_MUL_54, SN(32, 16)); + RzILOpPure *op_RSHIFT_57 = SHIFTRA(Rss, op_MUL_56); + RzILOpPure *op_AND_60 = LOGAND(op_RSHIFT_57, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_AND_64 = LOGAND(CAST(32, MSB(CAST(16, MSB(op_AND_60), DUP(op_AND_60))), CAST(16, MSB(DUP(op_AND_60)), DUP(op_AND_60))), SN(32, 0xffff)); + RzILOpPure *op_MUL_67 = MUL(VARL("i"), SN(32, 2)); + RzILOpPure *op_ADD_69 = ADD(op_MUL_67, SN(32, 1)); + RzILOpPure *op_MUL_71 = MUL(op_ADD_69, SN(32, 16)); + RzILOpPure *op_LSHIFT_72 = SHIFTL0(CAST(64, IL_FALSE, op_AND_64), op_MUL_71); + RzILOpPure *op_OR_74 = LOGOR(CAST(64, IL_FALSE, op_AND_51), op_LSHIFT_72); + RzILOpEffect *op_ASSIGN_76 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_74)); + + // seq(h_tmp503; Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * 0x2 ...; + RzILOpEffect *seq_78 = SEQN(2, op_ASSIGN_40, op_ASSIGN_76); + + // seq(seq(h_tmp503; Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * ...; + RzILOpEffect *seq_79 = SEQN(2, seq_78, seq_8); + + // while ((i < 0x2)) { seq(seq(h_tmp503; 
Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * ... }; + RzILOpPure *op_LT_4 = SLT(VARL("i"), SN(32, 2)); + RzILOpEffect *for_80 = REPEAT(op_LT_4, seq_79); + + // seq(i = 0x0; while ((i < 0x2)) { seq(seq(h_tmp503; Rdd = ((st64) ...; + RzILOpEffect *seq_81 = SEQN(2, op_ASSIGN_2, for_80); + + RzILOpEffect *instruction_sequence = seq_81; + return instruction_sequence; +} + +// Rdd = shuffob(Rtt,Rss) +RzILOpEffect *hex_il_op_s2_shuffob(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: st32 i; + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_2 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_5 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp504 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_7 = SETL("h_tmp504", VARL("i")); + + // seq(h_tmp504 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_8 = SEQN(2, op_ASSIGN_hybrid_tmp_7, op_INC_5); + + // Rdd = ((st64) (((ut64) (Rdd & (~(0xff << i * 0x2 * 0x8)))) | (((ut64) (((st64) ((st32) ((st8) ((Rss >> i * 0x2 + 0x1 * 0x8) & ((st64) 0xff))))) & 0xff)) << i * 0x2 * 0x8))); + RzILOpPure *op_MUL_12 = MUL(VARL("i"), SN(32, 2)); + RzILOpPure *op_MUL_14 = MUL(op_MUL_12, SN(32, 8)); + RzILOpPure *op_LSHIFT_15 = SHIFTL0(SN(64, 0xff), op_MUL_14); + RzILOpPure *op_NOT_16 = LOGNOT(op_LSHIFT_15); + RzILOpPure *op_AND_17 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_16); + RzILOpPure *op_MUL_20 = MUL(VARL("i"), SN(32, 2)); + RzILOpPure *op_ADD_22 = ADD(op_MUL_20, SN(32, 1)); + RzILOpPure *op_MUL_24 = MUL(op_ADD_22, SN(32, 8)); + RzILOpPure *op_RSHIFT_25 = SHIFTRA(Rss, op_MUL_24); + RzILOpPure *op_AND_28 = LOGAND(op_RSHIFT_25, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_AND_33 = LOGAND(CAST(64, MSB(CAST(32, 
MSB(CAST(8, MSB(op_AND_28), DUP(op_AND_28))), CAST(8, MSB(DUP(op_AND_28)), DUP(op_AND_28)))), CAST(32, MSB(CAST(8, MSB(DUP(op_AND_28)), DUP(op_AND_28))), CAST(8, MSB(DUP(op_AND_28)), DUP(op_AND_28)))), SN(64, 0xff)); + RzILOpPure *op_MUL_36 = MUL(VARL("i"), SN(32, 2)); + RzILOpPure *op_MUL_38 = MUL(op_MUL_36, SN(32, 8)); + RzILOpPure *op_LSHIFT_39 = SHIFTL0(CAST(64, IL_FALSE, op_AND_33), op_MUL_38); + RzILOpPure *op_OR_41 = LOGOR(CAST(64, IL_FALSE, op_AND_17), op_LSHIFT_39); + RzILOpEffect *op_ASSIGN_43 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_41)); + + // Rdd = ((st64) (((ut64) (Rdd & (~(0xff << i * 0x2 + 0x1 * 0x8)))) | (((ut64) (((st64) ((st32) ((st8) ((Rtt >> i * 0x2 + 0x1 * 0x8) & ((st64) 0xff))))) & 0xff)) << i * 0x2 + 0x1 * 0x8))); + RzILOpPure *op_MUL_47 = MUL(VARL("i"), SN(32, 2)); + RzILOpPure *op_ADD_49 = ADD(op_MUL_47, SN(32, 1)); + RzILOpPure *op_MUL_51 = MUL(op_ADD_49, SN(32, 8)); + RzILOpPure *op_LSHIFT_52 = SHIFTL0(SN(64, 0xff), op_MUL_51); + RzILOpPure *op_NOT_53 = LOGNOT(op_LSHIFT_52); + RzILOpPure *op_AND_54 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_53); + RzILOpPure *op_MUL_57 = MUL(VARL("i"), SN(32, 2)); + RzILOpPure *op_ADD_59 = ADD(op_MUL_57, SN(32, 1)); + RzILOpPure *op_MUL_61 = MUL(op_ADD_59, SN(32, 8)); + RzILOpPure *op_RSHIFT_62 = SHIFTRA(Rtt, op_MUL_61); + RzILOpPure *op_AND_65 = LOGAND(op_RSHIFT_62, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_AND_70 = LOGAND(CAST(64, MSB(CAST(32, MSB(CAST(8, MSB(op_AND_65), DUP(op_AND_65))), CAST(8, MSB(DUP(op_AND_65)), DUP(op_AND_65)))), CAST(32, MSB(CAST(8, MSB(DUP(op_AND_65)), DUP(op_AND_65))), CAST(8, MSB(DUP(op_AND_65)), DUP(op_AND_65)))), SN(64, 0xff)); + RzILOpPure *op_MUL_73 = MUL(VARL("i"), SN(32, 2)); + RzILOpPure *op_ADD_75 = ADD(op_MUL_73, SN(32, 1)); + RzILOpPure *op_MUL_77 = MUL(op_ADD_75, SN(32, 8)); + RzILOpPure *op_LSHIFT_78 = SHIFTL0(CAST(64, IL_FALSE, op_AND_70), op_MUL_77); + RzILOpPure *op_OR_80 = LOGOR(CAST(64, IL_FALSE, op_AND_54), op_LSHIFT_78); + 
RzILOpEffect *op_ASSIGN_82 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_80)); + + // seq(h_tmp504; Rdd = ((st64) (((ut64) (Rdd & (~(0xff << i * 0x2 * ...; + RzILOpEffect *seq_84 = SEQN(2, op_ASSIGN_43, op_ASSIGN_82); + + // seq(seq(h_tmp504; Rdd = ((st64) (((ut64) (Rdd & (~(0xff << i * 0 ...; + RzILOpEffect *seq_85 = SEQN(2, seq_84, seq_8); + + // while ((i < 0x4)) { seq(seq(h_tmp504; Rdd = ((st64) (((ut64) (Rdd & (~(0xff << i * 0 ... }; + RzILOpPure *op_LT_4 = SLT(VARL("i"), SN(32, 4)); + RzILOpEffect *for_86 = REPEAT(op_LT_4, seq_85); + + // seq(i = 0x0; while ((i < 0x4)) { seq(seq(h_tmp504; Rdd = ((st64) ...; + RzILOpEffect *seq_87 = SEQN(2, op_ASSIGN_2, for_86); + + RzILOpEffect *instruction_sequence = seq_87; + return instruction_sequence; +} + +// Rdd = shuffoh(Rtt,Rss) +RzILOpEffect *hex_il_op_s2_shuffoh(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: st32 i; + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_2 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_5 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp505 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_7 = SETL("h_tmp505", VARL("i")); + + // seq(h_tmp505 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_8 = SEQN(2, op_ASSIGN_hybrid_tmp_7, op_INC_5); + + // Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * 0x2 * 0x10)))) | (((ut64) (((st32) ((st16) ((Rss >> i * 0x2 + 0x1 * 0x10) & ((st64) 0xffff)))) & 0xffff)) << i * 0x2 * 0x10))); + RzILOpPure *op_MUL_12 = MUL(VARL("i"), SN(32, 2)); + RzILOpPure *op_MUL_14 = MUL(op_MUL_12, SN(32, 16)); + RzILOpPure *op_LSHIFT_15 = SHIFTL0(SN(64, 0xffff), op_MUL_14); + RzILOpPure *op_NOT_16 = LOGNOT(op_LSHIFT_15); + RzILOpPure *op_AND_17 = 
LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_16); + RzILOpPure *op_MUL_20 = MUL(VARL("i"), SN(32, 2)); + RzILOpPure *op_ADD_22 = ADD(op_MUL_20, SN(32, 1)); + RzILOpPure *op_MUL_24 = MUL(op_ADD_22, SN(32, 16)); + RzILOpPure *op_RSHIFT_25 = SHIFTRA(Rss, op_MUL_24); + RzILOpPure *op_AND_28 = LOGAND(op_RSHIFT_25, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_AND_32 = LOGAND(CAST(32, MSB(CAST(16, MSB(op_AND_28), DUP(op_AND_28))), CAST(16, MSB(DUP(op_AND_28)), DUP(op_AND_28))), SN(32, 0xffff)); + RzILOpPure *op_MUL_35 = MUL(VARL("i"), SN(32, 2)); + RzILOpPure *op_MUL_37 = MUL(op_MUL_35, SN(32, 16)); + RzILOpPure *op_LSHIFT_38 = SHIFTL0(CAST(64, IL_FALSE, op_AND_32), op_MUL_37); + RzILOpPure *op_OR_40 = LOGOR(CAST(64, IL_FALSE, op_AND_17), op_LSHIFT_38); + RzILOpEffect *op_ASSIGN_42 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_40)); + + // Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * 0x2 + 0x1 * 0x10)))) | (((ut64) (((st32) ((st16) ((Rtt >> i * 0x2 + 0x1 * 0x10) & ((st64) 0xffff)))) & 0xffff)) << i * 0x2 + 0x1 * 0x10))); + RzILOpPure *op_MUL_46 = MUL(VARL("i"), SN(32, 2)); + RzILOpPure *op_ADD_48 = ADD(op_MUL_46, SN(32, 1)); + RzILOpPure *op_MUL_50 = MUL(op_ADD_48, SN(32, 16)); + RzILOpPure *op_LSHIFT_51 = SHIFTL0(SN(64, 0xffff), op_MUL_50); + RzILOpPure *op_NOT_52 = LOGNOT(op_LSHIFT_51); + RzILOpPure *op_AND_53 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_52); + RzILOpPure *op_MUL_56 = MUL(VARL("i"), SN(32, 2)); + RzILOpPure *op_ADD_58 = ADD(op_MUL_56, SN(32, 1)); + RzILOpPure *op_MUL_60 = MUL(op_ADD_58, SN(32, 16)); + RzILOpPure *op_RSHIFT_61 = SHIFTRA(Rtt, op_MUL_60); + RzILOpPure *op_AND_64 = LOGAND(op_RSHIFT_61, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_AND_68 = LOGAND(CAST(32, MSB(CAST(16, MSB(op_AND_64), DUP(op_AND_64))), CAST(16, MSB(DUP(op_AND_64)), DUP(op_AND_64))), SN(32, 0xffff)); + RzILOpPure *op_MUL_71 = MUL(VARL("i"), SN(32, 2)); + RzILOpPure *op_ADD_73 = ADD(op_MUL_71, SN(32, 1)); + RzILOpPure *op_MUL_75 = 
MUL(op_ADD_73, SN(32, 16)); + RzILOpPure *op_LSHIFT_76 = SHIFTL0(CAST(64, IL_FALSE, op_AND_68), op_MUL_75); + RzILOpPure *op_OR_78 = LOGOR(CAST(64, IL_FALSE, op_AND_53), op_LSHIFT_76); + RzILOpEffect *op_ASSIGN_80 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_78)); + + // seq(h_tmp505; Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * 0x2 ...; + RzILOpEffect *seq_82 = SEQN(2, op_ASSIGN_42, op_ASSIGN_80); + + // seq(seq(h_tmp505; Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * ...; + RzILOpEffect *seq_83 = SEQN(2, seq_82, seq_8); + + // while ((i < 0x2)) { seq(seq(h_tmp505; Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * ... }; + RzILOpPure *op_LT_4 = SLT(VARL("i"), SN(32, 2)); + RzILOpEffect *for_84 = REPEAT(op_LT_4, seq_83); + + // seq(i = 0x0; while ((i < 0x2)) { seq(seq(h_tmp505; Rdd = ((st64) ...; + RzILOpEffect *seq_85 = SEQN(2, op_ASSIGN_2, for_84); + + RzILOpEffect *instruction_sequence = seq_85; + return instruction_sequence; +} + +// memb(Rs+Ii) = Rt +RzILOpEffect *hex_il_op_s2_storerb_io(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // s = s; + RzILOpEffect *imm_assign_0 = SETL("s", s); + + // EA = ((ut32) Rs + s); + RzILOpPure *op_ADD_4 = ADD(Rs, VARL("s")); + RzILOpEffect *op_ASSIGN_6 = SETL("EA", CAST(32, IL_FALSE, op_ADD_4)); + + // mem_store_ut8(EA, ((ut8) ((st8) ((Rt >> 0x0) & 0xff)))); + RzILOpPure *op_RSHIFT_12 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_14 = LOGAND(op_RSHIFT_12, SN(32, 0xff)); + RzILOpEffect *ms_cast_ut8_16_17 = STOREW(VARL("EA"), CAST(8, IL_FALSE, CAST(8, MSB(op_AND_14), DUP(op_AND_14)))); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_6, ms_cast_ut8_16_17); + return 
instruction_sequence; +} + +// memb(Rx++Mu:brev) = Rt +RzILOpEffect *hex_il_op_s2_storerb_pbr(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Mu_op = ISA2REG(hi, 'u', false); + RzILOpPure *Mu = READ_REG(pkt, Mu_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // fbrev(((ut32) Rx)); + RzILOpEffect *fbrev_call_3 = hex_fbrev(CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // h_tmp506 = fbrev(((ut32) Rx)); + RzILOpEffect *op_ASSIGN_hybrid_tmp_5 = SETL("h_tmp506", UNSIGNED(32, VARL("ret_val"))); + + // seq(fbrev(((ut32) Rx)); h_tmp506 = fbrev(((ut32) Rx))); + RzILOpEffect *seq_6 = SEQN(2, fbrev_call_3, op_ASSIGN_hybrid_tmp_5); + + // EA = h_tmp506; + RzILOpEffect *op_ASSIGN_7 = SETL("EA", VARL("h_tmp506")); + + // seq(seq(fbrev(((ut32) Rx)); h_tmp506 = fbrev(((ut32) Rx))); EA = ...; + RzILOpEffect *seq_8 = SEQN(2, seq_6, op_ASSIGN_7); + + // Rx = Rx + Mu; + RzILOpPure *op_ADD_10 = ADD(READ_REG(pkt, Rx_op, false), Mu); + RzILOpEffect *op_ASSIGN_11 = WRITE_REG(bundle, Rx_op, op_ADD_10); + + // mem_store_ut8(EA, ((ut8) ((st8) ((Rt >> 0x0) & 0xff)))); + RzILOpPure *op_RSHIFT_17 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_19 = LOGAND(op_RSHIFT_17, SN(32, 0xff)); + RzILOpEffect *ms_cast_ut8_21_22 = STOREW(VARL("EA"), CAST(8, IL_FALSE, CAST(8, MSB(op_AND_19), DUP(op_AND_19)))); + + RzILOpEffect *instruction_sequence = SEQN(3, seq_8, op_ASSIGN_11, ms_cast_ut8_21_22); + return instruction_sequence; +} + +// memb(Rx++Ii:circ(Mu)) = Rt +RzILOpEffect *hex_il_op_s2_storerb_pci(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + const HexOp *Mu_op = ISA2REG(hi, 'u', false); + RzILOpPure *Mu = 
READ_REG(pkt, Mu_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // EA = ((ut32) Rx); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // s = s; + RzILOpEffect *imm_assign_5 = SETL("s", s); + + // fcirc_add(bundle, Rx, s, Mu, get_corresponding_CS(pkt, Mu)); + RzILOpEffect *fcirc_add_call_9 = hex_fcirc_add(bundle, Rx_op, VARL("s"), Mu, HEX_GET_CORRESPONDING_CS(pkt, Mu_op)); + + // h_tmp507 = fcirc_add(bundle, Rx, s, Mu, get_corresponding_CS(pkt, Mu)); + RzILOpEffect *op_ASSIGN_hybrid_tmp_11 = SETL("h_tmp507", SIGNED(32, VARL("ret_val"))); + + // seq(fcirc_add(bundle, Rx, s, Mu, get_corresponding_CS(pkt, Mu)); ...; + RzILOpEffect *seq_12 = SEQN(2, fcirc_add_call_9, op_ASSIGN_hybrid_tmp_11); + + // mem_store_ut8(EA, ((ut8) ((st8) ((Rt >> 0x0) & 0xff)))); + RzILOpPure *op_RSHIFT_18 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_20 = LOGAND(op_RSHIFT_18, SN(32, 0xff)); + RzILOpEffect *ms_cast_ut8_22_23 = STOREW(VARL("EA"), CAST(8, IL_FALSE, CAST(8, MSB(op_AND_20), DUP(op_AND_20)))); + + RzILOpEffect *instruction_sequence = SEQN(4, imm_assign_5, seq_12, op_ASSIGN_3, ms_cast_ut8_22_23); + return instruction_sequence; +} + +// memb(Rx++I:circ(Mu)) = Rt +RzILOpEffect *hex_il_op_s2_storerb_pcr(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Mu_op = ISA2REG(hi, 'u', false); + RzILOpPure *Mu = READ_REG(pkt, Mu_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // EA = ((ut32) Rx); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // fcirc_add(bundle, Rx, ((st32) (sextract64(((ut64) (((Mu & 0xf0000000) >> 0x15) | ((Mu >> 0x11) & 0x7f))), 0x0, 0xb) << 0x0)), Mu, get_corresponding_CS(pkt, Mu)); + RzILOpPure *op_AND_10 = 
LOGAND(DUP(Mu), SN(32, 0xf0000000)); + RzILOpPure *op_RSHIFT_12 = SHIFTRA(op_AND_10, SN(32, 21)); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(DUP(Mu), SN(32, 17)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0x7f)); + RzILOpPure *op_OR_17 = LOGOR(op_RSHIFT_12, op_AND_16); + RzILOpPure *op_LSHIFT_24 = SHIFTL0(SEXTRACT64(CAST(64, IL_FALSE, op_OR_17), SN(32, 0), SN(32, 11)), SN(32, 0)); + RzILOpEffect *fcirc_add_call_27 = hex_fcirc_add(bundle, Rx_op, CAST(32, MSB(op_LSHIFT_24), DUP(op_LSHIFT_24)), Mu, HEX_GET_CORRESPONDING_CS(pkt, Mu_op)); + + // h_tmp508 = fcirc_add(bundle, Rx, ((st32) (sextract64(((ut64) (((Mu & 0xf0000000) >> 0x15) | ((Mu >> 0x11) & 0x7f))), 0x0, 0xb) << 0x0)), Mu, get_corresponding_CS(pkt, Mu)); + RzILOpEffect *op_ASSIGN_hybrid_tmp_29 = SETL("h_tmp508", SIGNED(32, VARL("ret_val"))); + + // seq(fcirc_add(bundle, Rx, ((st32) (sextract64(((ut64) (((Mu & 0x ...; + RzILOpEffect *seq_30 = SEQN(2, fcirc_add_call_27, op_ASSIGN_hybrid_tmp_29); + + // mem_store_ut8(EA, ((ut8) ((st8) ((Rt >> 0x0) & 0xff)))); + RzILOpPure *op_RSHIFT_36 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_38 = LOGAND(op_RSHIFT_36, SN(32, 0xff)); + RzILOpEffect *ms_cast_ut8_40_41 = STOREW(VARL("EA"), CAST(8, IL_FALSE, CAST(8, MSB(op_AND_38), DUP(op_AND_38)))); + + RzILOpEffect *instruction_sequence = SEQN(3, seq_30, op_ASSIGN_3, ms_cast_ut8_40_41); + return instruction_sequence; +} + +// memb(Rx++Ii) = Rt +RzILOpEffect *hex_il_op_s2_storerb_pi(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // EA = ((ut32) Rx); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // s = s; + RzILOpEffect *imm_assign_5 = SETL("s", s); + + // Rx = Rx + s; + RzILOpPure *op_ADD_7 = 
ADD(READ_REG(pkt, Rx_op, false), VARL("s")); + RzILOpEffect *op_ASSIGN_8 = WRITE_REG(bundle, Rx_op, op_ADD_7); + + // mem_store_ut8(EA, ((ut8) ((st8) ((Rt >> 0x0) & 0xff)))); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0xff)); + RzILOpEffect *ms_cast_ut8_18_19 = STOREW(VARL("EA"), CAST(8, IL_FALSE, CAST(8, MSB(op_AND_16), DUP(op_AND_16)))); + + RzILOpEffect *instruction_sequence = SEQN(4, imm_assign_5, op_ASSIGN_3, op_ASSIGN_8, ms_cast_ut8_18_19); + return instruction_sequence; +} + +// memb(Rx++Mu) = Rt +RzILOpEffect *hex_il_op_s2_storerb_pr(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Mu_op = ISA2REG(hi, 'u', false); + RzILOpPure *Mu = READ_REG(pkt, Mu_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // EA = ((ut32) Rx); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // Rx = Rx + Mu; + RzILOpPure *op_ADD_6 = ADD(READ_REG(pkt, Rx_op, false), Mu); + RzILOpEffect *op_ASSIGN_7 = WRITE_REG(bundle, Rx_op, op_ADD_6); + + // mem_store_ut8(EA, ((ut8) ((st8) ((Rt >> 0x0) & 0xff)))); + RzILOpPure *op_RSHIFT_13 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_15 = LOGAND(op_RSHIFT_13, SN(32, 0xff)); + RzILOpEffect *ms_cast_ut8_17_18 = STOREW(VARL("EA"), CAST(8, IL_FALSE, CAST(8, MSB(op_AND_15), DUP(op_AND_15)))); + + RzILOpEffect *instruction_sequence = SEQN(3, op_ASSIGN_3, op_ASSIGN_7, ms_cast_ut8_17_18); + return instruction_sequence; +} + +// memb(gp+Ii) = Rt +RzILOpEffect *hex_il_op_s2_storerbgp(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp gp_op = ALIAS2OP(HEX_REG_ALIAS_GP, false); + RzILOpPure *gp = READ_REG(pkt, 
&gp_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = gp + u; + RzILOpPure *op_ADD_4 = ADD(gp, VARL("u")); + RzILOpEffect *op_ASSIGN_5 = SETL("EA", op_ADD_4); + + // mem_store_ut8(EA, ((ut8) ((st8) ((Rt >> 0x0) & 0xff)))); + RzILOpPure *op_RSHIFT_11 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_13 = LOGAND(op_RSHIFT_11, SN(32, 0xff)); + RzILOpEffect *ms_cast_ut8_15_16 = STOREW(VARL("EA"), CAST(8, IL_FALSE, CAST(8, MSB(op_AND_13), DUP(op_AND_13)))); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_5, ms_cast_ut8_15_16); + return instruction_sequence; +} + +// memb(Rs+Ii) = Nt.new +RzILOpEffect *hex_il_op_s2_storerbnew_io(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp Nt_new_op = NREG2OP(bundle, 't'); + RzILOpPure *Nt_new = READ_REG(pkt, &Nt_new_op, true); + + // s = s; + RzILOpEffect *imm_assign_0 = SETL("s", s); + + // EA = ((ut32) Rs + s); + RzILOpPure *op_ADD_4 = ADD(Rs, VARL("s")); + RzILOpEffect *op_ASSIGN_6 = SETL("EA", CAST(32, IL_FALSE, op_ADD_4)); + + // mem_store_ut8(EA, ((ut8) ((st8) ((Nt_new >> 0x0) & 0xff)))); + RzILOpPure *op_RSHIFT_12 = SHIFTRA(Nt_new, SN(32, 0)); + RzILOpPure *op_AND_14 = LOGAND(op_RSHIFT_12, SN(32, 0xff)); + RzILOpEffect *ms_cast_ut8_16_17 = STOREW(VARL("EA"), CAST(8, IL_FALSE, CAST(8, MSB(op_AND_14), DUP(op_AND_14)))); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_6, ms_cast_ut8_16_17); + return instruction_sequence; +} + +// memb(Rx++Mu:brev) = Nt.new +RzILOpEffect *hex_il_op_s2_storerbnew_pbr(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + 
const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Mu_op = ISA2REG(hi, 'u', false); + RzILOpPure *Mu = READ_REG(pkt, Mu_op, false); + const HexOp Nt_new_op = NREG2OP(bundle, 't'); + RzILOpPure *Nt_new = READ_REG(pkt, &Nt_new_op, true); + + // fbrev(((ut32) Rx)); + RzILOpEffect *fbrev_call_3 = hex_fbrev(CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // h_tmp509 = fbrev(((ut32) Rx)); + RzILOpEffect *op_ASSIGN_hybrid_tmp_5 = SETL("h_tmp509", UNSIGNED(32, VARL("ret_val"))); + + // seq(fbrev(((ut32) Rx)); h_tmp509 = fbrev(((ut32) Rx))); + RzILOpEffect *seq_6 = SEQN(2, fbrev_call_3, op_ASSIGN_hybrid_tmp_5); + + // EA = h_tmp509; + RzILOpEffect *op_ASSIGN_7 = SETL("EA", VARL("h_tmp509")); + + // seq(seq(fbrev(((ut32) Rx)); h_tmp509 = fbrev(((ut32) Rx))); EA = ...; + RzILOpEffect *seq_8 = SEQN(2, seq_6, op_ASSIGN_7); + + // Rx = Rx + Mu; + RzILOpPure *op_ADD_10 = ADD(READ_REG(pkt, Rx_op, false), Mu); + RzILOpEffect *op_ASSIGN_11 = WRITE_REG(bundle, Rx_op, op_ADD_10); + + // mem_store_ut8(EA, ((ut8) ((st8) ((Nt_new >> 0x0) & 0xff)))); + RzILOpPure *op_RSHIFT_17 = SHIFTRA(Nt_new, SN(32, 0)); + RzILOpPure *op_AND_19 = LOGAND(op_RSHIFT_17, SN(32, 0xff)); + RzILOpEffect *ms_cast_ut8_21_22 = STOREW(VARL("EA"), CAST(8, IL_FALSE, CAST(8, MSB(op_AND_19), DUP(op_AND_19)))); + + RzILOpEffect *instruction_sequence = SEQN(3, seq_8, op_ASSIGN_11, ms_cast_ut8_21_22); + return instruction_sequence; +} + +// memb(Rx++Ii:circ(Mu)) = Nt.new +RzILOpEffect *hex_il_op_s2_storerbnew_pci(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + const HexOp *Mu_op = ISA2REG(hi, 'u', false); + RzILOpPure *Mu = READ_REG(pkt, Mu_op, false); + const HexOp Nt_new_op = NREG2OP(bundle, 't'); + RzILOpPure *Nt_new = READ_REG(pkt, &Nt_new_op, true); + + // EA = ((ut32) Rx); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", 
CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // s = s; + RzILOpEffect *imm_assign_5 = SETL("s", s); + + // fcirc_add(bundle, Rx, s, Mu, get_corresponding_CS(pkt, Mu)); + RzILOpEffect *fcirc_add_call_9 = hex_fcirc_add(bundle, Rx_op, VARL("s"), Mu, HEX_GET_CORRESPONDING_CS(pkt, Mu_op)); + + // h_tmp510 = fcirc_add(bundle, Rx, s, Mu, get_corresponding_CS(pkt, Mu)); + RzILOpEffect *op_ASSIGN_hybrid_tmp_11 = SETL("h_tmp510", SIGNED(32, VARL("ret_val"))); + + // seq(fcirc_add(bundle, Rx, s, Mu, get_corresponding_CS(pkt, Mu)); ...; + RzILOpEffect *seq_12 = SEQN(2, fcirc_add_call_9, op_ASSIGN_hybrid_tmp_11); + + // mem_store_ut8(EA, ((ut8) ((st8) ((Nt_new >> 0x0) & 0xff)))); + RzILOpPure *op_RSHIFT_18 = SHIFTRA(Nt_new, SN(32, 0)); + RzILOpPure *op_AND_20 = LOGAND(op_RSHIFT_18, SN(32, 0xff)); + RzILOpEffect *ms_cast_ut8_22_23 = STOREW(VARL("EA"), CAST(8, IL_FALSE, CAST(8, MSB(op_AND_20), DUP(op_AND_20)))); + + RzILOpEffect *instruction_sequence = SEQN(4, imm_assign_5, seq_12, op_ASSIGN_3, ms_cast_ut8_22_23); + return instruction_sequence; +} + +// memb(Rx++I:circ(Mu)) = Nt.new +RzILOpEffect *hex_il_op_s2_storerbnew_pcr(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Mu_op = ISA2REG(hi, 'u', false); + RzILOpPure *Mu = READ_REG(pkt, Mu_op, false); + const HexOp Nt_new_op = NREG2OP(bundle, 't'); + RzILOpPure *Nt_new = READ_REG(pkt, &Nt_new_op, true); + + // EA = ((ut32) Rx); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // fcirc_add(bundle, Rx, ((st32) (sextract64(((ut64) (((Mu & 0xf0000000) >> 0x15) | ((Mu >> 0x11) & 0x7f))), 0x0, 0xb) << 0x0)), Mu, get_corresponding_CS(pkt, Mu)); + RzILOpPure *op_AND_10 = LOGAND(DUP(Mu), SN(32, 0xf0000000)); + RzILOpPure *op_RSHIFT_12 = SHIFTRA(op_AND_10, SN(32, 21)); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(DUP(Mu), SN(32, 17)); + RzILOpPure 
*op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0x7f)); + RzILOpPure *op_OR_17 = LOGOR(op_RSHIFT_12, op_AND_16); + RzILOpPure *op_LSHIFT_24 = SHIFTL0(SEXTRACT64(CAST(64, IL_FALSE, op_OR_17), SN(32, 0), SN(32, 11)), SN(32, 0)); + RzILOpEffect *fcirc_add_call_27 = hex_fcirc_add(bundle, Rx_op, CAST(32, MSB(op_LSHIFT_24), DUP(op_LSHIFT_24)), Mu, HEX_GET_CORRESPONDING_CS(pkt, Mu_op)); + + // h_tmp511 = fcirc_add(bundle, Rx, ((st32) (sextract64(((ut64) (((Mu & 0xf0000000) >> 0x15) | ((Mu >> 0x11) & 0x7f))), 0x0, 0xb) << 0x0)), Mu, get_corresponding_CS(pkt, Mu)); + RzILOpEffect *op_ASSIGN_hybrid_tmp_29 = SETL("h_tmp511", SIGNED(32, VARL("ret_val"))); + + // seq(fcirc_add(bundle, Rx, ((st32) (sextract64(((ut64) (((Mu & 0x ...; + RzILOpEffect *seq_30 = SEQN(2, fcirc_add_call_27, op_ASSIGN_hybrid_tmp_29); + + // mem_store_ut8(EA, ((ut8) ((st8) ((Nt_new >> 0x0) & 0xff)))); + RzILOpPure *op_RSHIFT_36 = SHIFTRA(Nt_new, SN(32, 0)); + RzILOpPure *op_AND_38 = LOGAND(op_RSHIFT_36, SN(32, 0xff)); + RzILOpEffect *ms_cast_ut8_40_41 = STOREW(VARL("EA"), CAST(8, IL_FALSE, CAST(8, MSB(op_AND_38), DUP(op_AND_38)))); + + RzILOpEffect *instruction_sequence = SEQN(3, seq_30, op_ASSIGN_3, ms_cast_ut8_40_41); + return instruction_sequence; +} + +// memb(Rx++Ii) = Nt.new +RzILOpEffect *hex_il_op_s2_storerbnew_pi(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + const HexOp Nt_new_op = NREG2OP(bundle, 't'); + RzILOpPure *Nt_new = READ_REG(pkt, &Nt_new_op, true); + + // EA = ((ut32) Rx); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // s = s; + RzILOpEffect *imm_assign_5 = SETL("s", s); + + // Rx = Rx + s; + RzILOpPure *op_ADD_7 = ADD(READ_REG(pkt, Rx_op, false), VARL("s")); + RzILOpEffect *op_ASSIGN_8 = WRITE_REG(bundle, Rx_op, op_ADD_7); + + // mem_store_ut8(EA, ((ut8) ((st8) 
((Nt_new >> 0x0) & 0xff)))); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(Nt_new, SN(32, 0)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0xff)); + RzILOpEffect *ms_cast_ut8_18_19 = STOREW(VARL("EA"), CAST(8, IL_FALSE, CAST(8, MSB(op_AND_16), DUP(op_AND_16)))); + + RzILOpEffect *instruction_sequence = SEQN(4, imm_assign_5, op_ASSIGN_3, op_ASSIGN_8, ms_cast_ut8_18_19); + return instruction_sequence; +} + +// memb(Rx++Mu) = Nt.new +RzILOpEffect *hex_il_op_s2_storerbnew_pr(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Mu_op = ISA2REG(hi, 'u', false); + RzILOpPure *Mu = READ_REG(pkt, Mu_op, false); + const HexOp Nt_new_op = NREG2OP(bundle, 't'); + RzILOpPure *Nt_new = READ_REG(pkt, &Nt_new_op, true); + + // EA = ((ut32) Rx); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // Rx = Rx + Mu; + RzILOpPure *op_ADD_6 = ADD(READ_REG(pkt, Rx_op, false), Mu); + RzILOpEffect *op_ASSIGN_7 = WRITE_REG(bundle, Rx_op, op_ADD_6); + + // mem_store_ut8(EA, ((ut8) ((st8) ((Nt_new >> 0x0) & 0xff)))); + RzILOpPure *op_RSHIFT_13 = SHIFTRA(Nt_new, SN(32, 0)); + RzILOpPure *op_AND_15 = LOGAND(op_RSHIFT_13, SN(32, 0xff)); + RzILOpEffect *ms_cast_ut8_17_18 = STOREW(VARL("EA"), CAST(8, IL_FALSE, CAST(8, MSB(op_AND_15), DUP(op_AND_15)))); + + RzILOpEffect *instruction_sequence = SEQN(3, op_ASSIGN_3, op_ASSIGN_7, ms_cast_ut8_17_18); + return instruction_sequence; +} + +// memb(gp+Ii) = Nt.new +RzILOpEffect *hex_il_op_s2_storerbnewgp(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp gp_op = ALIAS2OP(HEX_REG_ALIAS_GP, false); + RzILOpPure *gp = READ_REG(pkt, &gp_op, false); + const HexOp Nt_new_op = NREG2OP(bundle, 't'); + RzILOpPure *Nt_new = READ_REG(pkt, &Nt_new_op, 
true); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = gp + u; + RzILOpPure *op_ADD_4 = ADD(gp, VARL("u")); + RzILOpEffect *op_ASSIGN_5 = SETL("EA", op_ADD_4); + + // mem_store_ut8(EA, ((ut8) ((st8) ((Nt_new >> 0x0) & 0xff)))); + RzILOpPure *op_RSHIFT_11 = SHIFTRA(Nt_new, SN(32, 0)); + RzILOpPure *op_AND_13 = LOGAND(op_RSHIFT_11, SN(32, 0xff)); + RzILOpEffect *ms_cast_ut8_15_16 = STOREW(VARL("EA"), CAST(8, IL_FALSE, CAST(8, MSB(op_AND_13), DUP(op_AND_13)))); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_5, ms_cast_ut8_15_16); + return instruction_sequence; +} + +// memd(Rs+Ii) = Rtt +RzILOpEffect *hex_il_op_s2_storerd_io(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // s = s; + RzILOpEffect *imm_assign_0 = SETL("s", s); + + // EA = ((ut32) Rs + s); + RzILOpPure *op_ADD_4 = ADD(Rs, VARL("s")); + RzILOpEffect *op_ASSIGN_6 = SETL("EA", CAST(32, IL_FALSE, op_ADD_4)); + + // mem_store_ut64(EA, ((ut64) Rtt)); + RzILOpEffect *ms_cast_ut64_9_10 = STOREW(VARL("EA"), CAST(64, IL_FALSE, Rtt)); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_6, ms_cast_ut64_9_10); + return instruction_sequence; +} + +// memd(Rx++Mu:brev) = Rtt +RzILOpEffect *hex_il_op_s2_storerd_pbr(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Mu_op = ISA2REG(hi, 'u', false); + RzILOpPure *Mu = READ_REG(pkt, Mu_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // fbrev(((ut32) Rx)); + RzILOpEffect 
*fbrev_call_3 = hex_fbrev(CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // h_tmp512 = fbrev(((ut32) Rx)); + RzILOpEffect *op_ASSIGN_hybrid_tmp_5 = SETL("h_tmp512", UNSIGNED(32, VARL("ret_val"))); + + // seq(fbrev(((ut32) Rx)); h_tmp512 = fbrev(((ut32) Rx))); + RzILOpEffect *seq_6 = SEQN(2, fbrev_call_3, op_ASSIGN_hybrid_tmp_5); + + // EA = h_tmp512; + RzILOpEffect *op_ASSIGN_7 = SETL("EA", VARL("h_tmp512")); + + // seq(seq(fbrev(((ut32) Rx)); h_tmp512 = fbrev(((ut32) Rx))); EA = ...; + RzILOpEffect *seq_8 = SEQN(2, seq_6, op_ASSIGN_7); + + // Rx = Rx + Mu; + RzILOpPure *op_ADD_10 = ADD(READ_REG(pkt, Rx_op, false), Mu); + RzILOpEffect *op_ASSIGN_11 = WRITE_REG(bundle, Rx_op, op_ADD_10); + + // mem_store_ut64(EA, ((ut64) Rtt)); + RzILOpEffect *ms_cast_ut64_14_15 = STOREW(VARL("EA"), CAST(64, IL_FALSE, Rtt)); + + RzILOpEffect *instruction_sequence = SEQN(3, seq_8, op_ASSIGN_11, ms_cast_ut64_14_15); + return instruction_sequence; +} + +// memd(Rx++Ii:circ(Mu)) = Rtt +RzILOpEffect *hex_il_op_s2_storerd_pci(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + const HexOp *Mu_op = ISA2REG(hi, 'u', false); + RzILOpPure *Mu = READ_REG(pkt, Mu_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // EA = ((ut32) Rx); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // s = s; + RzILOpEffect *imm_assign_5 = SETL("s", s); + + // fcirc_add(bundle, Rx, s, Mu, get_corresponding_CS(pkt, Mu)); + RzILOpEffect *fcirc_add_call_9 = hex_fcirc_add(bundle, Rx_op, VARL("s"), Mu, HEX_GET_CORRESPONDING_CS(pkt, Mu_op)); + + // h_tmp513 = fcirc_add(bundle, Rx, s, Mu, get_corresponding_CS(pkt, Mu)); + RzILOpEffect *op_ASSIGN_hybrid_tmp_11 = SETL("h_tmp513", SIGNED(32, VARL("ret_val"))); + + // 
seq(fcirc_add(bundle, Rx, s, Mu, get_corresponding_CS(pkt, Mu)); ...; + RzILOpEffect *seq_12 = SEQN(2, fcirc_add_call_9, op_ASSIGN_hybrid_tmp_11); + + // mem_store_ut64(EA, ((ut64) Rtt)); + RzILOpEffect *ms_cast_ut64_15_16 = STOREW(VARL("EA"), CAST(64, IL_FALSE, Rtt)); + + RzILOpEffect *instruction_sequence = SEQN(4, imm_assign_5, seq_12, op_ASSIGN_3, ms_cast_ut64_15_16); + return instruction_sequence; +} + +// memd(Rx++I:circ(Mu)) = Rtt +RzILOpEffect *hex_il_op_s2_storerd_pcr(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Mu_op = ISA2REG(hi, 'u', false); + RzILOpPure *Mu = READ_REG(pkt, Mu_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // EA = ((ut32) Rx); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // fcirc_add(bundle, Rx, ((st32) (sextract64(((ut64) (((Mu & 0xf0000000) >> 0x15) | ((Mu >> 0x11) & 0x7f))), 0x0, 0xb) << 0x3)), Mu, get_corresponding_CS(pkt, Mu)); + RzILOpPure *op_AND_10 = LOGAND(DUP(Mu), SN(32, 0xf0000000)); + RzILOpPure *op_RSHIFT_12 = SHIFTRA(op_AND_10, SN(32, 21)); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(DUP(Mu), SN(32, 17)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0x7f)); + RzILOpPure *op_OR_17 = LOGOR(op_RSHIFT_12, op_AND_16); + RzILOpPure *op_LSHIFT_24 = SHIFTL0(SEXTRACT64(CAST(64, IL_FALSE, op_OR_17), SN(32, 0), SN(32, 11)), SN(32, 3)); + RzILOpEffect *fcirc_add_call_27 = hex_fcirc_add(bundle, Rx_op, CAST(32, MSB(op_LSHIFT_24), DUP(op_LSHIFT_24)), Mu, HEX_GET_CORRESPONDING_CS(pkt, Mu_op)); + + // h_tmp514 = fcirc_add(bundle, Rx, ((st32) (sextract64(((ut64) (((Mu & 0xf0000000) >> 0x15) | ((Mu >> 0x11) & 0x7f))), 0x0, 0xb) << 0x3)), Mu, get_corresponding_CS(pkt, Mu)); + RzILOpEffect *op_ASSIGN_hybrid_tmp_29 = SETL("h_tmp514", SIGNED(32, VARL("ret_val"))); + + // 
seq(fcirc_add(bundle, Rx, ((st32) (sextract64(((ut64) (((Mu & 0x ...; + RzILOpEffect *seq_30 = SEQN(2, fcirc_add_call_27, op_ASSIGN_hybrid_tmp_29); + + // mem_store_ut64(EA, ((ut64) Rtt)); + RzILOpEffect *ms_cast_ut64_33_34 = STOREW(VARL("EA"), CAST(64, IL_FALSE, Rtt)); + + RzILOpEffect *instruction_sequence = SEQN(3, seq_30, op_ASSIGN_3, ms_cast_ut64_33_34); + return instruction_sequence; +} + +// memd(Rx++Ii) = Rtt +RzILOpEffect *hex_il_op_s2_storerd_pi(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // EA = ((ut32) Rx); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // s = s; + RzILOpEffect *imm_assign_5 = SETL("s", s); + + // Rx = Rx + s; + RzILOpPure *op_ADD_7 = ADD(READ_REG(pkt, Rx_op, false), VARL("s")); + RzILOpEffect *op_ASSIGN_8 = WRITE_REG(bundle, Rx_op, op_ADD_7); + + // mem_store_ut64(EA, ((ut64) Rtt)); + RzILOpEffect *ms_cast_ut64_11_12 = STOREW(VARL("EA"), CAST(64, IL_FALSE, Rtt)); + + RzILOpEffect *instruction_sequence = SEQN(4, imm_assign_5, op_ASSIGN_3, op_ASSIGN_8, ms_cast_ut64_11_12); + return instruction_sequence; +} + +// memd(Rx++Mu) = Rtt +RzILOpEffect *hex_il_op_s2_storerd_pr(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Mu_op = ISA2REG(hi, 'u', false); + RzILOpPure *Mu = READ_REG(pkt, Mu_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // EA = ((ut32) Rx); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // Rx = Rx + Mu; + RzILOpPure *op_ADD_6 = ADD(READ_REG(pkt, 
Rx_op, false), Mu); + RzILOpEffect *op_ASSIGN_7 = WRITE_REG(bundle, Rx_op, op_ADD_6); + + // mem_store_ut64(EA, ((ut64) Rtt)); + RzILOpEffect *ms_cast_ut64_10_11 = STOREW(VARL("EA"), CAST(64, IL_FALSE, Rtt)); + + RzILOpEffect *instruction_sequence = SEQN(3, op_ASSIGN_3, op_ASSIGN_7, ms_cast_ut64_10_11); + return instruction_sequence; +} + +// memd(gp+Ii) = Rtt +RzILOpEffect *hex_il_op_s2_storerdgp(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp gp_op = ALIAS2OP(HEX_REG_ALIAS_GP, false); + RzILOpPure *gp = READ_REG(pkt, &gp_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = gp + u; + RzILOpPure *op_ADD_4 = ADD(gp, VARL("u")); + RzILOpEffect *op_ASSIGN_5 = SETL("EA", op_ADD_4); + + // mem_store_ut64(EA, ((ut64) Rtt)); + RzILOpEffect *ms_cast_ut64_8_9 = STOREW(VARL("EA"), CAST(64, IL_FALSE, Rtt)); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_5, ms_cast_ut64_8_9); + return instruction_sequence; +} + +// memh(Rs+Ii) = Rt.h +RzILOpEffect *hex_il_op_s2_storerf_io(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // s = s; + RzILOpEffect *imm_assign_0 = SETL("s", s); + + // EA = ((ut32) Rs + s); + RzILOpPure *op_ADD_4 = ADD(Rs, VARL("s")); + RzILOpEffect *op_ASSIGN_6 = SETL("EA", CAST(32, IL_FALSE, op_ADD_4)); + + // mem_store_ut16(EA, ((ut16) ((st16) ((Rt >> 0x10) & 0xffff)))); + RzILOpPure *op_RSHIFT_12 = SHIFTRA(Rt, SN(32, 16)); + RzILOpPure *op_AND_14 = 
LOGAND(op_RSHIFT_12, SN(32, 0xffff)); + RzILOpEffect *ms_cast_ut16_16_17 = STOREW(VARL("EA"), CAST(16, IL_FALSE, CAST(16, MSB(op_AND_14), DUP(op_AND_14)))); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_6, ms_cast_ut16_16_17); + return instruction_sequence; +} + +// memh(Rx++Mu:brev) = Rt.h +RzILOpEffect *hex_il_op_s2_storerf_pbr(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Mu_op = ISA2REG(hi, 'u', false); + RzILOpPure *Mu = READ_REG(pkt, Mu_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // fbrev(((ut32) Rx)); + RzILOpEffect *fbrev_call_3 = hex_fbrev(CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // h_tmp515 = fbrev(((ut32) Rx)); + RzILOpEffect *op_ASSIGN_hybrid_tmp_5 = SETL("h_tmp515", UNSIGNED(32, VARL("ret_val"))); + + // seq(fbrev(((ut32) Rx)); h_tmp515 = fbrev(((ut32) Rx))); + RzILOpEffect *seq_6 = SEQN(2, fbrev_call_3, op_ASSIGN_hybrid_tmp_5); + + // EA = h_tmp515; + RzILOpEffect *op_ASSIGN_7 = SETL("EA", VARL("h_tmp515")); + + // seq(seq(fbrev(((ut32) Rx)); h_tmp515 = fbrev(((ut32) Rx))); EA = ...; + RzILOpEffect *seq_8 = SEQN(2, seq_6, op_ASSIGN_7); + + // Rx = Rx + Mu; + RzILOpPure *op_ADD_10 = ADD(READ_REG(pkt, Rx_op, false), Mu); + RzILOpEffect *op_ASSIGN_11 = WRITE_REG(bundle, Rx_op, op_ADD_10); + + // mem_store_ut16(EA, ((ut16) ((st16) ((Rt >> 0x10) & 0xffff)))); + RzILOpPure *op_RSHIFT_17 = SHIFTRA(Rt, SN(32, 16)); + RzILOpPure *op_AND_19 = LOGAND(op_RSHIFT_17, SN(32, 0xffff)); + RzILOpEffect *ms_cast_ut16_21_22 = STOREW(VARL("EA"), CAST(16, IL_FALSE, CAST(16, MSB(op_AND_19), DUP(op_AND_19)))); + + RzILOpEffect *instruction_sequence = SEQN(3, seq_8, op_ASSIGN_11, ms_cast_ut16_21_22); + return instruction_sequence; +} + +// memh(Rx++Ii:circ(Mu)) = Rt.h +RzILOpEffect *hex_il_op_s2_storerf_pci(HexInsnPktBundle 
*bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + const HexOp *Mu_op = ISA2REG(hi, 'u', false); + RzILOpPure *Mu = READ_REG(pkt, Mu_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // EA = ((ut32) Rx); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // s = s; + RzILOpEffect *imm_assign_5 = SETL("s", s); + + // fcirc_add(bundle, Rx, s, Mu, get_corresponding_CS(pkt, Mu)); + RzILOpEffect *fcirc_add_call_9 = hex_fcirc_add(bundle, Rx_op, VARL("s"), Mu, HEX_GET_CORRESPONDING_CS(pkt, Mu_op)); + + // h_tmp516 = fcirc_add(bundle, Rx, s, Mu, get_corresponding_CS(pkt, Mu)); + RzILOpEffect *op_ASSIGN_hybrid_tmp_11 = SETL("h_tmp516", SIGNED(32, VARL("ret_val"))); + + // seq(fcirc_add(bundle, Rx, s, Mu, get_corresponding_CS(pkt, Mu)); ...; + RzILOpEffect *seq_12 = SEQN(2, fcirc_add_call_9, op_ASSIGN_hybrid_tmp_11); + + // mem_store_ut16(EA, ((ut16) ((st16) ((Rt >> 0x10) & 0xffff)))); + RzILOpPure *op_RSHIFT_18 = SHIFTRA(Rt, SN(32, 16)); + RzILOpPure *op_AND_20 = LOGAND(op_RSHIFT_18, SN(32, 0xffff)); + RzILOpEffect *ms_cast_ut16_22_23 = STOREW(VARL("EA"), CAST(16, IL_FALSE, CAST(16, MSB(op_AND_20), DUP(op_AND_20)))); + + RzILOpEffect *instruction_sequence = SEQN(4, imm_assign_5, seq_12, op_ASSIGN_3, ms_cast_ut16_22_23); + return instruction_sequence; +} + +// memh(Rx++I:circ(Mu)) = Rt.h +RzILOpEffect *hex_il_op_s2_storerf_pcr(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Mu_op = ISA2REG(hi, 'u', false); + RzILOpPure *Mu = READ_REG(pkt, Mu_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // EA = ((ut32) Rx); + 
RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // fcirc_add(bundle, Rx, ((st32) (sextract64(((ut64) (((Mu & 0xf0000000) >> 0x15) | ((Mu >> 0x11) & 0x7f))), 0x0, 0xb) << 0x1)), Mu, get_corresponding_CS(pkt, Mu)); + RzILOpPure *op_AND_10 = LOGAND(DUP(Mu), SN(32, 0xf0000000)); + RzILOpPure *op_RSHIFT_12 = SHIFTRA(op_AND_10, SN(32, 21)); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(DUP(Mu), SN(32, 17)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0x7f)); + RzILOpPure *op_OR_17 = LOGOR(op_RSHIFT_12, op_AND_16); + RzILOpPure *op_LSHIFT_24 = SHIFTL0(SEXTRACT64(CAST(64, IL_FALSE, op_OR_17), SN(32, 0), SN(32, 11)), SN(32, 1)); + RzILOpEffect *fcirc_add_call_27 = hex_fcirc_add(bundle, Rx_op, CAST(32, MSB(op_LSHIFT_24), DUP(op_LSHIFT_24)), Mu, HEX_GET_CORRESPONDING_CS(pkt, Mu_op)); + + // h_tmp517 = fcirc_add(bundle, Rx, ((st32) (sextract64(((ut64) (((Mu & 0xf0000000) >> 0x15) | ((Mu >> 0x11) & 0x7f))), 0x0, 0xb) << 0x1)), Mu, get_corresponding_CS(pkt, Mu)); + RzILOpEffect *op_ASSIGN_hybrid_tmp_29 = SETL("h_tmp517", SIGNED(32, VARL("ret_val"))); + + // seq(fcirc_add(bundle, Rx, ((st32) (sextract64(((ut64) (((Mu & 0x ...; + RzILOpEffect *seq_30 = SEQN(2, fcirc_add_call_27, op_ASSIGN_hybrid_tmp_29); + + // mem_store_ut16(EA, ((ut16) ((st16) ((Rt >> 0x10) & 0xffff)))); + RzILOpPure *op_RSHIFT_36 = SHIFTRA(Rt, SN(32, 16)); + RzILOpPure *op_AND_38 = LOGAND(op_RSHIFT_36, SN(32, 0xffff)); + RzILOpEffect *ms_cast_ut16_40_41 = STOREW(VARL("EA"), CAST(16, IL_FALSE, CAST(16, MSB(op_AND_38), DUP(op_AND_38)))); + + RzILOpEffect *instruction_sequence = SEQN(3, seq_30, op_ASSIGN_3, ms_cast_ut16_40_41); + return instruction_sequence; +} + +// memh(Rx++Ii) = Rt.h +RzILOpEffect *hex_il_op_s2_storerf_pi(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + const HexOp *Rt_op = 
ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // EA = ((ut32) Rx); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // s = s; + RzILOpEffect *imm_assign_5 = SETL("s", s); + + // Rx = Rx + s; + RzILOpPure *op_ADD_7 = ADD(READ_REG(pkt, Rx_op, false), VARL("s")); + RzILOpEffect *op_ASSIGN_8 = WRITE_REG(bundle, Rx_op, op_ADD_7); + + // mem_store_ut16(EA, ((ut16) ((st16) ((Rt >> 0x10) & 0xffff)))); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(Rt, SN(32, 16)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0xffff)); + RzILOpEffect *ms_cast_ut16_18_19 = STOREW(VARL("EA"), CAST(16, IL_FALSE, CAST(16, MSB(op_AND_16), DUP(op_AND_16)))); + + RzILOpEffect *instruction_sequence = SEQN(4, imm_assign_5, op_ASSIGN_3, op_ASSIGN_8, ms_cast_ut16_18_19); + return instruction_sequence; +} + +// memh(Rx++Mu) = Rt.h +RzILOpEffect *hex_il_op_s2_storerf_pr(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Mu_op = ISA2REG(hi, 'u', false); + RzILOpPure *Mu = READ_REG(pkt, Mu_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // EA = ((ut32) Rx); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // Rx = Rx + Mu; + RzILOpPure *op_ADD_6 = ADD(READ_REG(pkt, Rx_op, false), Mu); + RzILOpEffect *op_ASSIGN_7 = WRITE_REG(bundle, Rx_op, op_ADD_6); + + // mem_store_ut16(EA, ((ut16) ((st16) ((Rt >> 0x10) & 0xffff)))); + RzILOpPure *op_RSHIFT_13 = SHIFTRA(Rt, SN(32, 16)); + RzILOpPure *op_AND_15 = LOGAND(op_RSHIFT_13, SN(32, 0xffff)); + RzILOpEffect *ms_cast_ut16_17_18 = STOREW(VARL("EA"), CAST(16, IL_FALSE, CAST(16, MSB(op_AND_15), DUP(op_AND_15)))); + + RzILOpEffect *instruction_sequence = SEQN(3, op_ASSIGN_3, op_ASSIGN_7, ms_cast_ut16_17_18); + return instruction_sequence; +} + +// 
memh(gp+Ii) = Rt.h +RzILOpEffect *hex_il_op_s2_storerfgp(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp gp_op = ALIAS2OP(HEX_REG_ALIAS_GP, false); + RzILOpPure *gp = READ_REG(pkt, &gp_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = gp + u; + RzILOpPure *op_ADD_4 = ADD(gp, VARL("u")); + RzILOpEffect *op_ASSIGN_5 = SETL("EA", op_ADD_4); + + // mem_store_ut16(EA, ((ut16) ((st16) ((Rt >> 0x10) & 0xffff)))); + RzILOpPure *op_RSHIFT_11 = SHIFTRA(Rt, SN(32, 16)); + RzILOpPure *op_AND_13 = LOGAND(op_RSHIFT_11, SN(32, 0xffff)); + RzILOpEffect *ms_cast_ut16_15_16 = STOREW(VARL("EA"), CAST(16, IL_FALSE, CAST(16, MSB(op_AND_13), DUP(op_AND_13)))); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_5, ms_cast_ut16_15_16); + return instruction_sequence; +} + +// memh(Rs+Ii) = Rt +RzILOpEffect *hex_il_op_s2_storerh_io(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // s = s; + RzILOpEffect *imm_assign_0 = SETL("s", s); + + // EA = ((ut32) Rs + s); + RzILOpPure *op_ADD_4 = ADD(Rs, VARL("s")); + RzILOpEffect *op_ASSIGN_6 = SETL("EA", CAST(32, IL_FALSE, op_ADD_4)); + + // mem_store_ut16(EA, ((ut16) ((st16) ((Rt >> 0x0) & 0xffff)))); + RzILOpPure *op_RSHIFT_12 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_14 = LOGAND(op_RSHIFT_12, SN(32, 0xffff)); + RzILOpEffect *ms_cast_ut16_16_17 = STOREW(VARL("EA"), CAST(16, IL_FALSE, CAST(16, MSB(op_AND_14), DUP(op_AND_14)))); + + 
RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_6, ms_cast_ut16_16_17); + return instruction_sequence; +} + +// memh(Rx++Mu:brev) = Rt +RzILOpEffect *hex_il_op_s2_storerh_pbr(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Mu_op = ISA2REG(hi, 'u', false); + RzILOpPure *Mu = READ_REG(pkt, Mu_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // fbrev(((ut32) Rx)); + RzILOpEffect *fbrev_call_3 = hex_fbrev(CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // h_tmp518 = fbrev(((ut32) Rx)); + RzILOpEffect *op_ASSIGN_hybrid_tmp_5 = SETL("h_tmp518", UNSIGNED(32, VARL("ret_val"))); + + // seq(fbrev(((ut32) Rx)); h_tmp518 = fbrev(((ut32) Rx))); + RzILOpEffect *seq_6 = SEQN(2, fbrev_call_3, op_ASSIGN_hybrid_tmp_5); + + // EA = h_tmp518; + RzILOpEffect *op_ASSIGN_7 = SETL("EA", VARL("h_tmp518")); + + // seq(seq(fbrev(((ut32) Rx)); h_tmp518 = fbrev(((ut32) Rx))); EA = ...; + RzILOpEffect *seq_8 = SEQN(2, seq_6, op_ASSIGN_7); + + // Rx = Rx + Mu; + RzILOpPure *op_ADD_10 = ADD(READ_REG(pkt, Rx_op, false), Mu); + RzILOpEffect *op_ASSIGN_11 = WRITE_REG(bundle, Rx_op, op_ADD_10); + + // mem_store_ut16(EA, ((ut16) ((st16) ((Rt >> 0x0) & 0xffff)))); + RzILOpPure *op_RSHIFT_17 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_19 = LOGAND(op_RSHIFT_17, SN(32, 0xffff)); + RzILOpEffect *ms_cast_ut16_21_22 = STOREW(VARL("EA"), CAST(16, IL_FALSE, CAST(16, MSB(op_AND_19), DUP(op_AND_19)))); + + RzILOpEffect *instruction_sequence = SEQN(3, seq_8, op_ASSIGN_11, ms_cast_ut16_21_22); + return instruction_sequence; +} + +// memh(Rx++Ii:circ(Mu)) = Rt +RzILOpEffect *hex_il_op_s2_storerh_pci(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + 
RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + const HexOp *Mu_op = ISA2REG(hi, 'u', false); + RzILOpPure *Mu = READ_REG(pkt, Mu_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // EA = ((ut32) Rx); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // s = s; + RzILOpEffect *imm_assign_5 = SETL("s", s); + + // fcirc_add(bundle, Rx, s, Mu, get_corresponding_CS(pkt, Mu)); + RzILOpEffect *fcirc_add_call_9 = hex_fcirc_add(bundle, Rx_op, VARL("s"), Mu, HEX_GET_CORRESPONDING_CS(pkt, Mu_op)); + + // h_tmp519 = fcirc_add(bundle, Rx, s, Mu, get_corresponding_CS(pkt, Mu)); + RzILOpEffect *op_ASSIGN_hybrid_tmp_11 = SETL("h_tmp519", SIGNED(32, VARL("ret_val"))); + + // seq(fcirc_add(bundle, Rx, s, Mu, get_corresponding_CS(pkt, Mu)); ...; + RzILOpEffect *seq_12 = SEQN(2, fcirc_add_call_9, op_ASSIGN_hybrid_tmp_11); + + // mem_store_ut16(EA, ((ut16) ((st16) ((Rt >> 0x0) & 0xffff)))); + RzILOpPure *op_RSHIFT_18 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_20 = LOGAND(op_RSHIFT_18, SN(32, 0xffff)); + RzILOpEffect *ms_cast_ut16_22_23 = STOREW(VARL("EA"), CAST(16, IL_FALSE, CAST(16, MSB(op_AND_20), DUP(op_AND_20)))); + + RzILOpEffect *instruction_sequence = SEQN(4, imm_assign_5, seq_12, op_ASSIGN_3, ms_cast_ut16_22_23); + return instruction_sequence; +} + +// memh(Rx++I:circ(Mu)) = Rt +RzILOpEffect *hex_il_op_s2_storerh_pcr(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Mu_op = ISA2REG(hi, 'u', false); + RzILOpPure *Mu = READ_REG(pkt, Mu_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // EA = ((ut32) Rx); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // fcirc_add(bundle, Rx, ((st32) (sextract64(((ut64) (((Mu & 0xf0000000) 
>> 0x15) | ((Mu >> 0x11) & 0x7f))), 0x0, 0xb) << 0x1)), Mu, get_corresponding_CS(pkt, Mu)); + RzILOpPure *op_AND_10 = LOGAND(DUP(Mu), SN(32, 0xf0000000)); + RzILOpPure *op_RSHIFT_12 = SHIFTRA(op_AND_10, SN(32, 21)); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(DUP(Mu), SN(32, 17)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0x7f)); + RzILOpPure *op_OR_17 = LOGOR(op_RSHIFT_12, op_AND_16); + RzILOpPure *op_LSHIFT_24 = SHIFTL0(SEXTRACT64(CAST(64, IL_FALSE, op_OR_17), SN(32, 0), SN(32, 11)), SN(32, 1)); + RzILOpEffect *fcirc_add_call_27 = hex_fcirc_add(bundle, Rx_op, CAST(32, MSB(op_LSHIFT_24), DUP(op_LSHIFT_24)), Mu, HEX_GET_CORRESPONDING_CS(pkt, Mu_op)); + + // h_tmp520 = fcirc_add(bundle, Rx, ((st32) (sextract64(((ut64) (((Mu & 0xf0000000) >> 0x15) | ((Mu >> 0x11) & 0x7f))), 0x0, 0xb) << 0x1)), Mu, get_corresponding_CS(pkt, Mu)); + RzILOpEffect *op_ASSIGN_hybrid_tmp_29 = SETL("h_tmp520", SIGNED(32, VARL("ret_val"))); + + // seq(fcirc_add(bundle, Rx, ((st32) (sextract64(((ut64) (((Mu & 0x ...; + RzILOpEffect *seq_30 = SEQN(2, fcirc_add_call_27, op_ASSIGN_hybrid_tmp_29); + + // mem_store_ut16(EA, ((ut16) ((st16) ((Rt >> 0x0) & 0xffff)))); + RzILOpPure *op_RSHIFT_36 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_38 = LOGAND(op_RSHIFT_36, SN(32, 0xffff)); + RzILOpEffect *ms_cast_ut16_40_41 = STOREW(VARL("EA"), CAST(16, IL_FALSE, CAST(16, MSB(op_AND_38), DUP(op_AND_38)))); + + RzILOpEffect *instruction_sequence = SEQN(3, seq_30, op_ASSIGN_3, ms_cast_ut16_40_41); + return instruction_sequence; +} + +// memh(Rx++Ii) = Rt +RzILOpEffect *hex_il_op_s2_storerh_pi(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // EA = ((ut32) Rx); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, 
READ_REG(pkt, Rx_op, false))); + + // s = s; + RzILOpEffect *imm_assign_5 = SETL("s", s); + + // Rx = Rx + s; + RzILOpPure *op_ADD_7 = ADD(READ_REG(pkt, Rx_op, false), VARL("s")); + RzILOpEffect *op_ASSIGN_8 = WRITE_REG(bundle, Rx_op, op_ADD_7); + + // mem_store_ut16(EA, ((ut16) ((st16) ((Rt >> 0x0) & 0xffff)))); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0xffff)); + RzILOpEffect *ms_cast_ut16_18_19 = STOREW(VARL("EA"), CAST(16, IL_FALSE, CAST(16, MSB(op_AND_16), DUP(op_AND_16)))); + + RzILOpEffect *instruction_sequence = SEQN(4, imm_assign_5, op_ASSIGN_3, op_ASSIGN_8, ms_cast_ut16_18_19); + return instruction_sequence; +} + +// memh(Rx++Mu) = Rt +RzILOpEffect *hex_il_op_s2_storerh_pr(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Mu_op = ISA2REG(hi, 'u', false); + RzILOpPure *Mu = READ_REG(pkt, Mu_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // EA = ((ut32) Rx); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // Rx = Rx + Mu; + RzILOpPure *op_ADD_6 = ADD(READ_REG(pkt, Rx_op, false), Mu); + RzILOpEffect *op_ASSIGN_7 = WRITE_REG(bundle, Rx_op, op_ADD_6); + + // mem_store_ut16(EA, ((ut16) ((st16) ((Rt >> 0x0) & 0xffff)))); + RzILOpPure *op_RSHIFT_13 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_15 = LOGAND(op_RSHIFT_13, SN(32, 0xffff)); + RzILOpEffect *ms_cast_ut16_17_18 = STOREW(VARL("EA"), CAST(16, IL_FALSE, CAST(16, MSB(op_AND_15), DUP(op_AND_15)))); + + RzILOpEffect *instruction_sequence = SEQN(3, op_ASSIGN_3, op_ASSIGN_7, ms_cast_ut16_17_18); + return instruction_sequence; +} + +// memh(gp+Ii) = Rt +RzILOpEffect *hex_il_op_s2_storerhgp(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + 
RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp gp_op = ALIAS2OP(HEX_REG_ALIAS_GP, false); + RzILOpPure *gp = READ_REG(pkt, &gp_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = gp + u; + RzILOpPure *op_ADD_4 = ADD(gp, VARL("u")); + RzILOpEffect *op_ASSIGN_5 = SETL("EA", op_ADD_4); + + // mem_store_ut16(EA, ((ut16) ((st16) ((Rt >> 0x0) & 0xffff)))); + RzILOpPure *op_RSHIFT_11 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_13 = LOGAND(op_RSHIFT_11, SN(32, 0xffff)); + RzILOpEffect *ms_cast_ut16_15_16 = STOREW(VARL("EA"), CAST(16, IL_FALSE, CAST(16, MSB(op_AND_13), DUP(op_AND_13)))); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_5, ms_cast_ut16_15_16); + return instruction_sequence; +} + +// memh(Rs+Ii) = Nt.new +RzILOpEffect *hex_il_op_s2_storerhnew_io(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp Nt_new_op = NREG2OP(bundle, 't'); + RzILOpPure *Nt_new = READ_REG(pkt, &Nt_new_op, true); + + // s = s; + RzILOpEffect *imm_assign_0 = SETL("s", s); + + // EA = ((ut32) Rs + s); + RzILOpPure *op_ADD_4 = ADD(Rs, VARL("s")); + RzILOpEffect *op_ASSIGN_6 = SETL("EA", CAST(32, IL_FALSE, op_ADD_4)); + + // mem_store_ut16(EA, ((ut16) ((st16) ((Nt_new >> 0x0) & 0xffff)))); + RzILOpPure *op_RSHIFT_12 = SHIFTRA(Nt_new, SN(32, 0)); + RzILOpPure *op_AND_14 = LOGAND(op_RSHIFT_12, SN(32, 0xffff)); + RzILOpEffect *ms_cast_ut16_16_17 = STOREW(VARL("EA"), CAST(16, IL_FALSE, CAST(16, MSB(op_AND_14), DUP(op_AND_14)))); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_6, ms_cast_ut16_16_17); + return instruction_sequence; +} + +// 
memh(Rx++Mu:brev) = Nt.new +RzILOpEffect *hex_il_op_s2_storerhnew_pbr(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Mu_op = ISA2REG(hi, 'u', false); + RzILOpPure *Mu = READ_REG(pkt, Mu_op, false); + const HexOp Nt_new_op = NREG2OP(bundle, 't'); + RzILOpPure *Nt_new = READ_REG(pkt, &Nt_new_op, true); + + // fbrev(((ut32) Rx)); + RzILOpEffect *fbrev_call_3 = hex_fbrev(CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // h_tmp521 = fbrev(((ut32) Rx)); + RzILOpEffect *op_ASSIGN_hybrid_tmp_5 = SETL("h_tmp521", UNSIGNED(32, VARL("ret_val"))); + + // seq(fbrev(((ut32) Rx)); h_tmp521 = fbrev(((ut32) Rx))); + RzILOpEffect *seq_6 = SEQN(2, fbrev_call_3, op_ASSIGN_hybrid_tmp_5); + + // EA = h_tmp521; + RzILOpEffect *op_ASSIGN_7 = SETL("EA", VARL("h_tmp521")); + + // seq(seq(fbrev(((ut32) Rx)); h_tmp521 = fbrev(((ut32) Rx))); EA = ...; + RzILOpEffect *seq_8 = SEQN(2, seq_6, op_ASSIGN_7); + + // Rx = Rx + Mu; + RzILOpPure *op_ADD_10 = ADD(READ_REG(pkt, Rx_op, false), Mu); + RzILOpEffect *op_ASSIGN_11 = WRITE_REG(bundle, Rx_op, op_ADD_10); + + // mem_store_ut16(EA, ((ut16) ((st16) ((Nt_new >> 0x0) & 0xffff)))); + RzILOpPure *op_RSHIFT_17 = SHIFTRA(Nt_new, SN(32, 0)); + RzILOpPure *op_AND_19 = LOGAND(op_RSHIFT_17, SN(32, 0xffff)); + RzILOpEffect *ms_cast_ut16_21_22 = STOREW(VARL("EA"), CAST(16, IL_FALSE, CAST(16, MSB(op_AND_19), DUP(op_AND_19)))); + + RzILOpEffect *instruction_sequence = SEQN(3, seq_8, op_ASSIGN_11, ms_cast_ut16_21_22); + return instruction_sequence; +} + +// memh(Rx++Ii:circ(Mu)) = Nt.new +RzILOpEffect *hex_il_op_s2_storerhnew_pci(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + const HexOp *Mu_op = ISA2REG(hi, 'u', false); + RzILOpPure *Mu 
= READ_REG(pkt, Mu_op, false); + const HexOp Nt_new_op = NREG2OP(bundle, 't'); + RzILOpPure *Nt_new = READ_REG(pkt, &Nt_new_op, true); + + // EA = ((ut32) Rx); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // s = s; + RzILOpEffect *imm_assign_5 = SETL("s", s); + + // fcirc_add(bundle, Rx, s, Mu, get_corresponding_CS(pkt, Mu)); + RzILOpEffect *fcirc_add_call_9 = hex_fcirc_add(bundle, Rx_op, VARL("s"), Mu, HEX_GET_CORRESPONDING_CS(pkt, Mu_op)); + + // h_tmp522 = fcirc_add(bundle, Rx, s, Mu, get_corresponding_CS(pkt, Mu)); + RzILOpEffect *op_ASSIGN_hybrid_tmp_11 = SETL("h_tmp522", SIGNED(32, VARL("ret_val"))); + + // seq(fcirc_add(bundle, Rx, s, Mu, get_corresponding_CS(pkt, Mu)); ...; + RzILOpEffect *seq_12 = SEQN(2, fcirc_add_call_9, op_ASSIGN_hybrid_tmp_11); + + // mem_store_ut16(EA, ((ut16) ((st16) ((Nt_new >> 0x0) & 0xffff)))); + RzILOpPure *op_RSHIFT_18 = SHIFTRA(Nt_new, SN(32, 0)); + RzILOpPure *op_AND_20 = LOGAND(op_RSHIFT_18, SN(32, 0xffff)); + RzILOpEffect *ms_cast_ut16_22_23 = STOREW(VARL("EA"), CAST(16, IL_FALSE, CAST(16, MSB(op_AND_20), DUP(op_AND_20)))); + + RzILOpEffect *instruction_sequence = SEQN(4, imm_assign_5, seq_12, op_ASSIGN_3, ms_cast_ut16_22_23); + return instruction_sequence; +} + +// memh(Rx++I:circ(Mu)) = Nt.new +RzILOpEffect *hex_il_op_s2_storerhnew_pcr(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Mu_op = ISA2REG(hi, 'u', false); + RzILOpPure *Mu = READ_REG(pkt, Mu_op, false); + const HexOp Nt_new_op = NREG2OP(bundle, 't'); + RzILOpPure *Nt_new = READ_REG(pkt, &Nt_new_op, true); + + // EA = ((ut32) Rx); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // fcirc_add(bundle, Rx, ((st32) (sextract64(((ut64) (((Mu & 0xf0000000) >> 0x15) | ((Mu >> 0x11) & 0x7f))), 0x0, 0xb) << 0x1)), Mu, 
get_corresponding_CS(pkt, Mu)); + RzILOpPure *op_AND_10 = LOGAND(DUP(Mu), SN(32, 0xf0000000)); + RzILOpPure *op_RSHIFT_12 = SHIFTRA(op_AND_10, SN(32, 21)); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(DUP(Mu), SN(32, 17)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0x7f)); + RzILOpPure *op_OR_17 = LOGOR(op_RSHIFT_12, op_AND_16); + RzILOpPure *op_LSHIFT_24 = SHIFTL0(SEXTRACT64(CAST(64, IL_FALSE, op_OR_17), SN(32, 0), SN(32, 11)), SN(32, 1)); + RzILOpEffect *fcirc_add_call_27 = hex_fcirc_add(bundle, Rx_op, CAST(32, MSB(op_LSHIFT_24), DUP(op_LSHIFT_24)), Mu, HEX_GET_CORRESPONDING_CS(pkt, Mu_op)); + + // h_tmp523 = fcirc_add(bundle, Rx, ((st32) (sextract64(((ut64) (((Mu & 0xf0000000) >> 0x15) | ((Mu >> 0x11) & 0x7f))), 0x0, 0xb) << 0x1)), Mu, get_corresponding_CS(pkt, Mu)); + RzILOpEffect *op_ASSIGN_hybrid_tmp_29 = SETL("h_tmp523", SIGNED(32, VARL("ret_val"))); + + // seq(fcirc_add(bundle, Rx, ((st32) (sextract64(((ut64) (((Mu & 0x ...; + RzILOpEffect *seq_30 = SEQN(2, fcirc_add_call_27, op_ASSIGN_hybrid_tmp_29); + + // mem_store_ut16(EA, ((ut16) ((st16) ((Nt_new >> 0x0) & 0xffff)))); + RzILOpPure *op_RSHIFT_36 = SHIFTRA(Nt_new, SN(32, 0)); + RzILOpPure *op_AND_38 = LOGAND(op_RSHIFT_36, SN(32, 0xffff)); + RzILOpEffect *ms_cast_ut16_40_41 = STOREW(VARL("EA"), CAST(16, IL_FALSE, CAST(16, MSB(op_AND_38), DUP(op_AND_38)))); + + RzILOpEffect *instruction_sequence = SEQN(3, seq_30, op_ASSIGN_3, ms_cast_ut16_40_41); + return instruction_sequence; +} + +// memh(Rx++Ii) = Nt.new +RzILOpEffect *hex_il_op_s2_storerhnew_pi(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + const HexOp Nt_new_op = NREG2OP(bundle, 't'); + RzILOpPure *Nt_new = READ_REG(pkt, &Nt_new_op, true); + + // EA = ((ut32) Rx); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // s = s; + 
RzILOpEffect *imm_assign_5 = SETL("s", s); + + // Rx = Rx + s; + RzILOpPure *op_ADD_7 = ADD(READ_REG(pkt, Rx_op, false), VARL("s")); + RzILOpEffect *op_ASSIGN_8 = WRITE_REG(bundle, Rx_op, op_ADD_7); + + // mem_store_ut16(EA, ((ut16) ((st16) ((Nt_new >> 0x0) & 0xffff)))); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(Nt_new, SN(32, 0)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0xffff)); + RzILOpEffect *ms_cast_ut16_18_19 = STOREW(VARL("EA"), CAST(16, IL_FALSE, CAST(16, MSB(op_AND_16), DUP(op_AND_16)))); + + RzILOpEffect *instruction_sequence = SEQN(4, imm_assign_5, op_ASSIGN_3, op_ASSIGN_8, ms_cast_ut16_18_19); + return instruction_sequence; +} + +// memh(Rx++Mu) = Nt.new +RzILOpEffect *hex_il_op_s2_storerhnew_pr(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Mu_op = ISA2REG(hi, 'u', false); + RzILOpPure *Mu = READ_REG(pkt, Mu_op, false); + const HexOp Nt_new_op = NREG2OP(bundle, 't'); + RzILOpPure *Nt_new = READ_REG(pkt, &Nt_new_op, true); + + // EA = ((ut32) Rx); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // Rx = Rx + Mu; + RzILOpPure *op_ADD_6 = ADD(READ_REG(pkt, Rx_op, false), Mu); + RzILOpEffect *op_ASSIGN_7 = WRITE_REG(bundle, Rx_op, op_ADD_6); + + // mem_store_ut16(EA, ((ut16) ((st16) ((Nt_new >> 0x0) & 0xffff)))); + RzILOpPure *op_RSHIFT_13 = SHIFTRA(Nt_new, SN(32, 0)); + RzILOpPure *op_AND_15 = LOGAND(op_RSHIFT_13, SN(32, 0xffff)); + RzILOpEffect *ms_cast_ut16_17_18 = STOREW(VARL("EA"), CAST(16, IL_FALSE, CAST(16, MSB(op_AND_15), DUP(op_AND_15)))); + + RzILOpEffect *instruction_sequence = SEQN(3, op_ASSIGN_3, op_ASSIGN_7, ms_cast_ut16_17_18); + return instruction_sequence; +} + +// memh(gp+Ii) = Nt.new +RzILOpEffect *hex_il_op_s2_storerhnewgp(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = 
UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp gp_op = ALIAS2OP(HEX_REG_ALIAS_GP, false); + RzILOpPure *gp = READ_REG(pkt, &gp_op, false); + const HexOp Nt_new_op = NREG2OP(bundle, 't'); + RzILOpPure *Nt_new = READ_REG(pkt, &Nt_new_op, true); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = gp + u; + RzILOpPure *op_ADD_4 = ADD(gp, VARL("u")); + RzILOpEffect *op_ASSIGN_5 = SETL("EA", op_ADD_4); + + // mem_store_ut16(EA, ((ut16) ((st16) ((Nt_new >> 0x0) & 0xffff)))); + RzILOpPure *op_RSHIFT_11 = SHIFTRA(Nt_new, SN(32, 0)); + RzILOpPure *op_AND_13 = LOGAND(op_RSHIFT_11, SN(32, 0xffff)); + RzILOpEffect *ms_cast_ut16_15_16 = STOREW(VARL("EA"), CAST(16, IL_FALSE, CAST(16, MSB(op_AND_13), DUP(op_AND_13)))); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_5, ms_cast_ut16_15_16); + return instruction_sequence; +} + +// memw(Rs+Ii) = Rt +RzILOpEffect *hex_il_op_s2_storeri_io(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // s = s; + RzILOpEffect *imm_assign_0 = SETL("s", s); + + // EA = ((ut32) Rs + s); + RzILOpPure *op_ADD_4 = ADD(Rs, VARL("s")); + RzILOpEffect *op_ASSIGN_6 = SETL("EA", CAST(32, IL_FALSE, op_ADD_4)); + + // mem_store_ut32(EA, ((ut32) Rt)); + RzILOpEffect *ms_cast_ut32_9_10 = STOREW(VARL("EA"), CAST(32, IL_FALSE, Rt)); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_6, ms_cast_ut32_9_10); + return instruction_sequence; +} + +// memw(Rx++Mu:brev) = Rt +RzILOpEffect *hex_il_op_s2_storeri_pbr(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 
'x', false); + + const HexOp *Mu_op = ISA2REG(hi, 'u', false); + RzILOpPure *Mu = READ_REG(pkt, Mu_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // fbrev(((ut32) Rx)); + RzILOpEffect *fbrev_call_3 = hex_fbrev(CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // h_tmp524 = fbrev(((ut32) Rx)); + RzILOpEffect *op_ASSIGN_hybrid_tmp_5 = SETL("h_tmp524", UNSIGNED(32, VARL("ret_val"))); + + // seq(fbrev(((ut32) Rx)); h_tmp524 = fbrev(((ut32) Rx))); + RzILOpEffect *seq_6 = SEQN(2, fbrev_call_3, op_ASSIGN_hybrid_tmp_5); + + // EA = h_tmp524; + RzILOpEffect *op_ASSIGN_7 = SETL("EA", VARL("h_tmp524")); + + // seq(seq(fbrev(((ut32) Rx)); h_tmp524 = fbrev(((ut32) Rx))); EA = ...; + RzILOpEffect *seq_8 = SEQN(2, seq_6, op_ASSIGN_7); + + // Rx = Rx + Mu; + RzILOpPure *op_ADD_10 = ADD(READ_REG(pkt, Rx_op, false), Mu); + RzILOpEffect *op_ASSIGN_11 = WRITE_REG(bundle, Rx_op, op_ADD_10); + + // mem_store_ut32(EA, ((ut32) Rt)); + RzILOpEffect *ms_cast_ut32_14_15 = STOREW(VARL("EA"), CAST(32, IL_FALSE, Rt)); + + RzILOpEffect *instruction_sequence = SEQN(3, seq_8, op_ASSIGN_11, ms_cast_ut32_14_15); + return instruction_sequence; +} + +// memw(Rx++Ii:circ(Mu)) = Rt +RzILOpEffect *hex_il_op_s2_storeri_pci(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + const HexOp *Mu_op = ISA2REG(hi, 'u', false); + RzILOpPure *Mu = READ_REG(pkt, Mu_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // EA = ((ut32) Rx); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // s = s; + RzILOpEffect *imm_assign_5 = SETL("s", s); + + // fcirc_add(bundle, Rx, s, Mu, get_corresponding_CS(pkt, Mu)); + RzILOpEffect *fcirc_add_call_9 = hex_fcirc_add(bundle, Rx_op, 
VARL("s"), Mu, HEX_GET_CORRESPONDING_CS(pkt, Mu_op)); + + // h_tmp525 = fcirc_add(bundle, Rx, s, Mu, get_corresponding_CS(pkt, Mu)); + RzILOpEffect *op_ASSIGN_hybrid_tmp_11 = SETL("h_tmp525", SIGNED(32, VARL("ret_val"))); + + // seq(fcirc_add(bundle, Rx, s, Mu, get_corresponding_CS(pkt, Mu)); ...; + RzILOpEffect *seq_12 = SEQN(2, fcirc_add_call_9, op_ASSIGN_hybrid_tmp_11); + + // mem_store_ut32(EA, ((ut32) Rt)); + RzILOpEffect *ms_cast_ut32_15_16 = STOREW(VARL("EA"), CAST(32, IL_FALSE, Rt)); + + RzILOpEffect *instruction_sequence = SEQN(4, imm_assign_5, seq_12, op_ASSIGN_3, ms_cast_ut32_15_16); + return instruction_sequence; +} + +// memw(Rx++I:circ(Mu)) = Rt +RzILOpEffect *hex_il_op_s2_storeri_pcr(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Mu_op = ISA2REG(hi, 'u', false); + RzILOpPure *Mu = READ_REG(pkt, Mu_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // EA = ((ut32) Rx); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // fcirc_add(bundle, Rx, ((st32) (sextract64(((ut64) (((Mu & 0xf0000000) >> 0x15) | ((Mu >> 0x11) & 0x7f))), 0x0, 0xb) << 0x2)), Mu, get_corresponding_CS(pkt, Mu)); + RzILOpPure *op_AND_10 = LOGAND(DUP(Mu), SN(32, 0xf0000000)); + RzILOpPure *op_RSHIFT_12 = SHIFTRA(op_AND_10, SN(32, 21)); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(DUP(Mu), SN(32, 17)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0x7f)); + RzILOpPure *op_OR_17 = LOGOR(op_RSHIFT_12, op_AND_16); + RzILOpPure *op_LSHIFT_24 = SHIFTL0(SEXTRACT64(CAST(64, IL_FALSE, op_OR_17), SN(32, 0), SN(32, 11)), SN(32, 2)); + RzILOpEffect *fcirc_add_call_27 = hex_fcirc_add(bundle, Rx_op, CAST(32, MSB(op_LSHIFT_24), DUP(op_LSHIFT_24)), Mu, HEX_GET_CORRESPONDING_CS(pkt, Mu_op)); + + // h_tmp526 = fcirc_add(bundle, Rx, ((st32) 
(sextract64(((ut64) (((Mu & 0xf0000000) >> 0x15) | ((Mu >> 0x11) & 0x7f))), 0x0, 0xb) << 0x2)), Mu, get_corresponding_CS(pkt, Mu)); + RzILOpEffect *op_ASSIGN_hybrid_tmp_29 = SETL("h_tmp526", SIGNED(32, VARL("ret_val"))); + + // seq(fcirc_add(bundle, Rx, ((st32) (sextract64(((ut64) (((Mu & 0x ...; + RzILOpEffect *seq_30 = SEQN(2, fcirc_add_call_27, op_ASSIGN_hybrid_tmp_29); + + // mem_store_ut32(EA, ((ut32) Rt)); + RzILOpEffect *ms_cast_ut32_33_34 = STOREW(VARL("EA"), CAST(32, IL_FALSE, Rt)); + + RzILOpEffect *instruction_sequence = SEQN(3, seq_30, op_ASSIGN_3, ms_cast_ut32_33_34); + return instruction_sequence; +} + +// memw(Rx++Ii) = Rt +RzILOpEffect *hex_il_op_s2_storeri_pi(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // EA = ((ut32) Rx); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // s = s; + RzILOpEffect *imm_assign_5 = SETL("s", s); + + // Rx = Rx + s; + RzILOpPure *op_ADD_7 = ADD(READ_REG(pkt, Rx_op, false), VARL("s")); + RzILOpEffect *op_ASSIGN_8 = WRITE_REG(bundle, Rx_op, op_ADD_7); + + // mem_store_ut32(EA, ((ut32) Rt)); + RzILOpEffect *ms_cast_ut32_11_12 = STOREW(VARL("EA"), CAST(32, IL_FALSE, Rt)); + + RzILOpEffect *instruction_sequence = SEQN(4, imm_assign_5, op_ASSIGN_3, op_ASSIGN_8, ms_cast_ut32_11_12); + return instruction_sequence; +} + +// memw(Rx++Mu) = Rt +RzILOpEffect *hex_il_op_s2_storeri_pr(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Mu_op = ISA2REG(hi, 'u', false); + RzILOpPure *Mu = READ_REG(pkt, Mu_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure 
*Rt = READ_REG(pkt, Rt_op, false); + + // EA = ((ut32) Rx); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // Rx = Rx + Mu; + RzILOpPure *op_ADD_6 = ADD(READ_REG(pkt, Rx_op, false), Mu); + RzILOpEffect *op_ASSIGN_7 = WRITE_REG(bundle, Rx_op, op_ADD_6); + + // mem_store_ut32(EA, ((ut32) Rt)); + RzILOpEffect *ms_cast_ut32_10_11 = STOREW(VARL("EA"), CAST(32, IL_FALSE, Rt)); + + RzILOpEffect *instruction_sequence = SEQN(3, op_ASSIGN_3, op_ASSIGN_7, ms_cast_ut32_10_11); + return instruction_sequence; +} + +// memw(gp+Ii) = Rt +RzILOpEffect *hex_il_op_s2_storerigp(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp gp_op = ALIAS2OP(HEX_REG_ALIAS_GP, false); + RzILOpPure *gp = READ_REG(pkt, &gp_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = gp + u; + RzILOpPure *op_ADD_4 = ADD(gp, VARL("u")); + RzILOpEffect *op_ASSIGN_5 = SETL("EA", op_ADD_4); + + // mem_store_ut32(EA, ((ut32) Rt)); + RzILOpEffect *ms_cast_ut32_8_9 = STOREW(VARL("EA"), CAST(32, IL_FALSE, Rt)); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_5, ms_cast_ut32_8_9); + return instruction_sequence; +} + +// memw(Rs+Ii) = Nt.new +RzILOpEffect *hex_il_op_s2_storerinew_io(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp Nt_new_op = NREG2OP(bundle, 't'); + RzILOpPure *Nt_new = READ_REG(pkt, &Nt_new_op, true); + + // s = s; + RzILOpEffect *imm_assign_0 = SETL("s", s); + + // EA = ((ut32) Rs + s); + RzILOpPure *op_ADD_4 = ADD(Rs, VARL("s")); + 
RzILOpEffect *op_ASSIGN_6 = SETL("EA", CAST(32, IL_FALSE, op_ADD_4)); + + // mem_store_ut32(EA, ((ut32) Nt_new)); + RzILOpEffect *ms_cast_ut32_9_10 = STOREW(VARL("EA"), CAST(32, IL_FALSE, Nt_new)); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_6, ms_cast_ut32_9_10); + return instruction_sequence; +} + +// memw(Rx++Mu:brev) = Nt.new +RzILOpEffect *hex_il_op_s2_storerinew_pbr(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Mu_op = ISA2REG(hi, 'u', false); + RzILOpPure *Mu = READ_REG(pkt, Mu_op, false); + const HexOp Nt_new_op = NREG2OP(bundle, 't'); + RzILOpPure *Nt_new = READ_REG(pkt, &Nt_new_op, true); + + // fbrev(((ut32) Rx)); + RzILOpEffect *fbrev_call_3 = hex_fbrev(CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // h_tmp527 = fbrev(((ut32) Rx)); + RzILOpEffect *op_ASSIGN_hybrid_tmp_5 = SETL("h_tmp527", UNSIGNED(32, VARL("ret_val"))); + + // seq(fbrev(((ut32) Rx)); h_tmp527 = fbrev(((ut32) Rx))); + RzILOpEffect *seq_6 = SEQN(2, fbrev_call_3, op_ASSIGN_hybrid_tmp_5); + + // EA = h_tmp527; + RzILOpEffect *op_ASSIGN_7 = SETL("EA", VARL("h_tmp527")); + + // seq(seq(fbrev(((ut32) Rx)); h_tmp527 = fbrev(((ut32) Rx))); EA = ...; + RzILOpEffect *seq_8 = SEQN(2, seq_6, op_ASSIGN_7); + + // Rx = Rx + Mu; + RzILOpPure *op_ADD_10 = ADD(READ_REG(pkt, Rx_op, false), Mu); + RzILOpEffect *op_ASSIGN_11 = WRITE_REG(bundle, Rx_op, op_ADD_10); + + // mem_store_ut32(EA, ((ut32) Nt_new)); + RzILOpEffect *ms_cast_ut32_14_15 = STOREW(VARL("EA"), CAST(32, IL_FALSE, Nt_new)); + + RzILOpEffect *instruction_sequence = SEQN(3, seq_8, op_ASSIGN_11, ms_cast_ut32_14_15); + return instruction_sequence; +} + +// memw(Rx++Ii:circ(Mu)) = Nt.new +RzILOpEffect *hex_il_op_s2_storerinew_pci(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp 
*Rx_op = ISA2REG(hi, 'x', false); + + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + const HexOp *Mu_op = ISA2REG(hi, 'u', false); + RzILOpPure *Mu = READ_REG(pkt, Mu_op, false); + const HexOp Nt_new_op = NREG2OP(bundle, 't'); + RzILOpPure *Nt_new = READ_REG(pkt, &Nt_new_op, true); + + // EA = ((ut32) Rx); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // s = s; + RzILOpEffect *imm_assign_5 = SETL("s", s); + + // fcirc_add(bundle, Rx, s, Mu, get_corresponding_CS(pkt, Mu)); + RzILOpEffect *fcirc_add_call_9 = hex_fcirc_add(bundle, Rx_op, VARL("s"), Mu, HEX_GET_CORRESPONDING_CS(pkt, Mu_op)); + + // h_tmp528 = fcirc_add(bundle, Rx, s, Mu, get_corresponding_CS(pkt, Mu)); + RzILOpEffect *op_ASSIGN_hybrid_tmp_11 = SETL("h_tmp528", SIGNED(32, VARL("ret_val"))); + + // seq(fcirc_add(bundle, Rx, s, Mu, get_corresponding_CS(pkt, Mu)); ...; + RzILOpEffect *seq_12 = SEQN(2, fcirc_add_call_9, op_ASSIGN_hybrid_tmp_11); + + // mem_store_ut32(EA, ((ut32) Nt_new)); + RzILOpEffect *ms_cast_ut32_15_16 = STOREW(VARL("EA"), CAST(32, IL_FALSE, Nt_new)); + + RzILOpEffect *instruction_sequence = SEQN(4, imm_assign_5, seq_12, op_ASSIGN_3, ms_cast_ut32_15_16); + return instruction_sequence; +} + +// memw(Rx++I:circ(Mu)) = Nt.new +RzILOpEffect *hex_il_op_s2_storerinew_pcr(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Mu_op = ISA2REG(hi, 'u', false); + RzILOpPure *Mu = READ_REG(pkt, Mu_op, false); + const HexOp Nt_new_op = NREG2OP(bundle, 't'); + RzILOpPure *Nt_new = READ_REG(pkt, &Nt_new_op, true); + + // EA = ((ut32) Rx); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // fcirc_add(bundle, Rx, ((st32) (sextract64(((ut64) (((Mu & 0xf0000000) >> 0x15) | ((Mu >> 0x11) & 0x7f))), 0x0, 0xb) << 0x2)), Mu, get_corresponding_CS(pkt, Mu)); + RzILOpPure 
*op_AND_10 = LOGAND(DUP(Mu), SN(32, 0xf0000000)); + RzILOpPure *op_RSHIFT_12 = SHIFTRA(op_AND_10, SN(32, 21)); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(DUP(Mu), SN(32, 17)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0x7f)); + RzILOpPure *op_OR_17 = LOGOR(op_RSHIFT_12, op_AND_16); + RzILOpPure *op_LSHIFT_24 = SHIFTL0(SEXTRACT64(CAST(64, IL_FALSE, op_OR_17), SN(32, 0), SN(32, 11)), SN(32, 2)); + RzILOpEffect *fcirc_add_call_27 = hex_fcirc_add(bundle, Rx_op, CAST(32, MSB(op_LSHIFT_24), DUP(op_LSHIFT_24)), Mu, HEX_GET_CORRESPONDING_CS(pkt, Mu_op)); + + // h_tmp529 = fcirc_add(bundle, Rx, ((st32) (sextract64(((ut64) (((Mu & 0xf0000000) >> 0x15) | ((Mu >> 0x11) & 0x7f))), 0x0, 0xb) << 0x2)), Mu, get_corresponding_CS(pkt, Mu)); + RzILOpEffect *op_ASSIGN_hybrid_tmp_29 = SETL("h_tmp529", SIGNED(32, VARL("ret_val"))); + + // seq(fcirc_add(bundle, Rx, ((st32) (sextract64(((ut64) (((Mu & 0x ...; + RzILOpEffect *seq_30 = SEQN(2, fcirc_add_call_27, op_ASSIGN_hybrid_tmp_29); + + // mem_store_ut32(EA, ((ut32) Nt_new)); + RzILOpEffect *ms_cast_ut32_33_34 = STOREW(VARL("EA"), CAST(32, IL_FALSE, Nt_new)); + + RzILOpEffect *instruction_sequence = SEQN(3, seq_30, op_ASSIGN_3, ms_cast_ut32_33_34); + return instruction_sequence; +} + +// memw(Rx++Ii) = Nt.new +RzILOpEffect *hex_il_op_s2_storerinew_pi(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + const HexOp Nt_new_op = NREG2OP(bundle, 't'); + RzILOpPure *Nt_new = READ_REG(pkt, &Nt_new_op, true); + + // EA = ((ut32) Rx); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // s = s; + RzILOpEffect *imm_assign_5 = SETL("s", s); + + // Rx = Rx + s; + RzILOpPure *op_ADD_7 = ADD(READ_REG(pkt, Rx_op, false), VARL("s")); + RzILOpEffect *op_ASSIGN_8 = WRITE_REG(bundle, Rx_op, op_ADD_7); + + // mem_store_ut32(EA, 
((ut32) Nt_new)); + RzILOpEffect *ms_cast_ut32_11_12 = STOREW(VARL("EA"), CAST(32, IL_FALSE, Nt_new)); + + RzILOpEffect *instruction_sequence = SEQN(4, imm_assign_5, op_ASSIGN_3, op_ASSIGN_8, ms_cast_ut32_11_12); + return instruction_sequence; +} + +// memw(Rx++Mu) = Nt.new +RzILOpEffect *hex_il_op_s2_storerinew_pr(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Mu_op = ISA2REG(hi, 'u', false); + RzILOpPure *Mu = READ_REG(pkt, Mu_op, false); + const HexOp Nt_new_op = NREG2OP(bundle, 't'); + RzILOpPure *Nt_new = READ_REG(pkt, &Nt_new_op, true); + + // EA = ((ut32) Rx); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false))); + + // Rx = Rx + Mu; + RzILOpPure *op_ADD_6 = ADD(READ_REG(pkt, Rx_op, false), Mu); + RzILOpEffect *op_ASSIGN_7 = WRITE_REG(bundle, Rx_op, op_ADD_6); + + // mem_store_ut32(EA, ((ut32) Nt_new)); + RzILOpEffect *ms_cast_ut32_10_11 = STOREW(VARL("EA"), CAST(32, IL_FALSE, Nt_new)); + + RzILOpEffect *instruction_sequence = SEQN(3, op_ASSIGN_3, op_ASSIGN_7, ms_cast_ut32_10_11); + return instruction_sequence; +} + +// memw(gp+Ii) = Nt.new +RzILOpEffect *hex_il_op_s2_storerinewgp(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp gp_op = ALIAS2OP(HEX_REG_ALIAS_GP, false); + RzILOpPure *gp = READ_REG(pkt, &gp_op, false); + const HexOp Nt_new_op = NREG2OP(bundle, 't'); + RzILOpPure *Nt_new = READ_REG(pkt, &Nt_new_op, true); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = gp + u; + RzILOpPure *op_ADD_4 = ADD(gp, VARL("u")); + RzILOpEffect *op_ASSIGN_5 = SETL("EA", op_ADD_4); + + // mem_store_ut32(EA, ((ut32) Nt_new)); + RzILOpEffect *ms_cast_ut32_8_9 = STOREW(VARL("EA"), CAST(32, IL_FALSE, Nt_new)); + + RzILOpEffect 
*instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_5, ms_cast_ut32_8_9); + return instruction_sequence; +} + +// memw_locked(Rs,Pd) = Rt +RzILOpEffect *hex_il_op_s2_storew_locked(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// memw_rl(Rs):at = Rt +RzILOpEffect *hex_il_op_s2_storew_rl_at_vi(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // EA = ((ut32) Rs); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, Rs)); + + // mem_store_ut32(EA, ((ut32) Rt)); + RzILOpEffect *ms_cast_ut32_6_7 = STOREW(VARL("EA"), CAST(32, IL_FALSE, Rt)); + + RzILOpEffect *instruction_sequence = SEQN(2, op_ASSIGN_3, ms_cast_ut32_6_7); + return instruction_sequence; +} + +// memw_rl(Rs):st = Rt +RzILOpEffect *hex_il_op_s2_storew_rl_st_vi(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // EA = ((ut32) Rs); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, Rs)); + + // mem_store_ut32(EA, ((ut32) Rt)); + RzILOpEffect *ms_cast_ut32_6_7 = STOREW(VARL("EA"), CAST(32, IL_FALSE, Rt)); + + RzILOpEffect *instruction_sequence = SEQN(2, op_ASSIGN_3, ms_cast_ut32_6_7); + return instruction_sequence; +} + +// Rd = vsathb(Rs) +RzILOpEffect *hex_il_op_s2_svsathb(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // set_usr_field(bundle, 
HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_43 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((st16) ((Rs >> 0x0) & 0xffff))), 0x0, 0x8) == ((st64) ((st16) ((Rs >> 0x0) & 0xffff))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st32) ((st16) ((Rs >> 0x0) & 0xffff))) < 0x0) ? (-(0x1 << 0x7)) : (0x1 << 0x7) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_16 = SHIFTRA(Rs, SN(32, 0)); + RzILOpPure *op_AND_18 = LOGAND(op_RSHIFT_16, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_28 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_30 = LOGAND(op_RSHIFT_28, SN(32, 0xffff)); + RzILOpPure *op_EQ_33 = EQ(SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_18), DUP(op_AND_18))), SN(32, 0), SN(32, 8)), CAST(64, MSB(CAST(16, MSB(op_AND_30), DUP(op_AND_30))), CAST(16, MSB(DUP(op_AND_30)), DUP(op_AND_30)))); + RzILOpPure *op_RSHIFT_47 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_49 = LOGAND(op_RSHIFT_47, SN(32, 0xffff)); + RzILOpPure *op_LT_53 = SLT(CAST(32, MSB(CAST(16, MSB(op_AND_49), DUP(op_AND_49))), CAST(16, MSB(DUP(op_AND_49)), DUP(op_AND_49))), SN(32, 0)); + RzILOpPure *op_LSHIFT_58 = SHIFTL0(SN(64, 1), SN(32, 7)); + RzILOpPure *op_NEG_59 = NEG(op_LSHIFT_58); + RzILOpPure *op_LSHIFT_64 = SHIFTL0(SN(64, 1), SN(32, 7)); + RzILOpPure *op_SUB_67 = SUB(op_LSHIFT_64, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_68 = ITE(op_LT_53, op_NEG_59, op_SUB_67); + RzILOpEffect *gcc_expr_69 = BRANCH(op_EQ_33, EMPTY(), set_usr_field_call_43); + + // h_tmp530 = HYB(gcc_expr_if ((sextract64(((ut64) ((st16) ((Rs >> 0x0) & 0xffff))), 0x0, 0x8) == ((st64) ((st16) ((Rs >> 0x0) & 0xffff))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st32) ((st16) ((Rs >> 0x0) & 0xffff))) < 0x0) ? 
(-(0x1 << 0x7)) : (0x1 << 0x7) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_71 = SETL("h_tmp530", cond_68); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st16) ((Rs >> 0x0) & ...; + RzILOpEffect *seq_72 = SEQN(2, gcc_expr_69, op_ASSIGN_hybrid_tmp_71); + + // Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xff << 0x0)))) | (((ut64) (((sextract64(((ut64) ((st16) ((Rs >> 0x0) & 0xffff))), 0x0, 0x8) == ((st64) ((st16) ((Rs >> 0x0) & 0xffff)))) ? ((st64) ((st16) ((Rs >> 0x0) & 0xffff))) : h_tmp530) & 0xff)) << 0x0))); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(SN(64, 0xff), SN(32, 0)); + RzILOpPure *op_NOT_6 = LOGNOT(op_LSHIFT_5); + RzILOpPure *op_AND_8 = LOGAND(CAST(64, MSB(READ_REG(pkt, Rd_op, true)), READ_REG(pkt, Rd_op, true)), op_NOT_6); + RzILOpPure *op_RSHIFT_37 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_39 = LOGAND(op_RSHIFT_37, SN(32, 0xffff)); + RzILOpPure *cond_74 = ITE(DUP(op_EQ_33), CAST(64, MSB(CAST(16, MSB(op_AND_39), DUP(op_AND_39))), CAST(16, MSB(DUP(op_AND_39)), DUP(op_AND_39))), VARL("h_tmp530")); + RzILOpPure *op_AND_76 = LOGAND(cond_74, SN(64, 0xff)); + RzILOpPure *op_LSHIFT_81 = SHIFTL0(CAST(64, IL_FALSE, op_AND_76), SN(32, 0)); + RzILOpPure *op_OR_83 = LOGOR(CAST(64, IL_FALSE, op_AND_8), op_LSHIFT_81); + RzILOpEffect *op_ASSIGN_85 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, op_OR_83)); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st16) ((Rs >> 0x0 ...; + RzILOpEffect *seq_86 = SEQN(2, seq_72, op_ASSIGN_85); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_129 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((st16) ((Rs >> 0x10) & 0xffff))), 0x0, 0x8) == ((st64) ((st16) ((Rs >> 0x10) & 0xffff))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st32) ((st16) ((Rs >> 0x10) & 0xffff))) < 0x0) ? 
(-(0x1 << 0x7)) : (0x1 << 0x7) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_102 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_104 = LOGAND(op_RSHIFT_102, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_114 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_116 = LOGAND(op_RSHIFT_114, SN(32, 0xffff)); + RzILOpPure *op_EQ_119 = EQ(SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_104), DUP(op_AND_104))), SN(32, 0), SN(32, 8)), CAST(64, MSB(CAST(16, MSB(op_AND_116), DUP(op_AND_116))), CAST(16, MSB(DUP(op_AND_116)), DUP(op_AND_116)))); + RzILOpPure *op_RSHIFT_133 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_135 = LOGAND(op_RSHIFT_133, SN(32, 0xffff)); + RzILOpPure *op_LT_139 = SLT(CAST(32, MSB(CAST(16, MSB(op_AND_135), DUP(op_AND_135))), CAST(16, MSB(DUP(op_AND_135)), DUP(op_AND_135))), SN(32, 0)); + RzILOpPure *op_LSHIFT_144 = SHIFTL0(SN(64, 1), SN(32, 7)); + RzILOpPure *op_NEG_145 = NEG(op_LSHIFT_144); + RzILOpPure *op_LSHIFT_150 = SHIFTL0(SN(64, 1), SN(32, 7)); + RzILOpPure *op_SUB_153 = SUB(op_LSHIFT_150, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_154 = ITE(op_LT_139, op_NEG_145, op_SUB_153); + RzILOpEffect *gcc_expr_155 = BRANCH(op_EQ_119, EMPTY(), set_usr_field_call_129); + + // h_tmp531 = HYB(gcc_expr_if ((sextract64(((ut64) ((st16) ((Rs >> 0x10) & 0xffff))), 0x0, 0x8) == ((st64) ((st16) ((Rs >> 0x10) & 0xffff))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st32) ((st16) ((Rs >> 0x10) & 0xffff))) < 0x0) ? (-(0x1 << 0x7)) : (0x1 << 0x7) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_157 = SETL("h_tmp531", cond_154); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st16) ((Rs >> 0x10) & ...; + RzILOpEffect *seq_158 = SEQN(2, gcc_expr_155, op_ASSIGN_hybrid_tmp_157); + + // Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xff << 0x8)))) | (((ut64) (((sextract64(((ut64) ((st16) ((Rs >> 0x10) & 0xffff))), 0x0, 0x8) == ((st64) ((st16) ((Rs >> 0x10) & 0xffff)))) ? 
((st64) ((st16) ((Rs >> 0x10) & 0xffff))) : h_tmp531) & 0xff)) << 0x8))); + RzILOpPure *op_LSHIFT_92 = SHIFTL0(SN(64, 0xff), SN(32, 8)); + RzILOpPure *op_NOT_93 = LOGNOT(op_LSHIFT_92); + RzILOpPure *op_AND_95 = LOGAND(CAST(64, MSB(READ_REG(pkt, Rd_op, true)), READ_REG(pkt, Rd_op, true)), op_NOT_93); + RzILOpPure *op_RSHIFT_123 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_125 = LOGAND(op_RSHIFT_123, SN(32, 0xffff)); + RzILOpPure *cond_160 = ITE(DUP(op_EQ_119), CAST(64, MSB(CAST(16, MSB(op_AND_125), DUP(op_AND_125))), CAST(16, MSB(DUP(op_AND_125)), DUP(op_AND_125))), VARL("h_tmp531")); + RzILOpPure *op_AND_162 = LOGAND(cond_160, SN(64, 0xff)); + RzILOpPure *op_LSHIFT_167 = SHIFTL0(CAST(64, IL_FALSE, op_AND_162), SN(32, 8)); + RzILOpPure *op_OR_169 = LOGOR(CAST(64, IL_FALSE, op_AND_95), op_LSHIFT_167); + RzILOpEffect *op_ASSIGN_171 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, op_OR_169)); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st16) ((Rs >> 0x1 ...; + RzILOpEffect *seq_172 = SEQN(2, seq_158, op_ASSIGN_171); + + // Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xff << 0x10)))) | (((ut64) (((st64) 0x0) & 0xff)) << 0x10))); + RzILOpPure *op_LSHIFT_178 = SHIFTL0(SN(64, 0xff), SN(32, 16)); + RzILOpPure *op_NOT_179 = LOGNOT(op_LSHIFT_178); + RzILOpPure *op_AND_181 = LOGAND(CAST(64, MSB(READ_REG(pkt, Rd_op, true)), READ_REG(pkt, Rd_op, true)), op_NOT_179); + RzILOpPure *op_AND_185 = LOGAND(CAST(64, MSB(SN(32, 0)), SN(32, 0)), SN(64, 0xff)); + RzILOpPure *op_LSHIFT_190 = SHIFTL0(CAST(64, IL_FALSE, op_AND_185), SN(32, 16)); + RzILOpPure *op_OR_192 = LOGOR(CAST(64, IL_FALSE, op_AND_181), op_LSHIFT_190); + RzILOpEffect *op_ASSIGN_194 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, op_OR_192)); + + // Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xff << 0x18)))) | (((ut64) (((st64) 0x0) & 0xff)) << 0x18))); + RzILOpPure *op_LSHIFT_200 = SHIFTL0(SN(64, 0xff), SN(32, 24)); + RzILOpPure *op_NOT_201 = LOGNOT(op_LSHIFT_200); + RzILOpPure *op_AND_203 = LOGAND(CAST(64, 
MSB(READ_REG(pkt, Rd_op, true)), READ_REG(pkt, Rd_op, true)), op_NOT_201); + RzILOpPure *op_AND_207 = LOGAND(CAST(64, MSB(SN(32, 0)), SN(32, 0)), SN(64, 0xff)); + RzILOpPure *op_LSHIFT_212 = SHIFTL0(CAST(64, IL_FALSE, op_AND_207), SN(32, 24)); + RzILOpPure *op_OR_214 = LOGOR(CAST(64, IL_FALSE, op_AND_203), op_LSHIFT_212); + RzILOpEffect *op_ASSIGN_216 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, op_OR_214)); + + RzILOpEffect *instruction_sequence = SEQN(4, seq_86, seq_172, op_ASSIGN_194, op_ASSIGN_216); + return instruction_sequence; +} + +// Rd = vsathub(Rs) +RzILOpEffect *hex_il_op_s2_svsathub(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_43 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((extract64(((ut64) ((st16) ((Rs >> 0x0) & 0xffff))), 0x0, 0x8) == ((ut64) ((st16) ((Rs >> 0x0) & 0xffff))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st32) ((st16) ((Rs >> 0x0) & 0xffff))) < 0x0) ? 
((st64) 0x0) : (0x1 << 0x8) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_16 = SHIFTRA(Rs, SN(32, 0)); + RzILOpPure *op_AND_18 = LOGAND(op_RSHIFT_16, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_28 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_30 = LOGAND(op_RSHIFT_28, SN(32, 0xffff)); + RzILOpPure *op_EQ_33 = EQ(EXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_18), DUP(op_AND_18))), SN(32, 0), SN(32, 8)), CAST(64, IL_FALSE, CAST(16, MSB(op_AND_30), DUP(op_AND_30)))); + RzILOpPure *op_RSHIFT_47 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_49 = LOGAND(op_RSHIFT_47, SN(32, 0xffff)); + RzILOpPure *op_LT_53 = SLT(CAST(32, MSB(CAST(16, MSB(op_AND_49), DUP(op_AND_49))), CAST(16, MSB(DUP(op_AND_49)), DUP(op_AND_49))), SN(32, 0)); + RzILOpPure *op_LSHIFT_57 = SHIFTL0(SN(64, 1), SN(32, 8)); + RzILOpPure *op_SUB_60 = SUB(op_LSHIFT_57, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_62 = ITE(op_LT_53, CAST(64, MSB(SN(32, 0)), SN(32, 0)), op_SUB_60); + RzILOpEffect *gcc_expr_63 = BRANCH(op_EQ_33, EMPTY(), set_usr_field_call_43); + + // h_tmp532 = HYB(gcc_expr_if ((extract64(((ut64) ((st16) ((Rs >> 0x0) & 0xffff))), 0x0, 0x8) == ((ut64) ((st16) ((Rs >> 0x0) & 0xffff))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st32) ((st16) ((Rs >> 0x0) & 0xffff))) < 0x0) ? ((st64) 0x0) : (0x1 << 0x8) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_65 = SETL("h_tmp532", cond_62); + + // seq(HYB(gcc_expr_if ((extract64(((ut64) ((st16) ((Rs >> 0x0) & 0 ...; + RzILOpEffect *seq_66 = SEQN(2, gcc_expr_63, op_ASSIGN_hybrid_tmp_65); + + // Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xff << 0x0)))) | (((ut64) (((extract64(((ut64) ((st16) ((Rs >> 0x0) & 0xffff))), 0x0, 0x8) == ((ut64) ((st16) ((Rs >> 0x0) & 0xffff)))) ? 
((st64) ((st16) ((Rs >> 0x0) & 0xffff))) : h_tmp532) & 0xff)) << 0x0))); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(SN(64, 0xff), SN(32, 0)); + RzILOpPure *op_NOT_6 = LOGNOT(op_LSHIFT_5); + RzILOpPure *op_AND_8 = LOGAND(CAST(64, MSB(READ_REG(pkt, Rd_op, true)), READ_REG(pkt, Rd_op, true)), op_NOT_6); + RzILOpPure *op_RSHIFT_37 = SHIFTRA(DUP(Rs), SN(32, 0)); + RzILOpPure *op_AND_39 = LOGAND(op_RSHIFT_37, SN(32, 0xffff)); + RzILOpPure *cond_68 = ITE(DUP(op_EQ_33), CAST(64, MSB(CAST(16, MSB(op_AND_39), DUP(op_AND_39))), CAST(16, MSB(DUP(op_AND_39)), DUP(op_AND_39))), VARL("h_tmp532")); + RzILOpPure *op_AND_70 = LOGAND(cond_68, SN(64, 0xff)); + RzILOpPure *op_LSHIFT_75 = SHIFTL0(CAST(64, IL_FALSE, op_AND_70), SN(32, 0)); + RzILOpPure *op_OR_77 = LOGOR(CAST(64, IL_FALSE, op_AND_8), op_LSHIFT_75); + RzILOpEffect *op_ASSIGN_79 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, op_OR_77)); + + // seq(seq(HYB(gcc_expr_if ((extract64(((ut64) ((st16) ((Rs >> 0x0) ...; + RzILOpEffect *seq_80 = SEQN(2, seq_66, op_ASSIGN_79); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_123 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((extract64(((ut64) ((st16) ((Rs >> 0x10) & 0xffff))), 0x0, 0x8) == ((ut64) ((st16) ((Rs >> 0x10) & 0xffff))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st32) ((st16) ((Rs >> 0x10) & 0xffff))) < 0x0) ? 
((st64) 0x0) : (0x1 << 0x8) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_96 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_98 = LOGAND(op_RSHIFT_96, SN(32, 0xffff)); + RzILOpPure *op_RSHIFT_108 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_110 = LOGAND(op_RSHIFT_108, SN(32, 0xffff)); + RzILOpPure *op_EQ_113 = EQ(EXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_98), DUP(op_AND_98))), SN(32, 0), SN(32, 8)), CAST(64, IL_FALSE, CAST(16, MSB(op_AND_110), DUP(op_AND_110)))); + RzILOpPure *op_RSHIFT_127 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_129 = LOGAND(op_RSHIFT_127, SN(32, 0xffff)); + RzILOpPure *op_LT_133 = SLT(CAST(32, MSB(CAST(16, MSB(op_AND_129), DUP(op_AND_129))), CAST(16, MSB(DUP(op_AND_129)), DUP(op_AND_129))), SN(32, 0)); + RzILOpPure *op_LSHIFT_137 = SHIFTL0(SN(64, 1), SN(32, 8)); + RzILOpPure *op_SUB_140 = SUB(op_LSHIFT_137, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_142 = ITE(op_LT_133, CAST(64, MSB(SN(32, 0)), SN(32, 0)), op_SUB_140); + RzILOpEffect *gcc_expr_143 = BRANCH(op_EQ_113, EMPTY(), set_usr_field_call_123); + + // h_tmp533 = HYB(gcc_expr_if ((extract64(((ut64) ((st16) ((Rs >> 0x10) & 0xffff))), 0x0, 0x8) == ((ut64) ((st16) ((Rs >> 0x10) & 0xffff))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st32) ((st16) ((Rs >> 0x10) & 0xffff))) < 0x0) ? ((st64) 0x0) : (0x1 << 0x8) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_145 = SETL("h_tmp533", cond_142); + + // seq(HYB(gcc_expr_if ((extract64(((ut64) ((st16) ((Rs >> 0x10) & ...; + RzILOpEffect *seq_146 = SEQN(2, gcc_expr_143, op_ASSIGN_hybrid_tmp_145); + + // Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xff << 0x8)))) | (((ut64) (((extract64(((ut64) ((st16) ((Rs >> 0x10) & 0xffff))), 0x0, 0x8) == ((ut64) ((st16) ((Rs >> 0x10) & 0xffff)))) ? 
((st64) ((st16) ((Rs >> 0x10) & 0xffff))) : h_tmp533) & 0xff)) << 0x8))); + RzILOpPure *op_LSHIFT_86 = SHIFTL0(SN(64, 0xff), SN(32, 8)); + RzILOpPure *op_NOT_87 = LOGNOT(op_LSHIFT_86); + RzILOpPure *op_AND_89 = LOGAND(CAST(64, MSB(READ_REG(pkt, Rd_op, true)), READ_REG(pkt, Rd_op, true)), op_NOT_87); + RzILOpPure *op_RSHIFT_117 = SHIFTRA(DUP(Rs), SN(32, 16)); + RzILOpPure *op_AND_119 = LOGAND(op_RSHIFT_117, SN(32, 0xffff)); + RzILOpPure *cond_148 = ITE(DUP(op_EQ_113), CAST(64, MSB(CAST(16, MSB(op_AND_119), DUP(op_AND_119))), CAST(16, MSB(DUP(op_AND_119)), DUP(op_AND_119))), VARL("h_tmp533")); + RzILOpPure *op_AND_150 = LOGAND(cond_148, SN(64, 0xff)); + RzILOpPure *op_LSHIFT_155 = SHIFTL0(CAST(64, IL_FALSE, op_AND_150), SN(32, 8)); + RzILOpPure *op_OR_157 = LOGOR(CAST(64, IL_FALSE, op_AND_89), op_LSHIFT_155); + RzILOpEffect *op_ASSIGN_159 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, op_OR_157)); + + // seq(seq(HYB(gcc_expr_if ((extract64(((ut64) ((st16) ((Rs >> 0x10 ...; + RzILOpEffect *seq_160 = SEQN(2, seq_146, op_ASSIGN_159); + + // Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xff << 0x10)))) | (((ut64) (((st64) 0x0) & 0xff)) << 0x10))); + RzILOpPure *op_LSHIFT_166 = SHIFTL0(SN(64, 0xff), SN(32, 16)); + RzILOpPure *op_NOT_167 = LOGNOT(op_LSHIFT_166); + RzILOpPure *op_AND_169 = LOGAND(CAST(64, MSB(READ_REG(pkt, Rd_op, true)), READ_REG(pkt, Rd_op, true)), op_NOT_167); + RzILOpPure *op_AND_173 = LOGAND(CAST(64, MSB(SN(32, 0)), SN(32, 0)), SN(64, 0xff)); + RzILOpPure *op_LSHIFT_178 = SHIFTL0(CAST(64, IL_FALSE, op_AND_173), SN(32, 16)); + RzILOpPure *op_OR_180 = LOGOR(CAST(64, IL_FALSE, op_AND_169), op_LSHIFT_178); + RzILOpEffect *op_ASSIGN_182 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, op_OR_180)); + + // Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xff << 0x18)))) | (((ut64) (((st64) 0x0) & 0xff)) << 0x18))); + RzILOpPure *op_LSHIFT_188 = SHIFTL0(SN(64, 0xff), SN(32, 24)); + RzILOpPure *op_NOT_189 = LOGNOT(op_LSHIFT_188); + RzILOpPure *op_AND_191 = LOGAND(CAST(64, 
MSB(READ_REG(pkt, Rd_op, true)), READ_REG(pkt, Rd_op, true)), op_NOT_189); + RzILOpPure *op_AND_195 = LOGAND(CAST(64, MSB(SN(32, 0)), SN(32, 0)), SN(64, 0xff)); + RzILOpPure *op_LSHIFT_200 = SHIFTL0(CAST(64, IL_FALSE, op_AND_195), SN(32, 24)); + RzILOpPure *op_OR_202 = LOGOR(CAST(64, IL_FALSE, op_AND_191), op_LSHIFT_200); + RzILOpEffect *op_ASSIGN_204 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, op_OR_202)); + + RzILOpEffect *instruction_sequence = SEQN(4, seq_80, seq_160, op_ASSIGN_182, op_ASSIGN_204); + return instruction_sequence; +} + +// Rx = tableidxb(Rs,Ii,II):raw +RzILOpEffect *hex_il_op_s2_tableidxb(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: st32 width; + RzILOpPure *S = SN(32, (st32)ISA2IMM(hi, 'S')); + // Declare: st32 offset; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + // Declare: st32 field; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // width = ((st32) u); + RzILOpEffect *op_ASSIGN_3 = SETL("width", CAST(32, IL_FALSE, VARL("u"))); + + // S = S; + RzILOpEffect *imm_assign_5 = SETL("S", S); + + // offset = S; + RzILOpEffect *op_ASSIGN_8 = SETL("offset", VARL("S")); + + // field = ((st32) ((width != 0x0) ? extract64(((offset < 0x0) ? 
((((ut64) ((ut32) Rs)) << (-offset) - 0x1) << 0x1) : (((ut64) ((ut32) Rs)) >> offset)), 0x0, width) : ((ut64) 0x0))); + RzILOpPure *op_NE_10 = INV(EQ(VARL("width"), SN(32, 0))); + RzILOpPure *op_LT_12 = SLT(VARL("offset"), SN(32, 0)); + RzILOpPure *op_NEG_16 = NEG(VARL("offset")); + RzILOpPure *op_SUB_18 = SUB(op_NEG_16, SN(32, 1)); + RzILOpPure *op_LSHIFT_19 = SHIFTL0(CAST(64, IL_FALSE, CAST(32, IL_FALSE, Rs)), op_SUB_18); + RzILOpPure *op_LSHIFT_21 = SHIFTL0(op_LSHIFT_19, SN(32, 1)); + RzILOpPure *op_RSHIFT_24 = SHIFTR0(CAST(64, IL_FALSE, CAST(32, IL_FALSE, DUP(Rs))), VARL("offset")); + RzILOpPure *cond_25 = ITE(op_LT_12, op_LSHIFT_21, op_RSHIFT_24); + RzILOpPure *cond_30 = ITE(op_NE_10, EXTRACT64(cond_25, SN(32, 0), VARL("width")), CAST(64, IL_FALSE, SN(64, 0))); + RzILOpEffect *op_ASSIGN_32 = SETL("field", CAST(32, IL_FALSE, cond_30)); + + // Rx = ((st32) (width ? deposit64(((ut64) Rx), 0x0, width, ((ut64) field)) : ((ut64) Rx))); + RzILOpPure *cond_40 = ITE(NON_ZERO(VARL("width")), DEPOSIT64(CAST(64, IL_FALSE, READ_REG(pkt, Rx_op, false)), SN(32, 0), VARL("width"), CAST(64, IL_FALSE, VARL("field"))), CAST(64, IL_FALSE, READ_REG(pkt, Rx_op, false))); + RzILOpEffect *op_ASSIGN_42 = WRITE_REG(bundle, Rx_op, CAST(32, IL_FALSE, cond_40)); + + RzILOpEffect *instruction_sequence = SEQN(6, imm_assign_0, imm_assign_5, op_ASSIGN_3, op_ASSIGN_8, op_ASSIGN_32, op_ASSIGN_42); + return instruction_sequence; +} + +// Rx = tableidxd(Rs,Ii,II):raw +RzILOpEffect *hex_il_op_s2_tableidxd(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: st32 width; + RzILOpPure *S = SN(32, (st32)ISA2IMM(hi, 'S')); + // Declare: st32 offset; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + // Declare: st32 field; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // width = 
((st32) u); + RzILOpEffect *op_ASSIGN_3 = SETL("width", CAST(32, IL_FALSE, VARL("u"))); + + // S = S; + RzILOpEffect *imm_assign_5 = SETL("S", S); + + // offset = S + 0x3; + RzILOpPure *op_ADD_8 = ADD(VARL("S"), SN(32, 3)); + RzILOpEffect *op_ASSIGN_10 = SETL("offset", op_ADD_8); + + // field = ((st32) ((width != 0x0) ? extract64(((offset < 0x0) ? ((((ut64) ((ut32) Rs)) << (-offset) - 0x1) << 0x1) : (((ut64) ((ut32) Rs)) >> offset)), 0x0, width) : ((ut64) 0x0))); + RzILOpPure *op_NE_12 = INV(EQ(VARL("width"), SN(32, 0))); + RzILOpPure *op_LT_14 = SLT(VARL("offset"), SN(32, 0)); + RzILOpPure *op_NEG_18 = NEG(VARL("offset")); + RzILOpPure *op_SUB_20 = SUB(op_NEG_18, SN(32, 1)); + RzILOpPure *op_LSHIFT_21 = SHIFTL0(CAST(64, IL_FALSE, CAST(32, IL_FALSE, Rs)), op_SUB_20); + RzILOpPure *op_LSHIFT_23 = SHIFTL0(op_LSHIFT_21, SN(32, 1)); + RzILOpPure *op_RSHIFT_26 = SHIFTR0(CAST(64, IL_FALSE, CAST(32, IL_FALSE, DUP(Rs))), VARL("offset")); + RzILOpPure *cond_27 = ITE(op_LT_14, op_LSHIFT_23, op_RSHIFT_26); + RzILOpPure *cond_32 = ITE(op_NE_12, EXTRACT64(cond_27, SN(32, 0), VARL("width")), CAST(64, IL_FALSE, SN(64, 0))); + RzILOpEffect *op_ASSIGN_34 = SETL("field", CAST(32, IL_FALSE, cond_32)); + + // Rx = ((st32) (width ? 
deposit64(((ut64) Rx), 0x3, width, ((ut64) field)) : ((ut64) Rx))); + RzILOpPure *cond_42 = ITE(NON_ZERO(VARL("width")), DEPOSIT64(CAST(64, IL_FALSE, READ_REG(pkt, Rx_op, false)), SN(32, 3), VARL("width"), CAST(64, IL_FALSE, VARL("field"))), CAST(64, IL_FALSE, READ_REG(pkt, Rx_op, false))); + RzILOpEffect *op_ASSIGN_44 = WRITE_REG(bundle, Rx_op, CAST(32, IL_FALSE, cond_42)); + + RzILOpEffect *instruction_sequence = SEQN(6, imm_assign_0, imm_assign_5, op_ASSIGN_3, op_ASSIGN_10, op_ASSIGN_34, op_ASSIGN_44); + return instruction_sequence; +} + +// Rx = tableidxh(Rs,Ii,II):raw +RzILOpEffect *hex_il_op_s2_tableidxh(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: st32 width; + RzILOpPure *S = SN(32, (st32)ISA2IMM(hi, 'S')); + // Declare: st32 offset; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + // Declare: st32 field; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // width = ((st32) u); + RzILOpEffect *op_ASSIGN_3 = SETL("width", CAST(32, IL_FALSE, VARL("u"))); + + // S = S; + RzILOpEffect *imm_assign_5 = SETL("S", S); + + // offset = S + 0x1; + RzILOpPure *op_ADD_8 = ADD(VARL("S"), SN(32, 1)); + RzILOpEffect *op_ASSIGN_10 = SETL("offset", op_ADD_8); + + // field = ((st32) ((width != 0x0) ? extract64(((offset < 0x0) ? 
((((ut64) ((ut32) Rs)) << (-offset) - 0x1) << 0x1) : (((ut64) ((ut32) Rs)) >> offset)), 0x0, width) : ((ut64) 0x0))); + RzILOpPure *op_NE_12 = INV(EQ(VARL("width"), SN(32, 0))); + RzILOpPure *op_LT_14 = SLT(VARL("offset"), SN(32, 0)); + RzILOpPure *op_NEG_18 = NEG(VARL("offset")); + RzILOpPure *op_SUB_20 = SUB(op_NEG_18, SN(32, 1)); + RzILOpPure *op_LSHIFT_21 = SHIFTL0(CAST(64, IL_FALSE, CAST(32, IL_FALSE, Rs)), op_SUB_20); + RzILOpPure *op_LSHIFT_23 = SHIFTL0(op_LSHIFT_21, SN(32, 1)); + RzILOpPure *op_RSHIFT_26 = SHIFTR0(CAST(64, IL_FALSE, CAST(32, IL_FALSE, DUP(Rs))), VARL("offset")); + RzILOpPure *cond_27 = ITE(op_LT_14, op_LSHIFT_23, op_RSHIFT_26); + RzILOpPure *cond_32 = ITE(op_NE_12, EXTRACT64(cond_27, SN(32, 0), VARL("width")), CAST(64, IL_FALSE, SN(64, 0))); + RzILOpEffect *op_ASSIGN_34 = SETL("field", CAST(32, IL_FALSE, cond_32)); + + // Rx = ((st32) (width ? deposit64(((ut64) Rx), 0x1, width, ((ut64) field)) : ((ut64) Rx))); + RzILOpPure *cond_42 = ITE(NON_ZERO(VARL("width")), DEPOSIT64(CAST(64, IL_FALSE, READ_REG(pkt, Rx_op, false)), SN(32, 1), VARL("width"), CAST(64, IL_FALSE, VARL("field"))), CAST(64, IL_FALSE, READ_REG(pkt, Rx_op, false))); + RzILOpEffect *op_ASSIGN_44 = WRITE_REG(bundle, Rx_op, CAST(32, IL_FALSE, cond_42)); + + RzILOpEffect *instruction_sequence = SEQN(6, imm_assign_0, imm_assign_5, op_ASSIGN_3, op_ASSIGN_10, op_ASSIGN_34, op_ASSIGN_44); + return instruction_sequence; +} + +// Rx = tableidxw(Rs,Ii,II):raw +RzILOpEffect *hex_il_op_s2_tableidxw(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: st32 width; + RzILOpPure *S = SN(32, (st32)ISA2IMM(hi, 'S')); + // Declare: st32 offset; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + // Declare: st32 field; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // width = 
((st32) u); + RzILOpEffect *op_ASSIGN_3 = SETL("width", CAST(32, IL_FALSE, VARL("u"))); + + // S = S; + RzILOpEffect *imm_assign_5 = SETL("S", S); + + // offset = S + 0x2; + RzILOpPure *op_ADD_8 = ADD(VARL("S"), SN(32, 2)); + RzILOpEffect *op_ASSIGN_10 = SETL("offset", op_ADD_8); + + // field = ((st32) ((width != 0x0) ? extract64(((offset < 0x0) ? ((((ut64) ((ut32) Rs)) << (-offset) - 0x1) << 0x1) : (((ut64) ((ut32) Rs)) >> offset)), 0x0, width) : ((ut64) 0x0))); + RzILOpPure *op_NE_12 = INV(EQ(VARL("width"), SN(32, 0))); + RzILOpPure *op_LT_14 = SLT(VARL("offset"), SN(32, 0)); + RzILOpPure *op_NEG_18 = NEG(VARL("offset")); + RzILOpPure *op_SUB_20 = SUB(op_NEG_18, SN(32, 1)); + RzILOpPure *op_LSHIFT_21 = SHIFTL0(CAST(64, IL_FALSE, CAST(32, IL_FALSE, Rs)), op_SUB_20); + RzILOpPure *op_LSHIFT_23 = SHIFTL0(op_LSHIFT_21, SN(32, 1)); + RzILOpPure *op_RSHIFT_26 = SHIFTR0(CAST(64, IL_FALSE, CAST(32, IL_FALSE, DUP(Rs))), VARL("offset")); + RzILOpPure *cond_27 = ITE(op_LT_14, op_LSHIFT_23, op_RSHIFT_26); + RzILOpPure *cond_32 = ITE(op_NE_12, EXTRACT64(cond_27, SN(32, 0), VARL("width")), CAST(64, IL_FALSE, SN(64, 0))); + RzILOpEffect *op_ASSIGN_34 = SETL("field", CAST(32, IL_FALSE, cond_32)); + + // Rx = ((st32) (width ? 
deposit64(((ut64) Rx), 0x2, width, ((ut64) field)) : ((ut64) Rx)));
	RzILOpPure *cond_42 = ITE(NON_ZERO(VARL("width")), DEPOSIT64(CAST(64, IL_FALSE, READ_REG(pkt, Rx_op, false)), SN(32, 2), VARL("width"), CAST(64, IL_FALSE, VARL("field"))), CAST(64, IL_FALSE, READ_REG(pkt, Rx_op, false)));
	RzILOpEffect *op_ASSIGN_44 = WRITE_REG(bundle, Rx_op, CAST(32, IL_FALSE, cond_42));

	RzILOpEffect *instruction_sequence = SEQN(6, imm_assign_0, imm_assign_5, op_ASSIGN_3, op_ASSIGN_10, op_ASSIGN_34, op_ASSIGN_44);
	return instruction_sequence;
}

// NOTE(review): The functions below are auto-generated RzIL lifters (see the
// file header: "Do not edit"). Only formatting/comments were restored here;
// any logic change must go through the rz-hexagon code generator.

// Rd = togglebit(Rs,Ii)
RzILOpEffect *hex_il_op_s2_togglebit_i(HexInsnPktBundle *bundle) {
	const HexInsn *hi = bundle->insn;
	HexPkt *pkt = bundle->pkt;
	// READ
	const HexOp *Rd_op = ISA2REG(hi, 'd', false);
	const HexOp *Rs_op = ISA2REG(hi, 's', false);
	RzILOpPure *Rs = READ_REG(pkt, Rs_op, false);
	RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u'));

	// u = u;
	RzILOpEffect *imm_assign_3 = SETL("u", u);

	// Rd = (Rs ^ (0x1 << u));
	RzILOpPure *op_LSHIFT_5 = SHIFTL0(SN(32, 1), VARL("u"));
	RzILOpPure *op_XOR_6 = LOGXOR(Rs, op_LSHIFT_5);
	RzILOpEffect *op_ASSIGN_7 = WRITE_REG(bundle, Rd_op, op_XOR_6);

	RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_3, op_ASSIGN_7);
	return instruction_sequence;
}

// Rd = togglebit(Rs,Rt)
RzILOpEffect *hex_il_op_s2_togglebit_r(HexInsnPktBundle *bundle) {
	const HexInsn *hi = bundle->insn;
	HexPkt *pkt = bundle->pkt;
	// READ
	const HexOp *Rd_op = ISA2REG(hi, 'd', false);
	const HexOp *Rs_op = ISA2REG(hi, 's', false);
	RzILOpPure *Rs = READ_REG(pkt, Rs_op, false);
	const HexOp *Rt_op = ISA2REG(hi, 't', false);
	RzILOpPure *Rt = READ_REG(pkt, Rt_op, false);

	// Rd = ((st32) (((ut64) Rs) ^ ((sextract64(((ut64) Rt), 0x0, 0x7) < ((st64) 0x0)) ? ((((ut64) ((ut32) 0x1)) >> (-sextract64(((ut64) Rt), 0x0, 0x7)) - ((st64) 0x1)) >> 0x1) : (((ut64) ((ut32) 0x1)) << sextract64(((ut64) Rt), 0x0, 0x7)))));
	RzILOpPure *op_LT_13 = SLT(SEXTRACT64(CAST(64, IL_FALSE, Rt), SN(32, 0), SN(32, 7)), CAST(64, MSB(SN(32, 0)), SN(32, 0)));
	RzILOpPure *op_NEG_25 = NEG(SEXTRACT64(CAST(64, IL_FALSE, DUP(Rt)), SN(32, 0), SN(32, 7)));
	RzILOpPure *op_SUB_28 = SUB(op_NEG_25, CAST(64, MSB(SN(32, 1)), SN(32, 1)));
	RzILOpPure *op_RSHIFT_29 = SHIFTR0(CAST(64, IL_FALSE, CAST(32, IL_FALSE, SN(32, 1))), op_SUB_28);
	RzILOpPure *op_RSHIFT_31 = SHIFTR0(op_RSHIFT_29, SN(32, 1));
	RzILOpPure *op_LSHIFT_43 = SHIFTL0(CAST(64, IL_FALSE, CAST(32, IL_FALSE, SN(32, 1))), SEXTRACT64(CAST(64, IL_FALSE, DUP(Rt)), SN(32, 0), SN(32, 7)));
	RzILOpPure *cond_44 = ITE(op_LT_13, op_RSHIFT_31, op_LSHIFT_43);
	RzILOpPure *op_XOR_46 = LOGXOR(CAST(64, IL_FALSE, Rs), cond_44);
	RzILOpEffect *op_ASSIGN_48 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, op_XOR_46));

	RzILOpEffect *instruction_sequence = op_ASSIGN_48;
	return instruction_sequence;
}

// Pd = tstbit(Rs,Ii)
RzILOpEffect *hex_il_op_s2_tstbit_i(HexInsnPktBundle *bundle) {
	const HexInsn *hi = bundle->insn;
	HexPkt *pkt = bundle->pkt;
	// READ
	const HexOp *Pd_op = ISA2REG(hi, 'd', false);
	const HexOp *Rs_op = ISA2REG(hi, 's', false);
	RzILOpPure *Rs = READ_REG(pkt, Rs_op, false);
	RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u'));

	// u = u;
	RzILOpEffect *imm_assign_3 = SETL("u", u);

	// Pd = ((st8) (((Rs & (0x1 << u)) != 0x0) ? 0xff : 0x0));
	RzILOpPure *op_LSHIFT_5 = SHIFTL0(SN(32, 1), VARL("u"));
	RzILOpPure *op_AND_6 = LOGAND(Rs, op_LSHIFT_5);
	RzILOpPure *op_NE_8 = INV(EQ(op_AND_6, SN(32, 0)));
	RzILOpPure *cond_11 = ITE(op_NE_8, SN(32, 0xff), SN(32, 0));
	RzILOpEffect *op_ASSIGN_13 = WRITE_REG(bundle, Pd_op, CAST(8, MSB(cond_11), DUP(cond_11)));

	RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_3, op_ASSIGN_13);
	return instruction_sequence;
}

// Pd = tstbit(Rs,Rt)
RzILOpEffect *hex_il_op_s2_tstbit_r(HexInsnPktBundle *bundle) {
	const HexInsn *hi = bundle->insn;
	HexPkt *pkt = bundle->pkt;
	// READ
	const HexOp *Pd_op = ISA2REG(hi, 'd', false);
	const HexOp *Rs_op = ISA2REG(hi, 's', false);
	RzILOpPure *Rs = READ_REG(pkt, Rs_op, false);
	const HexOp *Rt_op = ISA2REG(hi, 't', false);
	RzILOpPure *Rt = READ_REG(pkt, Rt_op, false);

	// Pd = ((st8) (((((ut64) ((ut32) Rs)) & ((sextract64(((ut64) Rt), 0x0, 0x7) < ((st64) 0x0)) ? ((((ut64) ((ut32) 0x1)) >> (-sextract64(((ut64) Rt), 0x0, 0x7)) - ((st64) 0x1)) >> 0x1) : (((ut64) ((ut32) 0x1)) << sextract64(((ut64) Rt), 0x0, 0x7)))) != ((ut64) 0x0)) ? 0xff : 0x0));
	RzILOpPure *op_LT_15 = SLT(SEXTRACT64(CAST(64, IL_FALSE, Rt), SN(32, 0), SN(32, 7)), CAST(64, MSB(SN(32, 0)), SN(32, 0)));
	RzILOpPure *op_NEG_27 = NEG(SEXTRACT64(CAST(64, IL_FALSE, DUP(Rt)), SN(32, 0), SN(32, 7)));
	RzILOpPure *op_SUB_30 = SUB(op_NEG_27, CAST(64, MSB(SN(32, 1)), SN(32, 1)));
	RzILOpPure *op_RSHIFT_31 = SHIFTR0(CAST(64, IL_FALSE, CAST(32, IL_FALSE, SN(32, 1))), op_SUB_30);
	RzILOpPure *op_RSHIFT_33 = SHIFTR0(op_RSHIFT_31, SN(32, 1));
	RzILOpPure *op_LSHIFT_45 = SHIFTL0(CAST(64, IL_FALSE, CAST(32, IL_FALSE, SN(32, 1))), SEXTRACT64(CAST(64, IL_FALSE, DUP(Rt)), SN(32, 0), SN(32, 7)));
	RzILOpPure *cond_46 = ITE(op_LT_15, op_RSHIFT_33, op_LSHIFT_45);
	RzILOpPure *op_AND_47 = LOGAND(CAST(64, IL_FALSE, CAST(32, IL_FALSE, Rs)), cond_46);
	RzILOpPure *op_NE_50 = INV(EQ(op_AND_47, CAST(64, IL_FALSE, SN(32, 0))));
	RzILOpPure *cond_53 = ITE(op_NE_50, SN(32, 0xff), SN(32, 0));
	RzILOpEffect *op_ASSIGN_55 = WRITE_REG(bundle, Pd_op, CAST(8, MSB(cond_53), DUP(cond_53)));

	RzILOpEffect *instruction_sequence = op_ASSIGN_55;
	return instruction_sequence;
}

// Rdd = valignb(Rtt,Rss,Ii)
RzILOpEffect *hex_il_op_s2_valignib(HexInsnPktBundle *bundle) {
	const HexInsn *hi = bundle->insn;
	HexPkt *pkt = bundle->pkt;
	// READ
	const HexOp *Rdd_op = ISA2REG(hi, 'd', false);
	RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u'));
	const HexOp *Rss_op = ISA2REG(hi, 's', false);
	RzILOpPure *Rss = READ_REG(pkt, Rss_op, false);
	const HexOp *Rtt_op = ISA2REG(hi, 't', false);
	RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false);

	// u = u;
	RzILOpEffect *imm_assign_1 = SETL("u", u);

	// Rdd = ((st64) (((u * ((ut32) 0x8) >= ((ut32) 0x40)) ? ((ut64) 0x0) : (((ut64) Rss) >> u * ((ut32) 0x8))) | ((ut64) ((((ut32) 0x8) - u * ((ut32) 0x8) >= ((ut32) 0x40)) ? ((st64) 0x0) : (Rtt << ((ut32) 0x8) - u * ((ut32) 0x8))))));
	RzILOpPure *op_MUL_5 = MUL(VARL("u"), CAST(32, IL_FALSE, SN(32, 8)));
	RzILOpPure *op_GE_11 = UGE(op_MUL_5, CAST(32, IL_FALSE, SN(32, 0x40)));
	RzILOpPure *op_MUL_16 = MUL(VARL("u"), CAST(32, IL_FALSE, SN(32, 8)));
	RzILOpPure *op_RSHIFT_17 = SHIFTR0(CAST(64, IL_FALSE, Rss), op_MUL_16);
	RzILOpPure *cond_19 = ITE(op_GE_11, CAST(64, IL_FALSE, SN(32, 0)), op_RSHIFT_17);
	RzILOpPure *op_SUB_22 = SUB(CAST(32, IL_FALSE, SN(32, 8)), VARL("u"));
	RzILOpPure *op_MUL_25 = MUL(op_SUB_22, CAST(32, IL_FALSE, SN(32, 8)));
	RzILOpPure *op_GE_31 = UGE(op_MUL_25, CAST(32, IL_FALSE, SN(32, 0x40)));
	RzILOpPure *op_SUB_35 = SUB(CAST(32, IL_FALSE, SN(32, 8)), VARL("u"));
	RzILOpPure *op_MUL_38 = MUL(op_SUB_35, CAST(32, IL_FALSE, SN(32, 8)));
	RzILOpPure *op_LSHIFT_39 = SHIFTL0(Rtt, op_MUL_38);
	RzILOpPure *cond_41 = ITE(op_GE_31, CAST(64, MSB(SN(32, 0)), SN(32, 0)), op_LSHIFT_39);
	RzILOpPure *op_OR_43 = LOGOR(cond_19, CAST(64, IL_FALSE, cond_41));
	RzILOpEffect *op_ASSIGN_45 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_43));

	RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_1, op_ASSIGN_45);
	return instruction_sequence;
}

// Rdd = valignb(Rtt,Rss,Pu)
RzILOpEffect *hex_il_op_s2_valignrb(HexInsnPktBundle *bundle) {
	const HexInsn *hi = bundle->insn;
	HexPkt *pkt = bundle->pkt;
	// READ
	const HexOp *Rdd_op = ISA2REG(hi, 'd', false);
	const HexOp *Pu_op = ISA2REG(hi, 'u', false);
	RzILOpPure *Pu = READ_REG(pkt, Pu_op, false);
	const HexOp *Rss_op = ISA2REG(hi, 's', false);
	RzILOpPure *Rss = READ_REG(pkt, Rss_op, false);
	const HexOp *Rtt_op = ISA2REG(hi, 't', false);
	RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false);

	// Rdd = ((st64) ((((((st32) Pu) & 0x7) * 0x8 >= 0x40) ? ((ut64) 0x0) : (((ut64) Rss) >> (((st32) Pu) & 0x7) * 0x8)) | ((ut64) ((0x8 - (((st32) Pu) & 0x7) * 0x8 >= 0x40) ? ((st64) 0x0) : (Rtt << 0x8 - (((st32) Pu) & 0x7) * 0x8)))));
	RzILOpPure *op_AND_4 = LOGAND(CAST(32, MSB(Pu), DUP(Pu)), SN(32, 7));
	RzILOpPure *op_MUL_6 = MUL(op_AND_4, SN(32, 8));
	RzILOpPure *op_GE_11 = SGE(op_MUL_6, SN(32, 0x40));
	RzILOpPure *op_AND_16 = LOGAND(CAST(32, MSB(DUP(Pu)), DUP(Pu)), SN(32, 7));
	RzILOpPure *op_MUL_18 = MUL(op_AND_16, SN(32, 8));
	RzILOpPure *op_RSHIFT_19 = SHIFTR0(CAST(64, IL_FALSE, Rss), op_MUL_18);
	RzILOpPure *cond_21 = ITE(op_GE_11, CAST(64, IL_FALSE, SN(32, 0)), op_RSHIFT_19);
	RzILOpPure *op_AND_25 = LOGAND(CAST(32, MSB(DUP(Pu)), DUP(Pu)), SN(32, 7));
	RzILOpPure *op_SUB_26 = SUB(SN(32, 8), op_AND_25);
	RzILOpPure *op_MUL_28 = MUL(op_SUB_26, SN(32, 8));
	RzILOpPure *op_GE_33 = SGE(op_MUL_28, SN(32, 0x40));
	RzILOpPure *op_AND_38 = LOGAND(CAST(32, MSB(DUP(Pu)), DUP(Pu)), SN(32, 7));
	RzILOpPure *op_SUB_39 = SUB(SN(32, 8), op_AND_38);
	RzILOpPure *op_MUL_41 = MUL(op_SUB_39, SN(32, 8));
	RzILOpPure *op_LSHIFT_42 = SHIFTL0(Rtt, op_MUL_41);
	RzILOpPure *cond_44 = ITE(op_GE_33, CAST(64, MSB(SN(32, 0)), SN(32, 0)), op_LSHIFT_42);
	RzILOpPure *op_OR_46 = LOGOR(cond_21, CAST(64, IL_FALSE, cond_44));
	RzILOpEffect *op_ASSIGN_48 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_46));

	RzILOpEffect *instruction_sequence = op_ASSIGN_48;
	return instruction_sequence;
}

// Rdd = vcnegh(Rss,Rt)
// NOTE(review): loops i over the four halfwords of Rss; where bit i of Rt is
// set, writes the saturated negation of that halfword into Rdd (setting
// USR.OVF on saturation via hex_set_usr_field), otherwise copies it through.
RzILOpEffect *hex_il_op_s2_vcnegh(HexInsnPktBundle *bundle) {
	const HexInsn *hi = bundle->insn;
	HexPkt *pkt = bundle->pkt;
	// READ
	// Declare: st32 i;
	const HexOp *Rt_op = ISA2REG(hi, 't', false);
	RzILOpPure *Rt = READ_REG(pkt, Rt_op, false);
	const HexOp *Rdd_op = ISA2REG(hi, 'd', false);
	const HexOp *Rss_op = ISA2REG(hi, 's', false);
	RzILOpPure *Rss = READ_REG(pkt, Rss_op, false);

	// i = 0x0;
	RzILOpEffect *op_ASSIGN_2 = SETL("i", SN(32, 0));

	// HYB(++i);
	RzILOpEffect *op_INC_5 = SETL("i", INC(VARL("i"), 32));

	// h_tmp534 = HYB(++i);
	RzILOpEffect *op_ASSIGN_hybrid_tmp_7 = SETL("h_tmp534", VARL("i"));

	// seq(h_tmp534 = HYB(++i); HYB(++i));
	RzILOpEffect *seq_8 = SEQN(2, op_ASSIGN_hybrid_tmp_7, op_INC_5);

	// set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1));
	RzILOpEffect *set_usr_field_call_60 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1)));

	// HYB(gcc_expr_if ((sextract64(((ut64) (-((st32) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff)))))), 0x0, 0x10) == ((st64) (-((st32) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff)))))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((-((st32) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff))))) < 0x0) ? (-(0x1 << 0xf)) : (0x1 << 0xf) - ((st64) 0x1)));
	RzILOpPure *op_MUL_25 = MUL(VARL("i"), SN(32, 16));
	RzILOpPure *op_RSHIFT_26 = SHIFTRA(Rss, op_MUL_25);
	RzILOpPure *op_AND_29 = LOGAND(op_RSHIFT_26, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff)));
	RzILOpPure *op_NEG_32 = NEG(CAST(32, MSB(CAST(16, MSB(op_AND_29), DUP(op_AND_29))), CAST(16, MSB(DUP(op_AND_29)), DUP(op_AND_29))));
	RzILOpPure *op_MUL_39 = MUL(VARL("i"), SN(32, 16));
	RzILOpPure *op_RSHIFT_40 = SHIFTRA(DUP(Rss), op_MUL_39);
	RzILOpPure *op_AND_43 = LOGAND(op_RSHIFT_40, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff)));
	RzILOpPure *op_NEG_46 = NEG(CAST(32, MSB(CAST(16, MSB(op_AND_43), DUP(op_AND_43))), CAST(16, MSB(DUP(op_AND_43)), DUP(op_AND_43))));
	RzILOpPure *op_EQ_48 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_NEG_32), SN(32, 0), SN(32, 16)), CAST(64, MSB(op_NEG_46), DUP(op_NEG_46)));
	RzILOpPure *op_MUL_62 = MUL(VARL("i"), SN(32, 16));
	RzILOpPure *op_RSHIFT_63 = SHIFTRA(DUP(Rss), op_MUL_62);
	RzILOpPure *op_AND_66 = LOGAND(op_RSHIFT_63, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff)));
	RzILOpPure *op_NEG_69 = NEG(CAST(32, MSB(CAST(16, MSB(op_AND_66), DUP(op_AND_66))), CAST(16, MSB(DUP(op_AND_66)), DUP(op_AND_66))));
	RzILOpPure *op_LT_71 = SLT(op_NEG_69, SN(32, 0));
	RzILOpPure *op_LSHIFT_76 = SHIFTL0(SN(64, 1), SN(32, 15));
	RzILOpPure *op_NEG_77 = NEG(op_LSHIFT_76);
	RzILOpPure *op_LSHIFT_82 = SHIFTL0(SN(64, 1), SN(32, 15));
	RzILOpPure *op_SUB_85 = SUB(op_LSHIFT_82, CAST(64, MSB(SN(32, 1)), SN(32, 1)));
	RzILOpPure *cond_86 = ITE(op_LT_71, op_NEG_77, op_SUB_85);
	RzILOpEffect *gcc_expr_87 = BRANCH(op_EQ_48, EMPTY(), set_usr_field_call_60);

	// h_tmp535 = HYB(gcc_expr_if ((sextract64(((ut64) (-((st32) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff)))))), 0x0, 0x10) == ((st64) (-((st32) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff)))))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((-((st32) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff))))) < 0x0) ? (-(0x1 << 0xf)) : (0x1 << 0xf) - ((st64) 0x1)));
	RzILOpEffect *op_ASSIGN_hybrid_tmp_89 = SETL("h_tmp535", cond_86);

	// seq(HYB(gcc_expr_if ((sextract64(((ut64) (-((st32) ((st16) ((Rss ...;
	RzILOpEffect *seq_90 = SEQN(2, gcc_expr_87, op_ASSIGN_hybrid_tmp_89);

	// Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * 0x10)))) | (((ut64) (((sextract64(((ut64) (-((st32) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff)))))), 0x0, 0x10) == ((st64) (-((st32) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff))))))) ? ((st64) (-((st32) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff)))))) : h_tmp535) & ((st64) 0xffff))) << i * 0x10)));
	RzILOpPure *op_MUL_16 = MUL(VARL("i"), SN(32, 16));
	RzILOpPure *op_LSHIFT_17 = SHIFTL0(SN(64, 0xffff), op_MUL_16);
	RzILOpPure *op_NOT_18 = LOGNOT(op_LSHIFT_17);
	RzILOpPure *op_AND_19 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_18);
	RzILOpPure *op_MUL_50 = MUL(VARL("i"), SN(32, 16));
	RzILOpPure *op_RSHIFT_51 = SHIFTRA(DUP(Rss), op_MUL_50);
	RzILOpPure *op_AND_54 = LOGAND(op_RSHIFT_51, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff)));
	RzILOpPure *op_NEG_57 = NEG(CAST(32, MSB(CAST(16, MSB(op_AND_54), DUP(op_AND_54))), CAST(16, MSB(DUP(op_AND_54)), DUP(op_AND_54))));
	RzILOpPure *cond_92 = ITE(DUP(op_EQ_48), CAST(64, MSB(op_NEG_57), DUP(op_NEG_57)), VARL("h_tmp535"));
	RzILOpPure *op_AND_95 = LOGAND(cond_92, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff)));
	RzILOpPure *op_MUL_98 = MUL(VARL("i"), SN(32, 16));
	RzILOpPure *op_LSHIFT_99 = SHIFTL0(CAST(64, IL_FALSE, op_AND_95), op_MUL_98);
	RzILOpPure *op_OR_101 = LOGOR(CAST(64, IL_FALSE, op_AND_19), op_LSHIFT_99);
	RzILOpEffect *op_ASSIGN_103 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_101));

	// seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) (-((st32) ((st16) ( ...;
	RzILOpEffect *seq_104 = SEQN(2, seq_90, op_ASSIGN_103);

	// Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * 0x10)))) | (((ut64) (((st32) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff)))) & 0xffff)) << i * 0x10)));
	RzILOpPure *op_MUL_108 = MUL(VARL("i"), SN(32, 16));
	RzILOpPure *op_LSHIFT_109 = SHIFTL0(SN(64, 0xffff), op_MUL_108);
	RzILOpPure *op_NOT_110 = LOGNOT(op_LSHIFT_109);
	RzILOpPure *op_AND_111 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_110);
	RzILOpPure *op_MUL_113 = MUL(VARL("i"), SN(32, 16));
	RzILOpPure *op_RSHIFT_114 = SHIFTRA(DUP(Rss), op_MUL_113);
	RzILOpPure *op_AND_117 = LOGAND(op_RSHIFT_114, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff)));
	RzILOpPure *op_AND_121 = LOGAND(CAST(32, MSB(CAST(16, MSB(op_AND_117), DUP(op_AND_117))), CAST(16, MSB(DUP(op_AND_117)), DUP(op_AND_117))), SN(32, 0xffff));
	RzILOpPure *op_MUL_124 = MUL(VARL("i"), SN(32, 16));
	RzILOpPure *op_LSHIFT_125 = SHIFTL0(CAST(64, IL_FALSE, op_AND_121), op_MUL_124);
	RzILOpPure *op_OR_127 = LOGOR(CAST(64, IL_FALSE, op_AND_111), op_LSHIFT_125);
	RzILOpEffect *op_ASSIGN_129 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_127));

	// seq(seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) (-((st32) ((st1 ...;
	RzILOpEffect *seq_then_131 = seq_104;

	// seq(Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * 0x10)))) | (( ...;
	RzILOpEffect *seq_else_132 = op_ASSIGN_129;

	// if (((Rt >> i) & 0x1)) {seq(seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) (-((st32) ((st1 ...} else {seq(Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * 0x10)))) | (( ...};
	RzILOpPure *op_RSHIFT_10 = SHIFTRA(Rt, VARL("i"));
	RzILOpPure *op_AND_12 = LOGAND(op_RSHIFT_10, SN(32, 1));
	RzILOpEffect *branch_133 = BRANCH(NON_ZERO(op_AND_12), seq_then_131, seq_else_132);

	// seq(h_tmp534; if (((Rt >> i) & 0x1)) {seq(seq(seq(HYB(gcc_expr_i ...;
	RzILOpEffect *seq_134 = branch_133;

	// seq(seq(h_tmp534; if (((Rt >> i) & 0x1)) {seq(seq(seq(HYB(gcc_ex ...;
	RzILOpEffect *seq_135 = SEQN(2, seq_134, seq_8);

	// while ((i < 0x4)) { seq(seq(h_tmp534; if (((Rt >> i) & 0x1)) {seq(seq(seq(HYB(gcc_ex ... };
	RzILOpPure *op_LT_4 = SLT(VARL("i"), SN(32, 4));
	RzILOpEffect *for_136 = REPEAT(op_LT_4, seq_135);

	// seq(i = 0x0; while ((i < 0x4)) { seq(seq(h_tmp534; if (((Rt >> i ...;
	RzILOpEffect *seq_137 = SEQN(2, op_ASSIGN_2, for_136);

	RzILOpEffect *instruction_sequence = seq_137;
	return instruction_sequence;
}

// Rdd = vcrotate(Rss,Rt)
RzILOpEffect *hex_il_op_s2_vcrotate(HexInsnPktBundle *bundle) {
	const HexInsn *hi = bundle->insn;
	HexPkt *pkt = bundle->pkt;
	// READ
	// Declare: ut8 tmp;
	const HexOp *Rt_op = ISA2REG(hi, 't', false);
	RzILOpPure *Rt = READ_REG(pkt, Rt_op, false);
	const HexOp *Rdd_op = ISA2REG(hi, 'd', false);
	const HexOp *Rss_op = ISA2REG(hi, 's', false);
	RzILOpPure *Rss = READ_REG(pkt, Rss_op, false);

	// tmp = ((ut8) extract64(((ut64) Rt), 0x0, 0x2));
	RzILOpEffect *op_ASSIGN_17 = SETL("tmp", CAST(8, IL_FALSE, EXTRACT64(CAST(64, IL_FALSE, Rt), SN(32, 0), SN(32, 2))));

	// Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << 0x0)))) | (((ut64) (((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) & 0xffff)) << 0x0)));
	RzILOpPure *op_LSHIFT_26 = SHIFTL0(SN(64, 0xffff), SN(32, 0));
	RzILOpPure *op_NOT_27 = LOGNOT(op_LSHIFT_26);
	RzILOpPure *op_AND_28 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_27);
	RzILOpPure *op_RSHIFT_33 = SHIFTRA(Rss, SN(32, 0));
	RzILOpPure *op_AND_36 = LOGAND(op_RSHIFT_33, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff)));
	RzILOpPure *op_AND_40 = LOGAND(CAST(32, MSB(CAST(16, MSB(op_AND_36), DUP(op_AND_36))), CAST(16, MSB(DUP(op_AND_36)), DUP(op_AND_36))), SN(32, 0xffff));
	RzILOpPure *op_LSHIFT_45 = SHIFTL0(CAST(64, IL_FALSE, op_AND_40), SN(32, 0));
	RzILOpPure *op_OR_47 = LOGOR(CAST(64, IL_FALSE, op_AND_28), op_LSHIFT_45);
	RzILOpEffect *op_ASSIGN_49 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_47));

	// Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << 0x10)))) | (((ut64) (((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) & 0xffff)) << 0x10)));
	RzILOpPure *op_LSHIFT_55 =
SHIFTL0(SN(64, 0xffff), SN(32, 16)); + RzILOpPure *op_NOT_56 = LOGNOT(op_LSHIFT_55); + RzILOpPure *op_AND_57 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_56); + RzILOpPure *op_RSHIFT_61 = SHIFTRA(DUP(Rss), SN(32, 16)); + RzILOpPure *op_AND_64 = LOGAND(op_RSHIFT_61, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_AND_68 = LOGAND(CAST(32, MSB(CAST(16, MSB(op_AND_64), DUP(op_AND_64))), CAST(16, MSB(DUP(op_AND_64)), DUP(op_AND_64))), SN(32, 0xffff)); + RzILOpPure *op_LSHIFT_73 = SHIFTL0(CAST(64, IL_FALSE, op_AND_68), SN(32, 16)); + RzILOpPure *op_OR_75 = LOGOR(CAST(64, IL_FALSE, op_AND_57), op_LSHIFT_73); + RzILOpEffect *op_ASSIGN_77 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_75)); + + // Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << 0x0)))) | (((ut64) (((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) & 0xffff)) << 0x0))); + RzILOpPure *op_LSHIFT_86 = SHIFTL0(SN(64, 0xffff), SN(32, 0)); + RzILOpPure *op_NOT_87 = LOGNOT(op_LSHIFT_86); + RzILOpPure *op_AND_88 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_87); + RzILOpPure *op_RSHIFT_92 = SHIFTRA(DUP(Rss), SN(32, 16)); + RzILOpPure *op_AND_95 = LOGAND(op_RSHIFT_92, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_AND_99 = LOGAND(CAST(32, MSB(CAST(16, MSB(op_AND_95), DUP(op_AND_95))), CAST(16, MSB(DUP(op_AND_95)), DUP(op_AND_95))), SN(32, 0xffff)); + RzILOpPure *op_LSHIFT_104 = SHIFTL0(CAST(64, IL_FALSE, op_AND_99), SN(32, 0)); + RzILOpPure *op_OR_106 = LOGOR(CAST(64, IL_FALSE, op_AND_88), op_LSHIFT_104); + RzILOpEffect *op_ASSIGN_108 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_106)); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_159 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) (-((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))))), 0x0, 0x10) == ((st64) (-((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))))))) {{}} else 
{set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((-((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff))))) < 0x0) ? (-(0x1 << 0xf)) : (0x1 << 0xf) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_123 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_126 = LOGAND(op_RSHIFT_123, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_NEG_129 = NEG(CAST(32, MSB(CAST(16, MSB(op_AND_126), DUP(op_AND_126))), CAST(16, MSB(DUP(op_AND_126)), DUP(op_AND_126)))); + RzILOpPure *op_RSHIFT_138 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_141 = LOGAND(op_RSHIFT_138, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_NEG_144 = NEG(CAST(32, MSB(CAST(16, MSB(op_AND_141), DUP(op_AND_141))), CAST(16, MSB(DUP(op_AND_141)), DUP(op_AND_141)))); + RzILOpPure *op_EQ_146 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_NEG_129), SN(32, 0), SN(32, 16)), CAST(64, MSB(op_NEG_144), DUP(op_NEG_144))); + RzILOpPure *op_RSHIFT_163 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_166 = LOGAND(op_RSHIFT_163, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_NEG_169 = NEG(CAST(32, MSB(CAST(16, MSB(op_AND_166), DUP(op_AND_166))), CAST(16, MSB(DUP(op_AND_166)), DUP(op_AND_166)))); + RzILOpPure *op_LT_171 = SLT(op_NEG_169, SN(32, 0)); + RzILOpPure *op_LSHIFT_176 = SHIFTL0(SN(64, 1), SN(32, 15)); + RzILOpPure *op_NEG_177 = NEG(op_LSHIFT_176); + RzILOpPure *op_LSHIFT_182 = SHIFTL0(SN(64, 1), SN(32, 15)); + RzILOpPure *op_SUB_185 = SUB(op_LSHIFT_182, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_186 = ITE(op_LT_171, op_NEG_177, op_SUB_185); + RzILOpEffect *gcc_expr_187 = BRANCH(op_EQ_146, EMPTY(), set_usr_field_call_159); + + // h_tmp536 = HYB(gcc_expr_if ((sextract64(((ut64) (-((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))))), 0x0, 0x10) == ((st64) (-((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((-((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff))))) < 0x0) ? 
(-(0x1 << 0xf)) : (0x1 << 0xf) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_189 = SETL("h_tmp536", cond_186); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) (-((st32) ((st16) ((Rss ...; + RzILOpEffect *seq_190 = SEQN(2, gcc_expr_187, op_ASSIGN_hybrid_tmp_189); + + // Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << 0x10)))) | (((ut64) (((sextract64(((ut64) (-((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))))), 0x0, 0x10) == ((st64) (-((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff))))))) ? ((st64) (-((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))))) : h_tmp536) & ((st64) 0xffff))) << 0x10))); + RzILOpPure *op_LSHIFT_114 = SHIFTL0(SN(64, 0xffff), SN(32, 16)); + RzILOpPure *op_NOT_115 = LOGNOT(op_LSHIFT_114); + RzILOpPure *op_AND_116 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_115); + RzILOpPure *op_RSHIFT_150 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_153 = LOGAND(op_RSHIFT_150, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_NEG_156 = NEG(CAST(32, MSB(CAST(16, MSB(op_AND_153), DUP(op_AND_153))), CAST(16, MSB(DUP(op_AND_153)), DUP(op_AND_153)))); + RzILOpPure *cond_192 = ITE(DUP(op_EQ_146), CAST(64, MSB(op_NEG_156), DUP(op_NEG_156)), VARL("h_tmp536")); + RzILOpPure *op_AND_195 = LOGAND(cond_192, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_LSHIFT_200 = SHIFTL0(CAST(64, IL_FALSE, op_AND_195), SN(32, 16)); + RzILOpPure *op_OR_202 = LOGOR(CAST(64, IL_FALSE, op_AND_116), op_LSHIFT_200); + RzILOpEffect *op_ASSIGN_204 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_202)); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) (-((st32) ((st16) ( ...; + RzILOpEffect *seq_205 = SEQN(2, seq_190, op_ASSIGN_204); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_259 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) (-((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))))), 0x0, 0x10) == ((st64) 
(-((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((-((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff))))) < 0x0) ? (-(0x1 << 0xf)) : (0x1 << 0xf) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_223 = SHIFTRA(DUP(Rss), SN(32, 16)); + RzILOpPure *op_AND_226 = LOGAND(op_RSHIFT_223, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_NEG_229 = NEG(CAST(32, MSB(CAST(16, MSB(op_AND_226), DUP(op_AND_226))), CAST(16, MSB(DUP(op_AND_226)), DUP(op_AND_226)))); + RzILOpPure *op_RSHIFT_238 = SHIFTRA(DUP(Rss), SN(32, 16)); + RzILOpPure *op_AND_241 = LOGAND(op_RSHIFT_238, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_NEG_244 = NEG(CAST(32, MSB(CAST(16, MSB(op_AND_241), DUP(op_AND_241))), CAST(16, MSB(DUP(op_AND_241)), DUP(op_AND_241)))); + RzILOpPure *op_EQ_246 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_NEG_229), SN(32, 0), SN(32, 16)), CAST(64, MSB(op_NEG_244), DUP(op_NEG_244))); + RzILOpPure *op_RSHIFT_263 = SHIFTRA(DUP(Rss), SN(32, 16)); + RzILOpPure *op_AND_266 = LOGAND(op_RSHIFT_263, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_NEG_269 = NEG(CAST(32, MSB(CAST(16, MSB(op_AND_266), DUP(op_AND_266))), CAST(16, MSB(DUP(op_AND_266)), DUP(op_AND_266)))); + RzILOpPure *op_LT_271 = SLT(op_NEG_269, SN(32, 0)); + RzILOpPure *op_LSHIFT_276 = SHIFTL0(SN(64, 1), SN(32, 15)); + RzILOpPure *op_NEG_277 = NEG(op_LSHIFT_276); + RzILOpPure *op_LSHIFT_282 = SHIFTL0(SN(64, 1), SN(32, 15)); + RzILOpPure *op_SUB_285 = SUB(op_LSHIFT_282, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_286 = ITE(op_LT_271, op_NEG_277, op_SUB_285); + RzILOpEffect *gcc_expr_287 = BRANCH(op_EQ_246, EMPTY(), set_usr_field_call_259); + + // h_tmp537 = HYB(gcc_expr_if ((sextract64(((ut64) (-((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))))), 0x0, 0x10) == ((st64) (-((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 
0x1))}, (((-((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff))))) < 0x0) ? (-(0x1 << 0xf)) : (0x1 << 0xf) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_289 = SETL("h_tmp537", cond_286); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) (-((st32) ((st16) ((Rss ...; + RzILOpEffect *seq_290 = SEQN(2, gcc_expr_287, op_ASSIGN_hybrid_tmp_289); + + // Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << 0x0)))) | (((ut64) (((sextract64(((ut64) (-((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))))), 0x0, 0x10) == ((st64) (-((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff))))))) ? ((st64) (-((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))))) : h_tmp537) & ((st64) 0xffff))) << 0x0))); + RzILOpPure *op_LSHIFT_214 = SHIFTL0(SN(64, 0xffff), SN(32, 0)); + RzILOpPure *op_NOT_215 = LOGNOT(op_LSHIFT_214); + RzILOpPure *op_AND_216 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_215); + RzILOpPure *op_RSHIFT_250 = SHIFTRA(DUP(Rss), SN(32, 16)); + RzILOpPure *op_AND_253 = LOGAND(op_RSHIFT_250, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_NEG_256 = NEG(CAST(32, MSB(CAST(16, MSB(op_AND_253), DUP(op_AND_253))), CAST(16, MSB(DUP(op_AND_253)), DUP(op_AND_253)))); + RzILOpPure *cond_292 = ITE(DUP(op_EQ_246), CAST(64, MSB(op_NEG_256), DUP(op_NEG_256)), VARL("h_tmp537")); + RzILOpPure *op_AND_295 = LOGAND(cond_292, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_LSHIFT_300 = SHIFTL0(CAST(64, IL_FALSE, op_AND_295), SN(32, 0)); + RzILOpPure *op_OR_302 = LOGOR(CAST(64, IL_FALSE, op_AND_216), op_LSHIFT_300); + RzILOpEffect *op_ASSIGN_304 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_302)); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) (-((st32) ((st16) ( ...; + RzILOpEffect *seq_305 = SEQN(2, seq_290, op_ASSIGN_304); + + // Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << 0x10)))) | (((ut64) (((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) & 0xffff)) << 0x10))); + RzILOpPure *op_LSHIFT_311 = SHIFTL0(SN(64, 0xffff), SN(32, 16)); + RzILOpPure *op_NOT_312 = 
LOGNOT(op_LSHIFT_311); + RzILOpPure *op_AND_313 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_312); + RzILOpPure *op_RSHIFT_317 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_320 = LOGAND(op_RSHIFT_317, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_AND_324 = LOGAND(CAST(32, MSB(CAST(16, MSB(op_AND_320), DUP(op_AND_320))), CAST(16, MSB(DUP(op_AND_320)), DUP(op_AND_320))), SN(32, 0xffff)); + RzILOpPure *op_LSHIFT_329 = SHIFTL0(CAST(64, IL_FALSE, op_AND_324), SN(32, 16)); + RzILOpPure *op_OR_331 = LOGOR(CAST(64, IL_FALSE, op_AND_313), op_LSHIFT_329); + RzILOpEffect *op_ASSIGN_333 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_331)); + + // seq({}); + RzILOpEffect *seq_then_338 = EMPTY(); + + // if ((((st32) tmp) != 0x3)) {seq({})} else {{}}; + RzILOpPure *op_NE_337 = INV(EQ(CAST(32, IL_FALSE, VARL("tmp")), SN(32, 3))); + RzILOpEffect *branch_339 = BRANCH(op_NE_337, seq_then_338, EMPTY()); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_389 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) (-((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))))), 0x0, 0x10) == ((st64) (-((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((-((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff))))) < 0x0) ? 
(-(0x1 << 0xf)) : (0x1 << 0xf) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_353 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_356 = LOGAND(op_RSHIFT_353, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_NEG_359 = NEG(CAST(32, MSB(CAST(16, MSB(op_AND_356), DUP(op_AND_356))), CAST(16, MSB(DUP(op_AND_356)), DUP(op_AND_356)))); + RzILOpPure *op_RSHIFT_368 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_371 = LOGAND(op_RSHIFT_368, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_NEG_374 = NEG(CAST(32, MSB(CAST(16, MSB(op_AND_371), DUP(op_AND_371))), CAST(16, MSB(DUP(op_AND_371)), DUP(op_AND_371)))); + RzILOpPure *op_EQ_376 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_NEG_359), SN(32, 0), SN(32, 16)), CAST(64, MSB(op_NEG_374), DUP(op_NEG_374))); + RzILOpPure *op_RSHIFT_393 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_396 = LOGAND(op_RSHIFT_393, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_NEG_399 = NEG(CAST(32, MSB(CAST(16, MSB(op_AND_396), DUP(op_AND_396))), CAST(16, MSB(DUP(op_AND_396)), DUP(op_AND_396)))); + RzILOpPure *op_LT_401 = SLT(op_NEG_399, SN(32, 0)); + RzILOpPure *op_LSHIFT_406 = SHIFTL0(SN(64, 1), SN(32, 15)); + RzILOpPure *op_NEG_407 = NEG(op_LSHIFT_406); + RzILOpPure *op_LSHIFT_412 = SHIFTL0(SN(64, 1), SN(32, 15)); + RzILOpPure *op_SUB_415 = SUB(op_LSHIFT_412, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_416 = ITE(op_LT_401, op_NEG_407, op_SUB_415); + RzILOpEffect *gcc_expr_417 = BRANCH(op_EQ_376, EMPTY(), set_usr_field_call_389); + + // h_tmp538 = HYB(gcc_expr_if ((sextract64(((ut64) (-((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))))), 0x0, 0x10) == ((st64) (-((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((-((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff))))) < 0x0) ? 
(-(0x1 << 0xf)) : (0x1 << 0xf) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_419 = SETL("h_tmp538", cond_416); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) (-((st32) ((st16) ((Rss ...; + RzILOpEffect *seq_420 = SEQN(2, gcc_expr_417, op_ASSIGN_hybrid_tmp_419); + + // Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << 0x0)))) | (((ut64) (((sextract64(((ut64) (-((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))))), 0x0, 0x10) == ((st64) (-((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff))))))) ? ((st64) (-((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))))) : h_tmp538) & ((st64) 0xffff))) << 0x0))); + RzILOpPure *op_LSHIFT_344 = SHIFTL0(SN(64, 0xffff), SN(32, 0)); + RzILOpPure *op_NOT_345 = LOGNOT(op_LSHIFT_344); + RzILOpPure *op_AND_346 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_345); + RzILOpPure *op_RSHIFT_380 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_383 = LOGAND(op_RSHIFT_380, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_NEG_386 = NEG(CAST(32, MSB(CAST(16, MSB(op_AND_383), DUP(op_AND_383))), CAST(16, MSB(DUP(op_AND_383)), DUP(op_AND_383)))); + RzILOpPure *cond_422 = ITE(DUP(op_EQ_376), CAST(64, MSB(op_NEG_386), DUP(op_NEG_386)), VARL("h_tmp538")); + RzILOpPure *op_AND_425 = LOGAND(cond_422, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_LSHIFT_430 = SHIFTL0(CAST(64, IL_FALSE, op_AND_425), SN(32, 0)); + RzILOpPure *op_OR_432 = LOGOR(CAST(64, IL_FALSE, op_AND_346), op_LSHIFT_430); + RzILOpEffect *op_ASSIGN_434 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_432)); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) (-((st32) ((st16) ( ...; + RzILOpEffect *seq_435 = SEQN(2, seq_420, op_ASSIGN_434); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_486 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) (-((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))))), 0x0, 0x10) == ((st64) 
(-((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((-((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff))))) < 0x0) ? (-(0x1 << 0xf)) : (0x1 << 0xf) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_450 = SHIFTRA(DUP(Rss), SN(32, 16)); + RzILOpPure *op_AND_453 = LOGAND(op_RSHIFT_450, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_NEG_456 = NEG(CAST(32, MSB(CAST(16, MSB(op_AND_453), DUP(op_AND_453))), CAST(16, MSB(DUP(op_AND_453)), DUP(op_AND_453)))); + RzILOpPure *op_RSHIFT_465 = SHIFTRA(DUP(Rss), SN(32, 16)); + RzILOpPure *op_AND_468 = LOGAND(op_RSHIFT_465, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_NEG_471 = NEG(CAST(32, MSB(CAST(16, MSB(op_AND_468), DUP(op_AND_468))), CAST(16, MSB(DUP(op_AND_468)), DUP(op_AND_468)))); + RzILOpPure *op_EQ_473 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_NEG_456), SN(32, 0), SN(32, 16)), CAST(64, MSB(op_NEG_471), DUP(op_NEG_471))); + RzILOpPure *op_RSHIFT_490 = SHIFTRA(DUP(Rss), SN(32, 16)); + RzILOpPure *op_AND_493 = LOGAND(op_RSHIFT_490, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_NEG_496 = NEG(CAST(32, MSB(CAST(16, MSB(op_AND_493), DUP(op_AND_493))), CAST(16, MSB(DUP(op_AND_493)), DUP(op_AND_493)))); + RzILOpPure *op_LT_498 = SLT(op_NEG_496, SN(32, 0)); + RzILOpPure *op_LSHIFT_503 = SHIFTL0(SN(64, 1), SN(32, 15)); + RzILOpPure *op_NEG_504 = NEG(op_LSHIFT_503); + RzILOpPure *op_LSHIFT_509 = SHIFTL0(SN(64, 1), SN(32, 15)); + RzILOpPure *op_SUB_512 = SUB(op_LSHIFT_509, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_513 = ITE(op_LT_498, op_NEG_504, op_SUB_512); + RzILOpEffect *gcc_expr_514 = BRANCH(op_EQ_473, EMPTY(), set_usr_field_call_486); + + // h_tmp539 = HYB(gcc_expr_if ((sextract64(((ut64) (-((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))))), 0x0, 0x10) == ((st64) (-((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 
0x1))}, (((-((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff))))) < 0x0) ? (-(0x1 << 0xf)) : (0x1 << 0xf) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_516 = SETL("h_tmp539", cond_513); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) (-((st32) ((st16) ((Rss ...; + RzILOpEffect *seq_517 = SEQN(2, gcc_expr_514, op_ASSIGN_hybrid_tmp_516); + + // Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << 0x10)))) | (((ut64) (((sextract64(((ut64) (-((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))))), 0x0, 0x10) == ((st64) (-((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff))))))) ? ((st64) (-((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))))) : h_tmp539) & ((st64) 0xffff))) << 0x10))); + RzILOpPure *op_LSHIFT_441 = SHIFTL0(SN(64, 0xffff), SN(32, 16)); + RzILOpPure *op_NOT_442 = LOGNOT(op_LSHIFT_441); + RzILOpPure *op_AND_443 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_442); + RzILOpPure *op_RSHIFT_477 = SHIFTRA(DUP(Rss), SN(32, 16)); + RzILOpPure *op_AND_480 = LOGAND(op_RSHIFT_477, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_NEG_483 = NEG(CAST(32, MSB(CAST(16, MSB(op_AND_480), DUP(op_AND_480))), CAST(16, MSB(DUP(op_AND_480)), DUP(op_AND_480)))); + RzILOpPure *cond_519 = ITE(DUP(op_EQ_473), CAST(64, MSB(op_NEG_483), DUP(op_NEG_483)), VARL("h_tmp539")); + RzILOpPure *op_AND_522 = LOGAND(cond_519, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_LSHIFT_527 = SHIFTL0(CAST(64, IL_FALSE, op_AND_522), SN(32, 16)); + RzILOpPure *op_OR_529 = LOGOR(CAST(64, IL_FALSE, op_AND_443), op_LSHIFT_527); + RzILOpEffect *op_ASSIGN_531 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_529)); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) (-((st32) ((st16) ( ...; + RzILOpEffect *seq_532 = SEQN(2, seq_517, op_ASSIGN_531); + + // seq(seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) (-((st32) ((st1 ...; + RzILOpEffect *seq_then_534 = SEQN(2, seq_305, op_ASSIGN_333); + + // seq(if ((((st32) tmp) != 0x3)) {seq({})} else {{}}; seq(seq(HYB( ...; + RzILOpEffect 
*seq_else_535 = SEQN(3, branch_339, seq_435, seq_532); + + // if ((((st32) tmp) == 0x2)) {seq(seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) (-((st32) ((st1 ...} else {seq(if ((((st32) tmp) != 0x3)) {seq({})} else {{}}; seq(seq(HYB( ...}; + RzILOpPure *op_EQ_209 = EQ(CAST(32, IL_FALSE, VARL("tmp")), SN(32, 2)); + RzILOpEffect *branch_536 = BRANCH(op_EQ_209, seq_then_534, seq_else_535); + + // seq(Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << 0x0)))) | (((ut64 ...; + RzILOpEffect *seq_then_537 = SEQN(2, op_ASSIGN_108, seq_205); + + // seq(if ((((st32) tmp) == 0x2)) {seq(seq(seq(HYB(gcc_expr_if ((se ...; + RzILOpEffect *seq_else_538 = branch_536; + + // if ((((st32) tmp) == 0x1)) {seq(Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << 0x0)))) | (((ut64 ...} else {seq(if ((((st32) tmp) == 0x2)) {seq(seq(seq(HYB(gcc_expr_if ((se ...}; + RzILOpPure *op_EQ_81 = EQ(CAST(32, IL_FALSE, VARL("tmp")), SN(32, 1)); + RzILOpEffect *branch_539 = BRANCH(op_EQ_81, seq_then_537, seq_else_538); + + // seq(Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << 0x0)))) | (((ut64 ...; + RzILOpEffect *seq_then_540 = SEQN(2, op_ASSIGN_49, op_ASSIGN_77); + + // seq(if ((((st32) tmp) == 0x1)) {seq(Rdd = ((st64) (((ut64) (Rdd ...; + RzILOpEffect *seq_else_541 = branch_539; + + // if ((((st32) tmp) == 0x0)) {seq(Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << 0x0)))) | (((ut64 ...} else {seq(if ((((st32) tmp) == 0x1)) {seq(Rdd = ((st64) (((ut64) (Rdd ...}; + RzILOpPure *op_EQ_20 = EQ(CAST(32, IL_FALSE, VARL("tmp")), SN(32, 0)); + RzILOpEffect *branch_542 = BRANCH(op_EQ_20, seq_then_540, seq_else_541); + + // tmp = ((ut8) extract64(((ut64) Rt), 0x2, 0x2)); + RzILOpEffect *op_ASSIGN_558 = SETL("tmp", CAST(8, IL_FALSE, EXTRACT64(CAST(64, IL_FALSE, DUP(Rt)), SN(32, 2), SN(32, 2)))); + + // Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << 0x20)))) | (((ut64) (((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) & 0xffff)) << 0x20))); + RzILOpPure *op_LSHIFT_566 = SHIFTL0(SN(64, 0xffff), SN(32, 0x20)); + RzILOpPure *op_NOT_567 = 
LOGNOT(op_LSHIFT_566); + RzILOpPure *op_AND_568 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_567); + RzILOpPure *op_RSHIFT_572 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_575 = LOGAND(op_RSHIFT_572, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_AND_579 = LOGAND(CAST(32, MSB(CAST(16, MSB(op_AND_575), DUP(op_AND_575))), CAST(16, MSB(DUP(op_AND_575)), DUP(op_AND_575))), SN(32, 0xffff)); + RzILOpPure *op_LSHIFT_584 = SHIFTL0(CAST(64, IL_FALSE, op_AND_579), SN(32, 0x20)); + RzILOpPure *op_OR_586 = LOGOR(CAST(64, IL_FALSE, op_AND_568), op_LSHIFT_584); + RzILOpEffect *op_ASSIGN_588 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_586)); + + // Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << 0x30)))) | (((ut64) (((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) & 0xffff)) << 0x30))); + RzILOpPure *op_LSHIFT_594 = SHIFTL0(SN(64, 0xffff), SN(32, 0x30)); + RzILOpPure *op_NOT_595 = LOGNOT(op_LSHIFT_594); + RzILOpPure *op_AND_596 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_595); + RzILOpPure *op_RSHIFT_600 = SHIFTRA(DUP(Rss), SN(32, 0x30)); + RzILOpPure *op_AND_603 = LOGAND(op_RSHIFT_600, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_AND_607 = LOGAND(CAST(32, MSB(CAST(16, MSB(op_AND_603), DUP(op_AND_603))), CAST(16, MSB(DUP(op_AND_603)), DUP(op_AND_603))), SN(32, 0xffff)); + RzILOpPure *op_LSHIFT_612 = SHIFTL0(CAST(64, IL_FALSE, op_AND_607), SN(32, 0x30)); + RzILOpPure *op_OR_614 = LOGOR(CAST(64, IL_FALSE, op_AND_596), op_LSHIFT_612); + RzILOpEffect *op_ASSIGN_616 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_614)); + + // Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << 0x20)))) | (((ut64) (((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) & 0xffff)) << 0x20))); + RzILOpPure *op_LSHIFT_625 = SHIFTL0(SN(64, 0xffff), SN(32, 0x20)); + RzILOpPure *op_NOT_626 = LOGNOT(op_LSHIFT_625); + RzILOpPure *op_AND_627 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_626); + RzILOpPure *op_RSHIFT_631 = SHIFTRA(DUP(Rss), SN(32, 0x30)); + 
RzILOpPure *op_AND_634 = LOGAND(op_RSHIFT_631, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_AND_638 = LOGAND(CAST(32, MSB(CAST(16, MSB(op_AND_634), DUP(op_AND_634))), CAST(16, MSB(DUP(op_AND_634)), DUP(op_AND_634))), SN(32, 0xffff)); + RzILOpPure *op_LSHIFT_643 = SHIFTL0(CAST(64, IL_FALSE, op_AND_638), SN(32, 0x20)); + RzILOpPure *op_OR_645 = LOGOR(CAST(64, IL_FALSE, op_AND_627), op_LSHIFT_643); + RzILOpEffect *op_ASSIGN_647 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_645)); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_698 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) (-((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))))), 0x0, 0x10) == ((st64) (-((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((-((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff))))) < 0x0) ? 
(-(0x1 << 0xf)) : (0x1 << 0xf) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_662 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_665 = LOGAND(op_RSHIFT_662, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_NEG_668 = NEG(CAST(32, MSB(CAST(16, MSB(op_AND_665), DUP(op_AND_665))), CAST(16, MSB(DUP(op_AND_665)), DUP(op_AND_665)))); + RzILOpPure *op_RSHIFT_677 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_680 = LOGAND(op_RSHIFT_677, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_NEG_683 = NEG(CAST(32, MSB(CAST(16, MSB(op_AND_680), DUP(op_AND_680))), CAST(16, MSB(DUP(op_AND_680)), DUP(op_AND_680)))); + RzILOpPure *op_EQ_685 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_NEG_668), SN(32, 0), SN(32, 16)), CAST(64, MSB(op_NEG_683), DUP(op_NEG_683))); + RzILOpPure *op_RSHIFT_702 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_705 = LOGAND(op_RSHIFT_702, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_NEG_708 = NEG(CAST(32, MSB(CAST(16, MSB(op_AND_705), DUP(op_AND_705))), CAST(16, MSB(DUP(op_AND_705)), DUP(op_AND_705)))); + RzILOpPure *op_LT_710 = SLT(op_NEG_708, SN(32, 0)); + RzILOpPure *op_LSHIFT_715 = SHIFTL0(SN(64, 1), SN(32, 15)); + RzILOpPure *op_NEG_716 = NEG(op_LSHIFT_715); + RzILOpPure *op_LSHIFT_721 = SHIFTL0(SN(64, 1), SN(32, 15)); + RzILOpPure *op_SUB_724 = SUB(op_LSHIFT_721, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_725 = ITE(op_LT_710, op_NEG_716, op_SUB_724); + RzILOpEffect *gcc_expr_726 = BRANCH(op_EQ_685, EMPTY(), set_usr_field_call_698); + + // h_tmp540 = HYB(gcc_expr_if ((sextract64(((ut64) (-((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))))), 0x0, 0x10) == ((st64) (-((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((-((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff))))) < 0x0) ? 
(-(0x1 << 0xf)) : (0x1 << 0xf) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_728 = SETL("h_tmp540", cond_725); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) (-((st32) ((st16) ((Rss ...; + RzILOpEffect *seq_729 = SEQN(2, gcc_expr_726, op_ASSIGN_hybrid_tmp_728); + + // Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << 0x30)))) | (((ut64) (((sextract64(((ut64) (-((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))))), 0x0, 0x10) == ((st64) (-((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff))))))) ? ((st64) (-((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))))) : h_tmp540) & ((st64) 0xffff))) << 0x30))); + RzILOpPure *op_LSHIFT_653 = SHIFTL0(SN(64, 0xffff), SN(32, 0x30)); + RzILOpPure *op_NOT_654 = LOGNOT(op_LSHIFT_653); + RzILOpPure *op_AND_655 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_654); + RzILOpPure *op_RSHIFT_689 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_692 = LOGAND(op_RSHIFT_689, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_NEG_695 = NEG(CAST(32, MSB(CAST(16, MSB(op_AND_692), DUP(op_AND_692))), CAST(16, MSB(DUP(op_AND_692)), DUP(op_AND_692)))); + RzILOpPure *cond_731 = ITE(DUP(op_EQ_685), CAST(64, MSB(op_NEG_695), DUP(op_NEG_695)), VARL("h_tmp540")); + RzILOpPure *op_AND_734 = LOGAND(cond_731, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_LSHIFT_739 = SHIFTL0(CAST(64, IL_FALSE, op_AND_734), SN(32, 0x30)); + RzILOpPure *op_OR_741 = LOGOR(CAST(64, IL_FALSE, op_AND_655), op_LSHIFT_739); + RzILOpEffect *op_ASSIGN_743 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_741)); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) (-((st32) ((st16) ( ...; + RzILOpEffect *seq_744 = SEQN(2, seq_729, op_ASSIGN_743); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_798 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) (-((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))))), 0x0, 0x10) == 
((st64) (-((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((-((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff))))) < 0x0) ? (-(0x1 << 0xf)) : (0x1 << 0xf) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_762 = SHIFTRA(DUP(Rss), SN(32, 0x30)); + RzILOpPure *op_AND_765 = LOGAND(op_RSHIFT_762, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_NEG_768 = NEG(CAST(32, MSB(CAST(16, MSB(op_AND_765), DUP(op_AND_765))), CAST(16, MSB(DUP(op_AND_765)), DUP(op_AND_765)))); + RzILOpPure *op_RSHIFT_777 = SHIFTRA(DUP(Rss), SN(32, 0x30)); + RzILOpPure *op_AND_780 = LOGAND(op_RSHIFT_777, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_NEG_783 = NEG(CAST(32, MSB(CAST(16, MSB(op_AND_780), DUP(op_AND_780))), CAST(16, MSB(DUP(op_AND_780)), DUP(op_AND_780)))); + RzILOpPure *op_EQ_785 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_NEG_768), SN(32, 0), SN(32, 16)), CAST(64, MSB(op_NEG_783), DUP(op_NEG_783))); + RzILOpPure *op_RSHIFT_802 = SHIFTRA(DUP(Rss), SN(32, 0x30)); + RzILOpPure *op_AND_805 = LOGAND(op_RSHIFT_802, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_NEG_808 = NEG(CAST(32, MSB(CAST(16, MSB(op_AND_805), DUP(op_AND_805))), CAST(16, MSB(DUP(op_AND_805)), DUP(op_AND_805)))); + RzILOpPure *op_LT_810 = SLT(op_NEG_808, SN(32, 0)); + RzILOpPure *op_LSHIFT_815 = SHIFTL0(SN(64, 1), SN(32, 15)); + RzILOpPure *op_NEG_816 = NEG(op_LSHIFT_815); + RzILOpPure *op_LSHIFT_821 = SHIFTL0(SN(64, 1), SN(32, 15)); + RzILOpPure *op_SUB_824 = SUB(op_LSHIFT_821, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_825 = ITE(op_LT_810, op_NEG_816, op_SUB_824); + RzILOpEffect *gcc_expr_826 = BRANCH(op_EQ_785, EMPTY(), set_usr_field_call_798); + + // h_tmp541 = HYB(gcc_expr_if ((sextract64(((ut64) (-((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))))), 0x0, 0x10) == ((st64) (-((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, 
((ut32) 0x1))}, (((-((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff))))) < 0x0) ? (-(0x1 << 0xf)) : (0x1 << 0xf) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_828 = SETL("h_tmp541", cond_825); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) (-((st32) ((st16) ((Rss ...; + RzILOpEffect *seq_829 = SEQN(2, gcc_expr_826, op_ASSIGN_hybrid_tmp_828); + + // Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << 0x20)))) | (((ut64) (((sextract64(((ut64) (-((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))))), 0x0, 0x10) == ((st64) (-((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff))))))) ? ((st64) (-((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))))) : h_tmp541) & ((st64) 0xffff))) << 0x20))); + RzILOpPure *op_LSHIFT_753 = SHIFTL0(SN(64, 0xffff), SN(32, 0x20)); + RzILOpPure *op_NOT_754 = LOGNOT(op_LSHIFT_753); + RzILOpPure *op_AND_755 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_754); + RzILOpPure *op_RSHIFT_789 = SHIFTRA(DUP(Rss), SN(32, 0x30)); + RzILOpPure *op_AND_792 = LOGAND(op_RSHIFT_789, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_NEG_795 = NEG(CAST(32, MSB(CAST(16, MSB(op_AND_792), DUP(op_AND_792))), CAST(16, MSB(DUP(op_AND_792)), DUP(op_AND_792)))); + RzILOpPure *cond_831 = ITE(DUP(op_EQ_785), CAST(64, MSB(op_NEG_795), DUP(op_NEG_795)), VARL("h_tmp541")); + RzILOpPure *op_AND_834 = LOGAND(cond_831, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_LSHIFT_839 = SHIFTL0(CAST(64, IL_FALSE, op_AND_834), SN(32, 0x20)); + RzILOpPure *op_OR_841 = LOGOR(CAST(64, IL_FALSE, op_AND_755), op_LSHIFT_839); + RzILOpEffect *op_ASSIGN_843 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_841)); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) (-((st32) ((st16) ( ...; + RzILOpEffect *seq_844 = SEQN(2, seq_829, op_ASSIGN_843); + + // Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << 0x30)))) | (((ut64) (((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) & 0xffff)) << 0x30))); + RzILOpPure *op_LSHIFT_850 = SHIFTL0(SN(64, 0xffff), SN(32, 0x30)); + 
RzILOpPure *op_NOT_851 = LOGNOT(op_LSHIFT_850); + RzILOpPure *op_AND_852 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_851); + RzILOpPure *op_RSHIFT_856 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_859 = LOGAND(op_RSHIFT_856, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_AND_863 = LOGAND(CAST(32, MSB(CAST(16, MSB(op_AND_859), DUP(op_AND_859))), CAST(16, MSB(DUP(op_AND_859)), DUP(op_AND_859))), SN(32, 0xffff)); + RzILOpPure *op_LSHIFT_868 = SHIFTL0(CAST(64, IL_FALSE, op_AND_863), SN(32, 0x30)); + RzILOpPure *op_OR_870 = LOGOR(CAST(64, IL_FALSE, op_AND_852), op_LSHIFT_868); + RzILOpEffect *op_ASSIGN_872 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_870)); + + // seq({}); + RzILOpEffect *seq_then_877 = EMPTY(); + + // if ((((st32) tmp) != 0x3)) {seq({})} else {{}}; + RzILOpPure *op_NE_876 = INV(EQ(CAST(32, IL_FALSE, VARL("tmp")), SN(32, 3))); + RzILOpEffect *branch_878 = BRANCH(op_NE_876, seq_then_877, EMPTY()); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_928 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) (-((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))))), 0x0, 0x10) == ((st64) (-((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((-((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff))))) < 0x0) ? 
(-(0x1 << 0xf)) : (0x1 << 0xf) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_892 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_895 = LOGAND(op_RSHIFT_892, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_NEG_898 = NEG(CAST(32, MSB(CAST(16, MSB(op_AND_895), DUP(op_AND_895))), CAST(16, MSB(DUP(op_AND_895)), DUP(op_AND_895)))); + RzILOpPure *op_RSHIFT_907 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_910 = LOGAND(op_RSHIFT_907, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_NEG_913 = NEG(CAST(32, MSB(CAST(16, MSB(op_AND_910), DUP(op_AND_910))), CAST(16, MSB(DUP(op_AND_910)), DUP(op_AND_910)))); + RzILOpPure *op_EQ_915 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_NEG_898), SN(32, 0), SN(32, 16)), CAST(64, MSB(op_NEG_913), DUP(op_NEG_913))); + RzILOpPure *op_RSHIFT_932 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_935 = LOGAND(op_RSHIFT_932, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_NEG_938 = NEG(CAST(32, MSB(CAST(16, MSB(op_AND_935), DUP(op_AND_935))), CAST(16, MSB(DUP(op_AND_935)), DUP(op_AND_935)))); + RzILOpPure *op_LT_940 = SLT(op_NEG_938, SN(32, 0)); + RzILOpPure *op_LSHIFT_945 = SHIFTL0(SN(64, 1), SN(32, 15)); + RzILOpPure *op_NEG_946 = NEG(op_LSHIFT_945); + RzILOpPure *op_LSHIFT_951 = SHIFTL0(SN(64, 1), SN(32, 15)); + RzILOpPure *op_SUB_954 = SUB(op_LSHIFT_951, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_955 = ITE(op_LT_940, op_NEG_946, op_SUB_954); + RzILOpEffect *gcc_expr_956 = BRANCH(op_EQ_915, EMPTY(), set_usr_field_call_928); + + // h_tmp542 = HYB(gcc_expr_if ((sextract64(((ut64) (-((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))))), 0x0, 0x10) == ((st64) (-((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((-((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff))))) < 0x0) ? 
(-(0x1 << 0xf)) : (0x1 << 0xf) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_958 = SETL("h_tmp542", cond_955); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) (-((st32) ((st16) ((Rss ...; + RzILOpEffect *seq_959 = SEQN(2, gcc_expr_956, op_ASSIGN_hybrid_tmp_958); + + // Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << 0x20)))) | (((ut64) (((sextract64(((ut64) (-((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))))), 0x0, 0x10) == ((st64) (-((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff))))))) ? ((st64) (-((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))))) : h_tmp542) & ((st64) 0xffff))) << 0x20))); + RzILOpPure *op_LSHIFT_883 = SHIFTL0(SN(64, 0xffff), SN(32, 0x20)); + RzILOpPure *op_NOT_884 = LOGNOT(op_LSHIFT_883); + RzILOpPure *op_AND_885 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_884); + RzILOpPure *op_RSHIFT_919 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_922 = LOGAND(op_RSHIFT_919, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_NEG_925 = NEG(CAST(32, MSB(CAST(16, MSB(op_AND_922), DUP(op_AND_922))), CAST(16, MSB(DUP(op_AND_922)), DUP(op_AND_922)))); + RzILOpPure *cond_961 = ITE(DUP(op_EQ_915), CAST(64, MSB(op_NEG_925), DUP(op_NEG_925)), VARL("h_tmp542")); + RzILOpPure *op_AND_964 = LOGAND(cond_961, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_LSHIFT_969 = SHIFTL0(CAST(64, IL_FALSE, op_AND_964), SN(32, 0x20)); + RzILOpPure *op_OR_971 = LOGOR(CAST(64, IL_FALSE, op_AND_885), op_LSHIFT_969); + RzILOpEffect *op_ASSIGN_973 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_971)); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) (-((st32) ((st16) ( ...; + RzILOpEffect *seq_974 = SEQN(2, seq_959, op_ASSIGN_973); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_1025 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) (-((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))))), 0x0, 0x10) 
== ((st64) (-((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((-((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff))))) < 0x0) ? (-(0x1 << 0xf)) : (0x1 << 0xf) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_989 = SHIFTRA(DUP(Rss), SN(32, 0x30)); + RzILOpPure *op_AND_992 = LOGAND(op_RSHIFT_989, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_NEG_995 = NEG(CAST(32, MSB(CAST(16, MSB(op_AND_992), DUP(op_AND_992))), CAST(16, MSB(DUP(op_AND_992)), DUP(op_AND_992)))); + RzILOpPure *op_RSHIFT_1004 = SHIFTRA(DUP(Rss), SN(32, 0x30)); + RzILOpPure *op_AND_1007 = LOGAND(op_RSHIFT_1004, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_NEG_1010 = NEG(CAST(32, MSB(CAST(16, MSB(op_AND_1007), DUP(op_AND_1007))), CAST(16, MSB(DUP(op_AND_1007)), DUP(op_AND_1007)))); + RzILOpPure *op_EQ_1012 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_NEG_995), SN(32, 0), SN(32, 16)), CAST(64, MSB(op_NEG_1010), DUP(op_NEG_1010))); + RzILOpPure *op_RSHIFT_1029 = SHIFTRA(DUP(Rss), SN(32, 0x30)); + RzILOpPure *op_AND_1032 = LOGAND(op_RSHIFT_1029, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_NEG_1035 = NEG(CAST(32, MSB(CAST(16, MSB(op_AND_1032), DUP(op_AND_1032))), CAST(16, MSB(DUP(op_AND_1032)), DUP(op_AND_1032)))); + RzILOpPure *op_LT_1037 = SLT(op_NEG_1035, SN(32, 0)); + RzILOpPure *op_LSHIFT_1042 = SHIFTL0(SN(64, 1), SN(32, 15)); + RzILOpPure *op_NEG_1043 = NEG(op_LSHIFT_1042); + RzILOpPure *op_LSHIFT_1048 = SHIFTL0(SN(64, 1), SN(32, 15)); + RzILOpPure *op_SUB_1051 = SUB(op_LSHIFT_1048, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_1052 = ITE(op_LT_1037, op_NEG_1043, op_SUB_1051); + RzILOpEffect *gcc_expr_1053 = BRANCH(op_EQ_1012, EMPTY(), set_usr_field_call_1025); + + // h_tmp543 = HYB(gcc_expr_if ((sextract64(((ut64) (-((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))))), 0x0, 0x10) == ((st64) (-((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))))))) {{}} else 
{set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((-((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff))))) < 0x0) ? (-(0x1 << 0xf)) : (0x1 << 0xf) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_1055 = SETL("h_tmp543", cond_1052); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) (-((st32) ((st16) ((Rss ...; + RzILOpEffect *seq_1056 = SEQN(2, gcc_expr_1053, op_ASSIGN_hybrid_tmp_1055); + + // Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << 0x30)))) | (((ut64) (((sextract64(((ut64) (-((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))))), 0x0, 0x10) == ((st64) (-((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff))))))) ? ((st64) (-((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))))) : h_tmp543) & ((st64) 0xffff))) << 0x30))); + RzILOpPure *op_LSHIFT_980 = SHIFTL0(SN(64, 0xffff), SN(32, 0x30)); + RzILOpPure *op_NOT_981 = LOGNOT(op_LSHIFT_980); + RzILOpPure *op_AND_982 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_981); + RzILOpPure *op_RSHIFT_1016 = SHIFTRA(DUP(Rss), SN(32, 0x30)); + RzILOpPure *op_AND_1019 = LOGAND(op_RSHIFT_1016, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_NEG_1022 = NEG(CAST(32, MSB(CAST(16, MSB(op_AND_1019), DUP(op_AND_1019))), CAST(16, MSB(DUP(op_AND_1019)), DUP(op_AND_1019)))); + RzILOpPure *cond_1058 = ITE(DUP(op_EQ_1012), CAST(64, MSB(op_NEG_1022), DUP(op_NEG_1022)), VARL("h_tmp543")); + RzILOpPure *op_AND_1061 = LOGAND(cond_1058, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_LSHIFT_1066 = SHIFTL0(CAST(64, IL_FALSE, op_AND_1061), SN(32, 0x30)); + RzILOpPure *op_OR_1068 = LOGOR(CAST(64, IL_FALSE, op_AND_982), op_LSHIFT_1066); + RzILOpEffect *op_ASSIGN_1070 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_1068)); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) (-((st32) ((st16) ( ...; + RzILOpEffect *seq_1071 = SEQN(2, seq_1056, op_ASSIGN_1070); + + // seq(seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) (-((st32) ((st1 ...; + RzILOpEffect *seq_then_1073 = SEQN(2, seq_844, op_ASSIGN_872); + + 
// seq(if ((((st32) tmp) != 0x3)) {seq({})} else {{}}; seq(seq(HYB( ...; + RzILOpEffect *seq_else_1074 = SEQN(3, branch_878, seq_974, seq_1071); + + // if ((((st32) tmp) == 0x2)) {seq(seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) (-((st32) ((st1 ...} else {seq(if ((((st32) tmp) != 0x3)) {seq({})} else {{}}; seq(seq(HYB( ...}; + RzILOpPure *op_EQ_748 = EQ(CAST(32, IL_FALSE, VARL("tmp")), SN(32, 2)); + RzILOpEffect *branch_1075 = BRANCH(op_EQ_748, seq_then_1073, seq_else_1074); + + // seq(Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << 0x20)))) | (((ut6 ...; + RzILOpEffect *seq_then_1076 = SEQN(2, op_ASSIGN_647, seq_744); + + // seq(if ((((st32) tmp) == 0x2)) {seq(seq(seq(HYB(gcc_expr_if ((se ...; + RzILOpEffect *seq_else_1077 = branch_1075; + + // if ((((st32) tmp) == 0x1)) {seq(Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << 0x20)))) | (((ut6 ...} else {seq(if ((((st32) tmp) == 0x2)) {seq(seq(seq(HYB(gcc_expr_if ((se ...}; + RzILOpPure *op_EQ_620 = EQ(CAST(32, IL_FALSE, VARL("tmp")), SN(32, 1)); + RzILOpEffect *branch_1078 = BRANCH(op_EQ_620, seq_then_1076, seq_else_1077); + + // seq(Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << 0x20)))) | (((ut6 ...; + RzILOpEffect *seq_then_1079 = SEQN(2, op_ASSIGN_588, op_ASSIGN_616); + + // seq(if ((((st32) tmp) == 0x1)) {seq(Rdd = ((st64) (((ut64) (Rdd ...; + RzILOpEffect *seq_else_1080 = branch_1078; + + // if ((((st32) tmp) == 0x0)) {seq(Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << 0x20)))) | (((ut6 ...} else {seq(if ((((st32) tmp) == 0x1)) {seq(Rdd = ((st64) (((ut64) (Rdd ...}; + RzILOpPure *op_EQ_561 = EQ(CAST(32, IL_FALSE, VARL("tmp")), SN(32, 0)); + RzILOpEffect *branch_1081 = BRANCH(op_EQ_561, seq_then_1079, seq_else_1080); + + RzILOpEffect *instruction_sequence = SEQN(4, op_ASSIGN_17, branch_542, op_ASSIGN_558, branch_1081); + return instruction_sequence; +} + +// Rxx += vrcnegh(Rss,Rt) +RzILOpEffect *hex_il_op_s2_vrcnegh(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // 
Declare: st32 i; + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + const HexOp *Rxx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_2 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_5 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp544 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_7 = SETL("h_tmp544", VARL("i")); + + // seq(h_tmp544 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_8 = SEQN(2, op_ASSIGN_hybrid_tmp_7, op_INC_5); + + // Rxx = Rxx + ((st64) (-((st32) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff)))))); + RzILOpPure *op_MUL_16 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_17 = SHIFTRA(Rss, op_MUL_16); + RzILOpPure *op_AND_20 = LOGAND(op_RSHIFT_17, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_NEG_23 = NEG(CAST(32, MSB(CAST(16, MSB(op_AND_20), DUP(op_AND_20))), CAST(16, MSB(DUP(op_AND_20)), DUP(op_AND_20)))); + RzILOpPure *op_ADD_25 = ADD(READ_REG(pkt, Rxx_op, false), CAST(64, MSB(op_NEG_23), DUP(op_NEG_23))); + RzILOpEffect *op_ASSIGN_ADD_26 = WRITE_REG(bundle, Rxx_op, op_ADD_25); + + // Rxx = Rxx + ((st64) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff)))); + RzILOpPure *op_MUL_28 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_29 = SHIFTRA(DUP(Rss), op_MUL_28); + RzILOpPure *op_AND_32 = LOGAND(op_RSHIFT_29, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_ADD_35 = ADD(READ_REG(pkt, Rxx_op, false), CAST(64, MSB(CAST(16, MSB(op_AND_32), DUP(op_AND_32))), CAST(16, MSB(DUP(op_AND_32)), DUP(op_AND_32)))); + RzILOpEffect *op_ASSIGN_ADD_36 = WRITE_REG(bundle, Rxx_op, op_ADD_35); + + // seq(Rxx = Rxx + ((st64) (-((st32) ((st16) ((Rss >> i * 0x10) & ( ...; + RzILOpEffect *seq_then_37 = op_ASSIGN_ADD_26; + + // seq(Rxx = Rxx + ((st64) ((st16) ((Rss >> i * 0x10) & ((st64) 0xf ...; + RzILOpEffect *seq_else_38 = op_ASSIGN_ADD_36; + + // if 
(((Rt >> i) & 0x1)) {seq(Rxx = Rxx + ((st64) (-((st32) ((st16) ((Rss >> i * 0x10) & ( ...} else {seq(Rxx = Rxx + ((st64) ((st16) ((Rss >> i * 0x10) & ((st64) 0xf ...}; + RzILOpPure *op_RSHIFT_10 = SHIFTRA(Rt, VARL("i")); + RzILOpPure *op_AND_12 = LOGAND(op_RSHIFT_10, SN(32, 1)); + RzILOpEffect *branch_39 = BRANCH(NON_ZERO(op_AND_12), seq_then_37, seq_else_38); + + // seq(h_tmp544; if (((Rt >> i) & 0x1)) {seq(Rxx = Rxx + ((st64) (- ...; + RzILOpEffect *seq_40 = branch_39; + + // seq(seq(h_tmp544; if (((Rt >> i) & 0x1)) {seq(Rxx = Rxx + ((st64 ...; + RzILOpEffect *seq_41 = SEQN(2, seq_40, seq_8); + + // while ((i < 0x4)) { seq(seq(h_tmp544; if (((Rt >> i) & 0x1)) {seq(Rxx = Rxx + ((st64 ... }; + RzILOpPure *op_LT_4 = SLT(VARL("i"), SN(32, 4)); + RzILOpEffect *for_42 = REPEAT(op_LT_4, seq_41); + + // seq(i = 0x0; while ((i < 0x4)) { seq(seq(h_tmp544; if (((Rt >> i ...; + RzILOpEffect *seq_43 = SEQN(2, op_ASSIGN_2, for_42); + + RzILOpEffect *instruction_sequence = seq_43; + return instruction_sequence; +} + +// Rd = vrndwh(Rss) +RzILOpEffect *hex_il_op_s2_vrndpackwh(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: st32 i; + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_2 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_5 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp545 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_7 = SETL("h_tmp545", VARL("i")); + + // seq(h_tmp545 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_8 = SEQN(2, op_ASSIGN_hybrid_tmp_7, op_INC_5); + + // Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xffff << i * 0x10)))) | (((ut64) (((st32) ((st16) ((((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff))) + ((st64) 0x8000) >> 0x10) & ((st64) 0xffff)))) & 0xffff)) << i * 0x10))); + RzILOpPure *op_MUL_12 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure 
*op_LSHIFT_13 = SHIFTL0(SN(64, 0xffff), op_MUL_12); + RzILOpPure *op_NOT_14 = LOGNOT(op_LSHIFT_13); + RzILOpPure *op_AND_16 = LOGAND(CAST(64, MSB(READ_REG(pkt, Rd_op, true)), READ_REG(pkt, Rd_op, true)), op_NOT_14); + RzILOpPure *op_MUL_19 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_RSHIFT_20 = SHIFTRA(Rss, op_MUL_19); + RzILOpPure *op_AND_22 = LOGAND(op_RSHIFT_20, SN(64, 0xffffffff)); + RzILOpPure *op_ADD_27 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_22), DUP(op_AND_22))), CAST(32, MSB(DUP(op_AND_22)), DUP(op_AND_22))), CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_RSHIFT_31 = SHIFTRA(op_ADD_27, SN(32, 16)); + RzILOpPure *op_AND_34 = LOGAND(op_RSHIFT_31, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_AND_38 = LOGAND(CAST(32, MSB(CAST(16, MSB(op_AND_34), DUP(op_AND_34))), CAST(16, MSB(DUP(op_AND_34)), DUP(op_AND_34))), SN(32, 0xffff)); + RzILOpPure *op_MUL_41 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_42 = SHIFTL0(CAST(64, IL_FALSE, op_AND_38), op_MUL_41); + RzILOpPure *op_OR_44 = LOGOR(CAST(64, IL_FALSE, op_AND_16), op_LSHIFT_42); + RzILOpEffect *op_ASSIGN_46 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, op_OR_44)); + + // seq(h_tmp545; Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xffff << ...; + RzILOpEffect *seq_48 = op_ASSIGN_46; + + // seq(seq(h_tmp545; Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xffff ...; + RzILOpEffect *seq_49 = SEQN(2, seq_48, seq_8); + + // while ((i < 0x2)) { seq(seq(h_tmp545; Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xffff ... 
}; + RzILOpPure *op_LT_4 = SLT(VARL("i"), SN(32, 2)); + RzILOpEffect *for_50 = REPEAT(op_LT_4, seq_49); + + // seq(i = 0x0; while ((i < 0x2)) { seq(seq(h_tmp545; Rd = ((st32) ...; + RzILOpEffect *seq_51 = SEQN(2, op_ASSIGN_2, for_50); + + RzILOpEffect *instruction_sequence = seq_51; + return instruction_sequence; +} + +// Rd = vrndwh(Rss):sat +RzILOpEffect *hex_il_op_s2_vrndpackwhs(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: st32 i; + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_2 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_5 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp546 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_7 = SETL("h_tmp546", VARL("i")); + + // seq(h_tmp546 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_8 = SEQN(2, op_ASSIGN_hybrid_tmp_7, op_INC_5); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_59 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff))) + ((st64) 0x8000)), 0x0, 0x20) == ((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff))) + ((st64) 0x8000))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff))) + ((st64) 0x8000) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_MUL_22 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_RSHIFT_23 = SHIFTRA(Rss, op_MUL_22); + RzILOpPure *op_AND_25 = LOGAND(op_RSHIFT_23, SN(64, 0xffffffff)); + RzILOpPure *op_ADD_30 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_25), DUP(op_AND_25))), CAST(32, MSB(DUP(op_AND_25)), DUP(op_AND_25))), CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_MUL_37 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_RSHIFT_38 = SHIFTRA(DUP(Rss), op_MUL_37); + RzILOpPure *op_AND_40 = LOGAND(op_RSHIFT_38, SN(64, 0xffffffff)); + RzILOpPure *op_ADD_45 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_40), DUP(op_AND_40))), CAST(32, MSB(DUP(op_AND_40)), DUP(op_AND_40))), CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_EQ_46 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_ADD_30), SN(32, 0), SN(32, 0x20)), op_ADD_45); + RzILOpPure *op_MUL_61 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_RSHIFT_62 = SHIFTRA(DUP(Rss), op_MUL_61); + RzILOpPure *op_AND_64 = LOGAND(op_RSHIFT_62, SN(64, 0xffffffff)); + RzILOpPure *op_ADD_69 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_64), DUP(op_AND_64))), CAST(32, MSB(DUP(op_AND_64)), DUP(op_AND_64))), CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *op_LT_72 = SLT(op_ADD_69, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_77 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_78 = NEG(op_LSHIFT_77); + RzILOpPure *op_LSHIFT_83 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_86 = SUB(op_LSHIFT_83, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_87 = ITE(op_LT_72, op_NEG_78, op_SUB_86); + RzILOpEffect *gcc_expr_88 = BRANCH(op_EQ_46, EMPTY(), set_usr_field_call_59); + + // h_tmp547 = HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff))) + ((st64) 0x8000)), 0x0, 0x20) == ((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff))) + ((st64) 0x8000))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, 
((((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff))) + ((st64) 0x8000) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_90 = SETL("h_tmp547", cond_87); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rss > ...; + RzILOpEffect *seq_91 = SEQN(2, gcc_expr_88, op_ASSIGN_hybrid_tmp_90); + + // Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xffff << i * 0x10)))) | (((ut64) (((st32) ((st16) ((((sextract64(((ut64) ((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff))) + ((st64) 0x8000)), 0x0, 0x20) == ((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff))) + ((st64) 0x8000)) ? ((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff))) + ((st64) 0x8000) : h_tmp547) >> 0x10) & ((st64) 0xffff)))) & 0xffff)) << i * 0x10))); + RzILOpPure *op_MUL_12 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_13 = SHIFTL0(SN(64, 0xffff), op_MUL_12); + RzILOpPure *op_NOT_14 = LOGNOT(op_LSHIFT_13); + RzILOpPure *op_AND_16 = LOGAND(CAST(64, MSB(READ_REG(pkt, Rd_op, true)), READ_REG(pkt, Rd_op, true)), op_NOT_14); + RzILOpPure *op_MUL_48 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_RSHIFT_49 = SHIFTRA(DUP(Rss), op_MUL_48); + RzILOpPure *op_AND_51 = LOGAND(op_RSHIFT_49, SN(64, 0xffffffff)); + RzILOpPure *op_ADD_56 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_51), DUP(op_AND_51))), CAST(32, MSB(DUP(op_AND_51)), DUP(op_AND_51))), CAST(64, MSB(SN(32, 0x8000)), SN(32, 0x8000))); + RzILOpPure *cond_92 = ITE(DUP(op_EQ_46), op_ADD_56, VARL("h_tmp547")); + RzILOpPure *op_RSHIFT_96 = SHIFTRA(cond_92, SN(32, 16)); + RzILOpPure *op_AND_99 = LOGAND(op_RSHIFT_96, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_AND_103 = LOGAND(CAST(32, MSB(CAST(16, MSB(op_AND_99), DUP(op_AND_99))), CAST(16, MSB(DUP(op_AND_99)), DUP(op_AND_99))), SN(32, 0xffff)); + RzILOpPure *op_MUL_106 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_107 = SHIFTL0(CAST(64, IL_FALSE, op_AND_103), op_MUL_106); + RzILOpPure *op_OR_109 = LOGOR(CAST(64, IL_FALSE, 
op_AND_16), op_LSHIFT_107); + RzILOpEffect *op_ASSIGN_111 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, op_OR_109)); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((R ...; + RzILOpEffect *seq_112 = SEQN(2, seq_91, op_ASSIGN_111); + + // seq(h_tmp546; seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st6 ...; + RzILOpEffect *seq_114 = seq_112; + + // seq(seq(h_tmp546; seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ( ...; + RzILOpEffect *seq_115 = SEQN(2, seq_114, seq_8); + + // while ((i < 0x2)) { seq(seq(h_tmp546; seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ( ... }; + RzILOpPure *op_LT_4 = SLT(VARL("i"), SN(32, 2)); + RzILOpEffect *for_116 = REPEAT(op_LT_4, seq_115); + + // seq(i = 0x0; while ((i < 0x2)) { seq(seq(h_tmp546; seq(seq(HYB(g ...; + RzILOpEffect *seq_117 = SEQN(2, op_ASSIGN_2, for_116); + + RzILOpEffect *instruction_sequence = seq_117; + return instruction_sequence; +} + +// Rd = vsathb(Rss) +RzILOpEffect *hex_il_op_s2_vsathb(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: st32 i; + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_2 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_5 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp548 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_7 = SETL("h_tmp548", VARL("i")); + + // seq(h_tmp548 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_8 = SEQN(2, op_ASSIGN_hybrid_tmp_7, op_INC_5); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_51 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff)))), 0x0, 0x8) == ((st64) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff)))))) {{}} else {set_usr_field(bundle, 
HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st32) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff)))) < 0x0) ? (-(0x1 << 0x7)) : (0x1 << 0x7) - ((st64) 0x1))); + RzILOpPure *op_MUL_22 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_23 = SHIFTRA(Rss, op_MUL_22); + RzILOpPure *op_AND_26 = LOGAND(op_RSHIFT_23, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_34 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_35 = SHIFTRA(DUP(Rss), op_MUL_34); + RzILOpPure *op_AND_38 = LOGAND(op_RSHIFT_35, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_EQ_41 = EQ(SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_26), DUP(op_AND_26))), SN(32, 0), SN(32, 8)), CAST(64, MSB(CAST(16, MSB(op_AND_38), DUP(op_AND_38))), CAST(16, MSB(DUP(op_AND_38)), DUP(op_AND_38)))); + RzILOpPure *op_MUL_53 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_54 = SHIFTRA(DUP(Rss), op_MUL_53); + RzILOpPure *op_AND_57 = LOGAND(op_RSHIFT_54, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_LT_61 = SLT(CAST(32, MSB(CAST(16, MSB(op_AND_57), DUP(op_AND_57))), CAST(16, MSB(DUP(op_AND_57)), DUP(op_AND_57))), SN(32, 0)); + RzILOpPure *op_LSHIFT_66 = SHIFTL0(SN(64, 1), SN(32, 7)); + RzILOpPure *op_NEG_67 = NEG(op_LSHIFT_66); + RzILOpPure *op_LSHIFT_72 = SHIFTL0(SN(64, 1), SN(32, 7)); + RzILOpPure *op_SUB_75 = SUB(op_LSHIFT_72, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_76 = ITE(op_LT_61, op_NEG_67, op_SUB_75); + RzILOpEffect *gcc_expr_77 = BRANCH(op_EQ_41, EMPTY(), set_usr_field_call_51); + + // h_tmp549 = HYB(gcc_expr_if ((sextract64(((ut64) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff)))), 0x0, 0x8) == ((st64) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff)))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st32) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff)))) < 0x0) ? 
(-(0x1 << 0x7)) : (0x1 << 0x7) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_79 = SETL("h_tmp549", cond_76); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st16) ((Rss >> i * 0x ...; + RzILOpEffect *seq_80 = SEQN(2, gcc_expr_77, op_ASSIGN_hybrid_tmp_79); + + // Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xff << i * 0x8)))) | (((ut64) (((sextract64(((ut64) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff)))), 0x0, 0x8) == ((st64) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff))))) ? ((st64) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff)))) : h_tmp549) & 0xff)) << i * 0x8))); + RzILOpPure *op_MUL_12 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_LSHIFT_13 = SHIFTL0(SN(64, 0xff), op_MUL_12); + RzILOpPure *op_NOT_14 = LOGNOT(op_LSHIFT_13); + RzILOpPure *op_AND_16 = LOGAND(CAST(64, MSB(READ_REG(pkt, Rd_op, true)), READ_REG(pkt, Rd_op, true)), op_NOT_14); + RzILOpPure *op_MUL_43 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_44 = SHIFTRA(DUP(Rss), op_MUL_43); + RzILOpPure *op_AND_47 = LOGAND(op_RSHIFT_44, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *cond_82 = ITE(DUP(op_EQ_41), CAST(64, MSB(CAST(16, MSB(op_AND_47), DUP(op_AND_47))), CAST(16, MSB(DUP(op_AND_47)), DUP(op_AND_47))), VARL("h_tmp549")); + RzILOpPure *op_AND_84 = LOGAND(cond_82, SN(64, 0xff)); + RzILOpPure *op_MUL_87 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_LSHIFT_88 = SHIFTL0(CAST(64, IL_FALSE, op_AND_84), op_MUL_87); + RzILOpPure *op_OR_90 = LOGOR(CAST(64, IL_FALSE, op_AND_16), op_LSHIFT_88); + RzILOpEffect *op_ASSIGN_92 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, op_OR_90)); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st16) ((Rss >> i ...; + RzILOpEffect *seq_93 = SEQN(2, seq_80, op_ASSIGN_92); + + // seq(h_tmp548; seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st1 ...; + RzILOpEffect *seq_95 = seq_93; + + // seq(seq(h_tmp548; seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ( ...; + RzILOpEffect *seq_96 = SEQN(2, seq_95, seq_8); + + // while ((i < 0x4)) { 
seq(seq(h_tmp548; seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ( ... }; + RzILOpPure *op_LT_4 = SLT(VARL("i"), SN(32, 4)); + RzILOpEffect *for_97 = REPEAT(op_LT_4, seq_96); + + // seq(i = 0x0; while ((i < 0x4)) { seq(seq(h_tmp548; seq(seq(HYB(g ...; + RzILOpEffect *seq_98 = SEQN(2, op_ASSIGN_2, for_97); + + RzILOpEffect *instruction_sequence = seq_98; + return instruction_sequence; +} + +// Rdd = vsathb(Rss) +RzILOpEffect *hex_il_op_s2_vsathb_nopack(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: st32 i; + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_2 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_5 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp550 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_7 = SETL("h_tmp550", VARL("i")); + + // seq(h_tmp550 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_8 = SEQN(2, op_ASSIGN_hybrid_tmp_7, op_INC_5); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_50 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff)))), 0x0, 0x8) == ((st64) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff)))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st32) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff)))) < 0x0) ? 
(-(0x1 << 0x7)) : (0x1 << 0x7) - ((st64) 0x1))); + RzILOpPure *op_MUL_21 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_22 = SHIFTRA(Rss, op_MUL_21); + RzILOpPure *op_AND_25 = LOGAND(op_RSHIFT_22, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_33 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_34 = SHIFTRA(DUP(Rss), op_MUL_33); + RzILOpPure *op_AND_37 = LOGAND(op_RSHIFT_34, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_EQ_40 = EQ(SEXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_25), DUP(op_AND_25))), SN(32, 0), SN(32, 8)), CAST(64, MSB(CAST(16, MSB(op_AND_37), DUP(op_AND_37))), CAST(16, MSB(DUP(op_AND_37)), DUP(op_AND_37)))); + RzILOpPure *op_MUL_52 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_53 = SHIFTRA(DUP(Rss), op_MUL_52); + RzILOpPure *op_AND_56 = LOGAND(op_RSHIFT_53, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_LT_60 = SLT(CAST(32, MSB(CAST(16, MSB(op_AND_56), DUP(op_AND_56))), CAST(16, MSB(DUP(op_AND_56)), DUP(op_AND_56))), SN(32, 0)); + RzILOpPure *op_LSHIFT_65 = SHIFTL0(SN(64, 1), SN(32, 7)); + RzILOpPure *op_NEG_66 = NEG(op_LSHIFT_65); + RzILOpPure *op_LSHIFT_71 = SHIFTL0(SN(64, 1), SN(32, 7)); + RzILOpPure *op_SUB_74 = SUB(op_LSHIFT_71, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_75 = ITE(op_LT_60, op_NEG_66, op_SUB_74); + RzILOpEffect *gcc_expr_76 = BRANCH(op_EQ_40, EMPTY(), set_usr_field_call_50); + + // h_tmp551 = HYB(gcc_expr_if ((sextract64(((ut64) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff)))), 0x0, 0x8) == ((st64) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff)))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st32) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff)))) < 0x0) ? 
(-(0x1 << 0x7)) : (0x1 << 0x7) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_78 = SETL("h_tmp551", cond_75); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st16) ((Rss >> i * 0x ...; + RzILOpEffect *seq_79 = SEQN(2, gcc_expr_76, op_ASSIGN_hybrid_tmp_78); + + // Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * 0x10)))) | (((ut64) (((sextract64(((ut64) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff)))), 0x0, 0x8) == ((st64) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff))))) ? ((st64) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff)))) : h_tmp551) & ((st64) 0xffff))) << i * 0x10))); + RzILOpPure *op_MUL_12 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_13 = SHIFTL0(SN(64, 0xffff), op_MUL_12); + RzILOpPure *op_NOT_14 = LOGNOT(op_LSHIFT_13); + RzILOpPure *op_AND_15 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_14); + RzILOpPure *op_MUL_42 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_43 = SHIFTRA(DUP(Rss), op_MUL_42); + RzILOpPure *op_AND_46 = LOGAND(op_RSHIFT_43, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *cond_81 = ITE(DUP(op_EQ_40), CAST(64, MSB(CAST(16, MSB(op_AND_46), DUP(op_AND_46))), CAST(16, MSB(DUP(op_AND_46)), DUP(op_AND_46))), VARL("h_tmp551")); + RzILOpPure *op_AND_84 = LOGAND(cond_81, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_87 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_88 = SHIFTL0(CAST(64, IL_FALSE, op_AND_84), op_MUL_87); + RzILOpPure *op_OR_90 = LOGOR(CAST(64, IL_FALSE, op_AND_15), op_LSHIFT_88); + RzILOpEffect *op_ASSIGN_92 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_90)); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st16) ((Rss >> i ...; + RzILOpEffect *seq_93 = SEQN(2, seq_79, op_ASSIGN_92); + + // seq(h_tmp550; seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st1 ...; + RzILOpEffect *seq_95 = seq_93; + + // seq(seq(h_tmp550; seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ( ...; + RzILOpEffect *seq_96 = SEQN(2, seq_95, seq_8); + + // while ((i < 0x4)) { 
seq(seq(h_tmp550; seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ( ... }; + RzILOpPure *op_LT_4 = SLT(VARL("i"), SN(32, 4)); + RzILOpEffect *for_97 = REPEAT(op_LT_4, seq_96); + + // seq(i = 0x0; while ((i < 0x4)) { seq(seq(h_tmp550; seq(seq(HYB(g ...; + RzILOpEffect *seq_98 = SEQN(2, op_ASSIGN_2, for_97); + + RzILOpEffect *instruction_sequence = seq_98; + return instruction_sequence; +} + +// Rd = vsathub(Rss) +RzILOpEffect *hex_il_op_s2_vsathub(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: st32 i; + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_2 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_5 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp552 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_7 = SETL("h_tmp552", VARL("i")); + + // seq(h_tmp552 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_8 = SEQN(2, op_ASSIGN_hybrid_tmp_7, op_INC_5); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_51 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((extract64(((ut64) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff)))), 0x0, 0x8) == ((ut64) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff)))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st32) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff)))) < 0x0) ? 
((st64) 0x0) : (0x1 << 0x8) - ((st64) 0x1))); + RzILOpPure *op_MUL_22 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_23 = SHIFTRA(Rss, op_MUL_22); + RzILOpPure *op_AND_26 = LOGAND(op_RSHIFT_23, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_34 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_35 = SHIFTRA(DUP(Rss), op_MUL_34); + RzILOpPure *op_AND_38 = LOGAND(op_RSHIFT_35, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_EQ_41 = EQ(EXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_26), DUP(op_AND_26))), SN(32, 0), SN(32, 8)), CAST(64, IL_FALSE, CAST(16, MSB(op_AND_38), DUP(op_AND_38)))); + RzILOpPure *op_MUL_53 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_54 = SHIFTRA(DUP(Rss), op_MUL_53); + RzILOpPure *op_AND_57 = LOGAND(op_RSHIFT_54, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_LT_61 = SLT(CAST(32, MSB(CAST(16, MSB(op_AND_57), DUP(op_AND_57))), CAST(16, MSB(DUP(op_AND_57)), DUP(op_AND_57))), SN(32, 0)); + RzILOpPure *op_LSHIFT_65 = SHIFTL0(SN(64, 1), SN(32, 8)); + RzILOpPure *op_SUB_68 = SUB(op_LSHIFT_65, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_70 = ITE(op_LT_61, CAST(64, MSB(SN(32, 0)), SN(32, 0)), op_SUB_68); + RzILOpEffect *gcc_expr_71 = BRANCH(op_EQ_41, EMPTY(), set_usr_field_call_51); + + // h_tmp553 = HYB(gcc_expr_if ((extract64(((ut64) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff)))), 0x0, 0x8) == ((ut64) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff)))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st32) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff)))) < 0x0) ? 
((st64) 0x0) : (0x1 << 0x8) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_73 = SETL("h_tmp553", cond_70); + + // seq(HYB(gcc_expr_if ((extract64(((ut64) ((st16) ((Rss >> i * 0x1 ...; + RzILOpEffect *seq_74 = SEQN(2, gcc_expr_71, op_ASSIGN_hybrid_tmp_73); + + // Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xff << i * 0x8)))) | (((ut64) (((extract64(((ut64) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff)))), 0x0, 0x8) == ((ut64) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff))))) ? ((st64) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff)))) : h_tmp553) & 0xff)) << i * 0x8))); + RzILOpPure *op_MUL_12 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_LSHIFT_13 = SHIFTL0(SN(64, 0xff), op_MUL_12); + RzILOpPure *op_NOT_14 = LOGNOT(op_LSHIFT_13); + RzILOpPure *op_AND_16 = LOGAND(CAST(64, MSB(READ_REG(pkt, Rd_op, true)), READ_REG(pkt, Rd_op, true)), op_NOT_14); + RzILOpPure *op_MUL_43 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_44 = SHIFTRA(DUP(Rss), op_MUL_43); + RzILOpPure *op_AND_47 = LOGAND(op_RSHIFT_44, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *cond_76 = ITE(DUP(op_EQ_41), CAST(64, MSB(CAST(16, MSB(op_AND_47), DUP(op_AND_47))), CAST(16, MSB(DUP(op_AND_47)), DUP(op_AND_47))), VARL("h_tmp553")); + RzILOpPure *op_AND_78 = LOGAND(cond_76, SN(64, 0xff)); + RzILOpPure *op_MUL_81 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_LSHIFT_82 = SHIFTL0(CAST(64, IL_FALSE, op_AND_78), op_MUL_81); + RzILOpPure *op_OR_84 = LOGOR(CAST(64, IL_FALSE, op_AND_16), op_LSHIFT_82); + RzILOpEffect *op_ASSIGN_86 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, op_OR_84)); + + // seq(seq(HYB(gcc_expr_if ((extract64(((ut64) ((st16) ((Rss >> i * ...; + RzILOpEffect *seq_87 = SEQN(2, seq_74, op_ASSIGN_86); + + // seq(h_tmp552; seq(seq(HYB(gcc_expr_if ((extract64(((ut64) ((st16 ...; + RzILOpEffect *seq_89 = seq_87; + + // seq(seq(h_tmp552; seq(seq(HYB(gcc_expr_if ((extract64(((ut64) (( ...; + RzILOpEffect *seq_90 = SEQN(2, seq_89, seq_8); + + // while ((i < 0x4)) { seq(seq(h_tmp552; 
seq(seq(HYB(gcc_expr_if ((extract64(((ut64) (( ... }; + RzILOpPure *op_LT_4 = SLT(VARL("i"), SN(32, 4)); + RzILOpEffect *for_91 = REPEAT(op_LT_4, seq_90); + + // seq(i = 0x0; while ((i < 0x4)) { seq(seq(h_tmp552; seq(seq(HYB(g ...; + RzILOpEffect *seq_92 = SEQN(2, op_ASSIGN_2, for_91); + + RzILOpEffect *instruction_sequence = seq_92; + return instruction_sequence; +} + +// Rdd = vsathub(Rss) +RzILOpEffect *hex_il_op_s2_vsathub_nopack(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: st32 i; + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_2 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_5 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp554 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_7 = SETL("h_tmp554", VARL("i")); + + // seq(h_tmp554 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_8 = SEQN(2, op_ASSIGN_hybrid_tmp_7, op_INC_5); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_50 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((extract64(((ut64) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff)))), 0x0, 0x8) == ((ut64) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff)))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st32) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff)))) < 0x0) ? 
((st64) 0x0) : (0x1 << 0x8) - ((st64) 0x1))); + RzILOpPure *op_MUL_21 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_22 = SHIFTRA(Rss, op_MUL_21); + RzILOpPure *op_AND_25 = LOGAND(op_RSHIFT_22, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_33 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_34 = SHIFTRA(DUP(Rss), op_MUL_33); + RzILOpPure *op_AND_37 = LOGAND(op_RSHIFT_34, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_EQ_40 = EQ(EXTRACT64(CAST(64, IL_FALSE, CAST(16, MSB(op_AND_25), DUP(op_AND_25))), SN(32, 0), SN(32, 8)), CAST(64, IL_FALSE, CAST(16, MSB(op_AND_37), DUP(op_AND_37)))); + RzILOpPure *op_MUL_52 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_53 = SHIFTRA(DUP(Rss), op_MUL_52); + RzILOpPure *op_AND_56 = LOGAND(op_RSHIFT_53, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_LT_60 = SLT(CAST(32, MSB(CAST(16, MSB(op_AND_56), DUP(op_AND_56))), CAST(16, MSB(DUP(op_AND_56)), DUP(op_AND_56))), SN(32, 0)); + RzILOpPure *op_LSHIFT_64 = SHIFTL0(SN(64, 1), SN(32, 8)); + RzILOpPure *op_SUB_67 = SUB(op_LSHIFT_64, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_69 = ITE(op_LT_60, CAST(64, MSB(SN(32, 0)), SN(32, 0)), op_SUB_67); + RzILOpEffect *gcc_expr_70 = BRANCH(op_EQ_40, EMPTY(), set_usr_field_call_50); + + // h_tmp555 = HYB(gcc_expr_if ((extract64(((ut64) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff)))), 0x0, 0x8) == ((ut64) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff)))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st32) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff)))) < 0x0) ? 
((st64) 0x0) : (0x1 << 0x8) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_72 = SETL("h_tmp555", cond_69); + + // seq(HYB(gcc_expr_if ((extract64(((ut64) ((st16) ((Rss >> i * 0x1 ...; + RzILOpEffect *seq_73 = SEQN(2, gcc_expr_70, op_ASSIGN_hybrid_tmp_72); + + // Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * 0x10)))) | (((ut64) (((extract64(((ut64) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff)))), 0x0, 0x8) == ((ut64) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff))))) ? ((st64) ((st16) ((Rss >> i * 0x10) & ((st64) 0xffff)))) : h_tmp555) & ((st64) 0xffff))) << i * 0x10))); + RzILOpPure *op_MUL_12 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_13 = SHIFTL0(SN(64, 0xffff), op_MUL_12); + RzILOpPure *op_NOT_14 = LOGNOT(op_LSHIFT_13); + RzILOpPure *op_AND_15 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_14); + RzILOpPure *op_MUL_42 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_43 = SHIFTRA(DUP(Rss), op_MUL_42); + RzILOpPure *op_AND_46 = LOGAND(op_RSHIFT_43, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *cond_75 = ITE(DUP(op_EQ_40), CAST(64, MSB(CAST(16, MSB(op_AND_46), DUP(op_AND_46))), CAST(16, MSB(DUP(op_AND_46)), DUP(op_AND_46))), VARL("h_tmp555")); + RzILOpPure *op_AND_78 = LOGAND(cond_75, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_81 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_82 = SHIFTL0(CAST(64, IL_FALSE, op_AND_78), op_MUL_81); + RzILOpPure *op_OR_84 = LOGOR(CAST(64, IL_FALSE, op_AND_15), op_LSHIFT_82); + RzILOpEffect *op_ASSIGN_86 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_84)); + + // seq(seq(HYB(gcc_expr_if ((extract64(((ut64) ((st16) ((Rss >> i * ...; + RzILOpEffect *seq_87 = SEQN(2, seq_73, op_ASSIGN_86); + + // seq(h_tmp554; seq(seq(HYB(gcc_expr_if ((extract64(((ut64) ((st16 ...; + RzILOpEffect *seq_89 = seq_87; + + // seq(seq(h_tmp554; seq(seq(HYB(gcc_expr_if ((extract64(((ut64) (( ...; + RzILOpEffect *seq_90 = SEQN(2, seq_89, seq_8); + + // while ((i < 0x4)) { 
seq(seq(h_tmp554; seq(seq(HYB(gcc_expr_if ((extract64(((ut64) (( ... }; + RzILOpPure *op_LT_4 = SLT(VARL("i"), SN(32, 4)); + RzILOpEffect *for_91 = REPEAT(op_LT_4, seq_90); + + // seq(i = 0x0; while ((i < 0x4)) { seq(seq(h_tmp554; seq(seq(HYB(g ...; + RzILOpEffect *seq_92 = SEQN(2, op_ASSIGN_2, for_91); + + RzILOpEffect *instruction_sequence = seq_92; + return instruction_sequence; +} + +// Rd = vsatwh(Rss) +RzILOpEffect *hex_il_op_s2_vsatwh(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: st32 i; + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_2 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_5 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp556 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_7 = SETL("h_tmp556", VARL("i")); + + // seq(h_tmp556 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_8 = SEQN(2, op_ASSIGN_hybrid_tmp_7, op_INC_5); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_50 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff)))), 0x0, 0x10) == ((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff))) < ((st64) 0x0)) ? 
(-(0x1 << 0xf)) : (0x1 << 0xf) - ((st64) 0x1))); + RzILOpPure *op_MUL_22 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_RSHIFT_23 = SHIFTRA(Rss, op_MUL_22); + RzILOpPure *op_AND_25 = LOGAND(op_RSHIFT_23, SN(64, 0xffffffff)); + RzILOpPure *op_MUL_34 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_RSHIFT_35 = SHIFTRA(DUP(Rss), op_MUL_34); + RzILOpPure *op_AND_37 = LOGAND(op_RSHIFT_35, SN(64, 0xffffffff)); + RzILOpPure *op_EQ_40 = EQ(SEXTRACT64(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(op_AND_25), DUP(op_AND_25))), CAST(32, MSB(DUP(op_AND_25)), DUP(op_AND_25)))), SN(32, 0), SN(32, 16)), CAST(64, MSB(CAST(32, MSB(op_AND_37), DUP(op_AND_37))), CAST(32, MSB(DUP(op_AND_37)), DUP(op_AND_37)))); + RzILOpPure *op_MUL_52 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_RSHIFT_53 = SHIFTRA(DUP(Rss), op_MUL_52); + RzILOpPure *op_AND_55 = LOGAND(op_RSHIFT_53, SN(64, 0xffffffff)); + RzILOpPure *op_LT_60 = SLT(CAST(64, MSB(CAST(32, MSB(op_AND_55), DUP(op_AND_55))), CAST(32, MSB(DUP(op_AND_55)), DUP(op_AND_55))), CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_65 = SHIFTL0(SN(64, 1), SN(32, 15)); + RzILOpPure *op_NEG_66 = NEG(op_LSHIFT_65); + RzILOpPure *op_LSHIFT_71 = SHIFTL0(SN(64, 1), SN(32, 15)); + RzILOpPure *op_SUB_74 = SUB(op_LSHIFT_71, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_75 = ITE(op_LT_60, op_NEG_66, op_SUB_74); + RzILOpEffect *gcc_expr_76 = BRANCH(op_EQ_40, EMPTY(), set_usr_field_call_50); + + // h_tmp557 = HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff)))), 0x0, 0x10) == ((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff))) < ((st64) 0x0)) ? 
(-(0x1 << 0xf)) : (0x1 << 0xf) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_78 = SETL("h_tmp557", cond_75); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rss > ...; + RzILOpEffect *seq_79 = SEQN(2, gcc_expr_76, op_ASSIGN_hybrid_tmp_78); + + // Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xffff << i * 0x10)))) | (((ut64) (((sextract64(((ut64) ((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff)))), 0x0, 0x10) == ((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff)))) ? ((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff))) : h_tmp557) & ((st64) 0xffff))) << i * 0x10))); + RzILOpPure *op_MUL_12 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_13 = SHIFTL0(SN(64, 0xffff), op_MUL_12); + RzILOpPure *op_NOT_14 = LOGNOT(op_LSHIFT_13); + RzILOpPure *op_AND_16 = LOGAND(CAST(64, MSB(READ_REG(pkt, Rd_op, true)), READ_REG(pkt, Rd_op, true)), op_NOT_14); + RzILOpPure *op_MUL_42 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_RSHIFT_43 = SHIFTRA(DUP(Rss), op_MUL_42); + RzILOpPure *op_AND_45 = LOGAND(op_RSHIFT_43, SN(64, 0xffffffff)); + RzILOpPure *cond_80 = ITE(DUP(op_EQ_40), CAST(64, MSB(CAST(32, MSB(op_AND_45), DUP(op_AND_45))), CAST(32, MSB(DUP(op_AND_45)), DUP(op_AND_45))), VARL("h_tmp557")); + RzILOpPure *op_AND_83 = LOGAND(cond_80, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_86 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_87 = SHIFTL0(CAST(64, IL_FALSE, op_AND_83), op_MUL_86); + RzILOpPure *op_OR_89 = LOGOR(CAST(64, IL_FALSE, op_AND_16), op_LSHIFT_87); + RzILOpEffect *op_ASSIGN_91 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, op_OR_89)); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((R ...; + RzILOpEffect *seq_92 = SEQN(2, seq_79, op_ASSIGN_91); + + // seq(h_tmp556; seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st6 ...; + RzILOpEffect *seq_94 = seq_92; + + // seq(seq(h_tmp556; seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ( ...; + RzILOpEffect *seq_95 = SEQN(2, seq_94, seq_8); + + // while ((i < 
0x2)) { seq(seq(h_tmp556; seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ( ... }; + RzILOpPure *op_LT_4 = SLT(VARL("i"), SN(32, 2)); + RzILOpEffect *for_96 = REPEAT(op_LT_4, seq_95); + + // seq(i = 0x0; while ((i < 0x2)) { seq(seq(h_tmp556; seq(seq(HYB(g ...; + RzILOpEffect *seq_97 = SEQN(2, op_ASSIGN_2, for_96); + + RzILOpEffect *instruction_sequence = seq_97; + return instruction_sequence; +} + +// Rdd = vsatwh(Rss) +RzILOpEffect *hex_il_op_s2_vsatwh_nopack(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: st32 i; + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_2 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_5 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp558 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_7 = SETL("h_tmp558", VARL("i")); + + // seq(h_tmp558 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_8 = SEQN(2, op_ASSIGN_hybrid_tmp_7, op_INC_5); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_49 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff)))), 0x0, 0x10) == ((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff))) < ((st64) 0x0)) ? 
(-(0x1 << 0xf)) : (0x1 << 0xf) - ((st64) 0x1))); + RzILOpPure *op_MUL_21 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_RSHIFT_22 = SHIFTRA(Rss, op_MUL_21); + RzILOpPure *op_AND_24 = LOGAND(op_RSHIFT_22, SN(64, 0xffffffff)); + RzILOpPure *op_MUL_33 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_RSHIFT_34 = SHIFTRA(DUP(Rss), op_MUL_33); + RzILOpPure *op_AND_36 = LOGAND(op_RSHIFT_34, SN(64, 0xffffffff)); + RzILOpPure *op_EQ_39 = EQ(SEXTRACT64(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(op_AND_24), DUP(op_AND_24))), CAST(32, MSB(DUP(op_AND_24)), DUP(op_AND_24)))), SN(32, 0), SN(32, 16)), CAST(64, MSB(CAST(32, MSB(op_AND_36), DUP(op_AND_36))), CAST(32, MSB(DUP(op_AND_36)), DUP(op_AND_36)))); + RzILOpPure *op_MUL_51 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_RSHIFT_52 = SHIFTRA(DUP(Rss), op_MUL_51); + RzILOpPure *op_AND_54 = LOGAND(op_RSHIFT_52, SN(64, 0xffffffff)); + RzILOpPure *op_LT_59 = SLT(CAST(64, MSB(CAST(32, MSB(op_AND_54), DUP(op_AND_54))), CAST(32, MSB(DUP(op_AND_54)), DUP(op_AND_54))), CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_64 = SHIFTL0(SN(64, 1), SN(32, 15)); + RzILOpPure *op_NEG_65 = NEG(op_LSHIFT_64); + RzILOpPure *op_LSHIFT_70 = SHIFTL0(SN(64, 1), SN(32, 15)); + RzILOpPure *op_SUB_73 = SUB(op_LSHIFT_70, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_74 = ITE(op_LT_59, op_NEG_65, op_SUB_73); + RzILOpEffect *gcc_expr_75 = BRANCH(op_EQ_39, EMPTY(), set_usr_field_call_49); + + // h_tmp559 = HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff)))), 0x0, 0x10) == ((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff))) < ((st64) 0x0)) ? 
(-(0x1 << 0xf)) : (0x1 << 0xf) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_77 = SETL("h_tmp559", cond_74); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rss > ...; + RzILOpEffect *seq_78 = SEQN(2, gcc_expr_75, op_ASSIGN_hybrid_tmp_77); + + // Rdd = ((Rdd & (~(0xffffffff << i * 0x20))) | ((((sextract64(((ut64) ((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff)))), 0x0, 0x10) == ((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff)))) ? ((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff))) : h_tmp559) & 0xffffffff) << i * 0x20)); + RzILOpPure *op_MUL_12 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_LSHIFT_13 = SHIFTL0(SN(64, 0xffffffff), op_MUL_12); + RzILOpPure *op_NOT_14 = LOGNOT(op_LSHIFT_13); + RzILOpPure *op_AND_15 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_14); + RzILOpPure *op_MUL_41 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_RSHIFT_42 = SHIFTRA(DUP(Rss), op_MUL_41); + RzILOpPure *op_AND_44 = LOGAND(op_RSHIFT_42, SN(64, 0xffffffff)); + RzILOpPure *cond_79 = ITE(DUP(op_EQ_39), CAST(64, MSB(CAST(32, MSB(op_AND_44), DUP(op_AND_44))), CAST(32, MSB(DUP(op_AND_44)), DUP(op_AND_44))), VARL("h_tmp559")); + RzILOpPure *op_AND_81 = LOGAND(cond_79, SN(64, 0xffffffff)); + RzILOpPure *op_MUL_83 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_LSHIFT_84 = SHIFTL0(op_AND_81, op_MUL_83); + RzILOpPure *op_OR_85 = LOGOR(op_AND_15, op_LSHIFT_84); + RzILOpEffect *op_ASSIGN_86 = WRITE_REG(bundle, Rdd_op, op_OR_85); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((R ...; + RzILOpEffect *seq_87 = SEQN(2, seq_78, op_ASSIGN_86); + + // seq(h_tmp558; seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st6 ...; + RzILOpEffect *seq_89 = seq_87; + + // seq(seq(h_tmp558; seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ( ...; + RzILOpEffect *seq_90 = SEQN(2, seq_89, seq_8); + + // while ((i < 0x2)) { seq(seq(h_tmp558; seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ( ... 
}; + RzILOpPure *op_LT_4 = SLT(VARL("i"), SN(32, 2)); + RzILOpEffect *for_91 = REPEAT(op_LT_4, seq_90); + + // seq(i = 0x0; while ((i < 0x2)) { seq(seq(h_tmp558; seq(seq(HYB(g ...; + RzILOpEffect *seq_92 = SEQN(2, op_ASSIGN_2, for_91); + + RzILOpEffect *instruction_sequence = seq_92; + return instruction_sequence; +} + +// Rd = vsatwuh(Rss) +RzILOpEffect *hex_il_op_s2_vsatwuh(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: st32 i; + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_2 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_5 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp560 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_7 = SETL("h_tmp560", VARL("i")); + + // seq(h_tmp560 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_8 = SEQN(2, op_ASSIGN_hybrid_tmp_7, op_INC_5); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_51 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((extract64(((ut64) ((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff)))), 0x0, 0x10) == ((ut64) ((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff)))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff))) < ((st64) 0x0)) ? 
((st64) 0x0) : (0x1 << 0x10) - ((st64) 0x1))); + RzILOpPure *op_MUL_22 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_RSHIFT_23 = SHIFTRA(Rss, op_MUL_22); + RzILOpPure *op_AND_25 = LOGAND(op_RSHIFT_23, SN(64, 0xffffffff)); + RzILOpPure *op_MUL_34 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_RSHIFT_35 = SHIFTRA(DUP(Rss), op_MUL_34); + RzILOpPure *op_AND_37 = LOGAND(op_RSHIFT_35, SN(64, 0xffffffff)); + RzILOpPure *op_EQ_41 = EQ(EXTRACT64(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(op_AND_25), DUP(op_AND_25))), CAST(32, MSB(DUP(op_AND_25)), DUP(op_AND_25)))), SN(32, 0), SN(32, 16)), CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(op_AND_37), DUP(op_AND_37))), CAST(32, MSB(DUP(op_AND_37)), DUP(op_AND_37))))); + RzILOpPure *op_MUL_53 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_RSHIFT_54 = SHIFTRA(DUP(Rss), op_MUL_53); + RzILOpPure *op_AND_56 = LOGAND(op_RSHIFT_54, SN(64, 0xffffffff)); + RzILOpPure *op_LT_61 = SLT(CAST(64, MSB(CAST(32, MSB(op_AND_56), DUP(op_AND_56))), CAST(32, MSB(DUP(op_AND_56)), DUP(op_AND_56))), CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_65 = SHIFTL0(SN(64, 1), SN(32, 16)); + RzILOpPure *op_SUB_68 = SUB(op_LSHIFT_65, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_70 = ITE(op_LT_61, CAST(64, MSB(SN(32, 0)), SN(32, 0)), op_SUB_68); + RzILOpEffect *gcc_expr_71 = BRANCH(op_EQ_41, EMPTY(), set_usr_field_call_51); + + // h_tmp561 = HYB(gcc_expr_if ((extract64(((ut64) ((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff)))), 0x0, 0x10) == ((ut64) ((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff)))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff))) < ((st64) 0x0)) ? 
((st64) 0x0) : (0x1 << 0x10) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_73 = SETL("h_tmp561", cond_70); + + // seq(HYB(gcc_expr_if ((extract64(((ut64) ((st64) ((st32) ((Rss >> ...; + RzILOpEffect *seq_74 = SEQN(2, gcc_expr_71, op_ASSIGN_hybrid_tmp_73); + + // Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xffff << i * 0x10)))) | (((ut64) (((extract64(((ut64) ((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff)))), 0x0, 0x10) == ((ut64) ((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff))))) ? ((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff))) : h_tmp561) & ((st64) 0xffff))) << i * 0x10))); + RzILOpPure *op_MUL_12 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_13 = SHIFTL0(SN(64, 0xffff), op_MUL_12); + RzILOpPure *op_NOT_14 = LOGNOT(op_LSHIFT_13); + RzILOpPure *op_AND_16 = LOGAND(CAST(64, MSB(READ_REG(pkt, Rd_op, true)), READ_REG(pkt, Rd_op, true)), op_NOT_14); + RzILOpPure *op_MUL_43 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_RSHIFT_44 = SHIFTRA(DUP(Rss), op_MUL_43); + RzILOpPure *op_AND_46 = LOGAND(op_RSHIFT_44, SN(64, 0xffffffff)); + RzILOpPure *cond_75 = ITE(DUP(op_EQ_41), CAST(64, MSB(CAST(32, MSB(op_AND_46), DUP(op_AND_46))), CAST(32, MSB(DUP(op_AND_46)), DUP(op_AND_46))), VARL("h_tmp561")); + RzILOpPure *op_AND_78 = LOGAND(cond_75, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_MUL_81 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_82 = SHIFTL0(CAST(64, IL_FALSE, op_AND_78), op_MUL_81); + RzILOpPure *op_OR_84 = LOGOR(CAST(64, IL_FALSE, op_AND_16), op_LSHIFT_82); + RzILOpEffect *op_ASSIGN_86 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, op_OR_84)); + + // seq(seq(HYB(gcc_expr_if ((extract64(((ut64) ((st64) ((st32) ((Rs ...; + RzILOpEffect *seq_87 = SEQN(2, seq_74, op_ASSIGN_86); + + // seq(h_tmp560; seq(seq(HYB(gcc_expr_if ((extract64(((ut64) ((st64 ...; + RzILOpEffect *seq_89 = seq_87; + + // seq(seq(h_tmp560; seq(seq(HYB(gcc_expr_if ((extract64(((ut64) (( ...; + RzILOpEffect *seq_90 = SEQN(2, seq_89, seq_8); + + // while 
((i < 0x2)) { seq(seq(h_tmp560; seq(seq(HYB(gcc_expr_if ((extract64(((ut64) (( ... }; + RzILOpPure *op_LT_4 = SLT(VARL("i"), SN(32, 2)); + RzILOpEffect *for_91 = REPEAT(op_LT_4, seq_90); + + // seq(i = 0x0; while ((i < 0x2)) { seq(seq(h_tmp560; seq(seq(HYB(g ...; + RzILOpEffect *seq_92 = SEQN(2, op_ASSIGN_2, for_91); + + RzILOpEffect *instruction_sequence = seq_92; + return instruction_sequence; +} + +// Rdd = vsatwuh(Rss) +RzILOpEffect *hex_il_op_s2_vsatwuh_nopack(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: st32 i; + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_2 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_5 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp562 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_7 = SETL("h_tmp562", VARL("i")); + + // seq(h_tmp562 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_8 = SEQN(2, op_ASSIGN_hybrid_tmp_7, op_INC_5); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_50 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((extract64(((ut64) ((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff)))), 0x0, 0x10) == ((ut64) ((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff)))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff))) < ((st64) 0x0)) ? 
((st64) 0x0) : (0x1 << 0x10) - ((st64) 0x1))); + RzILOpPure *op_MUL_21 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_RSHIFT_22 = SHIFTRA(Rss, op_MUL_21); + RzILOpPure *op_AND_24 = LOGAND(op_RSHIFT_22, SN(64, 0xffffffff)); + RzILOpPure *op_MUL_33 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_RSHIFT_34 = SHIFTRA(DUP(Rss), op_MUL_33); + RzILOpPure *op_AND_36 = LOGAND(op_RSHIFT_34, SN(64, 0xffffffff)); + RzILOpPure *op_EQ_40 = EQ(EXTRACT64(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(op_AND_24), DUP(op_AND_24))), CAST(32, MSB(DUP(op_AND_24)), DUP(op_AND_24)))), SN(32, 0), SN(32, 16)), CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(op_AND_36), DUP(op_AND_36))), CAST(32, MSB(DUP(op_AND_36)), DUP(op_AND_36))))); + RzILOpPure *op_MUL_52 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_RSHIFT_53 = SHIFTRA(DUP(Rss), op_MUL_52); + RzILOpPure *op_AND_55 = LOGAND(op_RSHIFT_53, SN(64, 0xffffffff)); + RzILOpPure *op_LT_60 = SLT(CAST(64, MSB(CAST(32, MSB(op_AND_55), DUP(op_AND_55))), CAST(32, MSB(DUP(op_AND_55)), DUP(op_AND_55))), CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_64 = SHIFTL0(SN(64, 1), SN(32, 16)); + RzILOpPure *op_SUB_67 = SUB(op_LSHIFT_64, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_69 = ITE(op_LT_60, CAST(64, MSB(SN(32, 0)), SN(32, 0)), op_SUB_67); + RzILOpEffect *gcc_expr_70 = BRANCH(op_EQ_40, EMPTY(), set_usr_field_call_50); + + // h_tmp563 = HYB(gcc_expr_if ((extract64(((ut64) ((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff)))), 0x0, 0x10) == ((ut64) ((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff)))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff))) < ((st64) 0x0)) ? 
((st64) 0x0) : (0x1 << 0x10) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_72 = SETL("h_tmp563", cond_69); + + // seq(HYB(gcc_expr_if ((extract64(((ut64) ((st64) ((st32) ((Rss >> ...; + RzILOpEffect *seq_73 = SEQN(2, gcc_expr_70, op_ASSIGN_hybrid_tmp_72); + + // Rdd = ((Rdd & (~(0xffffffff << i * 0x20))) | ((((extract64(((ut64) ((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff)))), 0x0, 0x10) == ((ut64) ((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff))))) ? ((st64) ((st32) ((Rss >> i * 0x20) & 0xffffffff))) : h_tmp563) & 0xffffffff) << i * 0x20)); + RzILOpPure *op_MUL_12 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_LSHIFT_13 = SHIFTL0(SN(64, 0xffffffff), op_MUL_12); + RzILOpPure *op_NOT_14 = LOGNOT(op_LSHIFT_13); + RzILOpPure *op_AND_15 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_14); + RzILOpPure *op_MUL_42 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_RSHIFT_43 = SHIFTRA(DUP(Rss), op_MUL_42); + RzILOpPure *op_AND_45 = LOGAND(op_RSHIFT_43, SN(64, 0xffffffff)); + RzILOpPure *cond_74 = ITE(DUP(op_EQ_40), CAST(64, MSB(CAST(32, MSB(op_AND_45), DUP(op_AND_45))), CAST(32, MSB(DUP(op_AND_45)), DUP(op_AND_45))), VARL("h_tmp563")); + RzILOpPure *op_AND_76 = LOGAND(cond_74, SN(64, 0xffffffff)); + RzILOpPure *op_MUL_78 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_LSHIFT_79 = SHIFTL0(op_AND_76, op_MUL_78); + RzILOpPure *op_OR_80 = LOGOR(op_AND_15, op_LSHIFT_79); + RzILOpEffect *op_ASSIGN_81 = WRITE_REG(bundle, Rdd_op, op_OR_80); + + // seq(seq(HYB(gcc_expr_if ((extract64(((ut64) ((st64) ((st32) ((Rs ...; + RzILOpEffect *seq_82 = SEQN(2, seq_73, op_ASSIGN_81); + + // seq(h_tmp562; seq(seq(HYB(gcc_expr_if ((extract64(((ut64) ((st64 ...; + RzILOpEffect *seq_84 = seq_82; + + // seq(seq(h_tmp562; seq(seq(HYB(gcc_expr_if ((extract64(((ut64) (( ...; + RzILOpEffect *seq_85 = SEQN(2, seq_84, seq_8); + + // while ((i < 0x2)) { seq(seq(h_tmp562; seq(seq(HYB(gcc_expr_if ((extract64(((ut64) (( ... 
}; + RzILOpPure *op_LT_4 = SLT(VARL("i"), SN(32, 2)); + RzILOpEffect *for_86 = REPEAT(op_LT_4, seq_85); + + // seq(i = 0x0; while ((i < 0x2)) { seq(seq(h_tmp562; seq(seq(HYB(g ...; + RzILOpEffect *seq_87 = SEQN(2, op_ASSIGN_2, for_86); + + RzILOpEffect *instruction_sequence = seq_87; + return instruction_sequence; +} + +// Rd = vsplatb(Rs) +RzILOpEffect *hex_il_op_s2_vsplatrb(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: st32 i; + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_2 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_5 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp564 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_7 = SETL("h_tmp564", VARL("i")); + + // seq(h_tmp564 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_8 = SEQN(2, op_ASSIGN_hybrid_tmp_7, op_INC_5); + + // Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xff << i * 0x8)))) | (((ut64) (((st64) ((st32) ((st8) ((Rs >> 0x0) & 0xff)))) & 0xff)) << i * 0x8))); + RzILOpPure *op_MUL_12 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_LSHIFT_13 = SHIFTL0(SN(64, 0xff), op_MUL_12); + RzILOpPure *op_NOT_14 = LOGNOT(op_LSHIFT_13); + RzILOpPure *op_AND_16 = LOGAND(CAST(64, MSB(READ_REG(pkt, Rd_op, true)), READ_REG(pkt, Rd_op, true)), op_NOT_14); + RzILOpPure *op_RSHIFT_21 = SHIFTRA(Rs, SN(32, 0)); + RzILOpPure *op_AND_23 = LOGAND(op_RSHIFT_21, SN(32, 0xff)); + RzILOpPure *op_AND_28 = LOGAND(CAST(64, MSB(CAST(32, MSB(CAST(8, MSB(op_AND_23), DUP(op_AND_23))), CAST(8, MSB(DUP(op_AND_23)), DUP(op_AND_23)))), CAST(32, MSB(CAST(8, MSB(DUP(op_AND_23)), DUP(op_AND_23))), CAST(8, MSB(DUP(op_AND_23)), DUP(op_AND_23)))), SN(64, 0xff)); + RzILOpPure *op_MUL_31 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_LSHIFT_32 = SHIFTL0(CAST(64, IL_FALSE, op_AND_28), op_MUL_31); + RzILOpPure *op_OR_34 = LOGOR(CAST(64, IL_FALSE, 
op_AND_16), op_LSHIFT_32); + RzILOpEffect *op_ASSIGN_36 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, op_OR_34)); + + // seq(h_tmp564; Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xff << i ...; + RzILOpEffect *seq_38 = op_ASSIGN_36; + + // seq(seq(h_tmp564; Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xff < ...; + RzILOpEffect *seq_39 = SEQN(2, seq_38, seq_8); + + // while ((i < 0x4)) { seq(seq(h_tmp564; Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xff < ... }; + RzILOpPure *op_LT_4 = SLT(VARL("i"), SN(32, 4)); + RzILOpEffect *for_40 = REPEAT(op_LT_4, seq_39); + + // seq(i = 0x0; while ((i < 0x4)) { seq(seq(h_tmp564; Rd = ((st32) ...; + RzILOpEffect *seq_41 = SEQN(2, op_ASSIGN_2, for_40); + + RzILOpEffect *instruction_sequence = seq_41; + return instruction_sequence; +} + +// Rdd = vsplath(Rs) +RzILOpEffect *hex_il_op_s2_vsplatrh(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: st32 i; + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_2 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_5 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp565 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_7 = SETL("h_tmp565", VARL("i")); + + // seq(h_tmp565 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_8 = SEQN(2, op_ASSIGN_hybrid_tmp_7, op_INC_5); + + // Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * 0x10)))) | (((ut64) (((st32) ((st16) ((Rs >> 0x0) & 0xffff))) & 0xffff)) << i * 0x10))); + RzILOpPure *op_MUL_12 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_13 = SHIFTL0(SN(64, 0xffff), op_MUL_12); + RzILOpPure *op_NOT_14 = LOGNOT(op_LSHIFT_13); + RzILOpPure *op_AND_15 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_14); + RzILOpPure *op_RSHIFT_20 = SHIFTRA(Rs, SN(32, 0)); + RzILOpPure *op_AND_22 = LOGAND(op_RSHIFT_20, SN(32, 0xffff)); + RzILOpPure *op_AND_26 = LOGAND(CAST(32, 
MSB(CAST(16, MSB(op_AND_22), DUP(op_AND_22))), CAST(16, MSB(DUP(op_AND_22)), DUP(op_AND_22))), SN(32, 0xffff)); + RzILOpPure *op_MUL_29 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_30 = SHIFTL0(CAST(64, IL_FALSE, op_AND_26), op_MUL_29); + RzILOpPure *op_OR_32 = LOGOR(CAST(64, IL_FALSE, op_AND_15), op_LSHIFT_30); + RzILOpEffect *op_ASSIGN_34 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_32)); + + // seq(h_tmp565; Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * 0x1 ...; + RzILOpEffect *seq_36 = op_ASSIGN_34; + + // seq(seq(h_tmp565; Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * ...; + RzILOpEffect *seq_37 = SEQN(2, seq_36, seq_8); + + // while ((i < 0x4)) { seq(seq(h_tmp565; Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * ... }; + RzILOpPure *op_LT_4 = SLT(VARL("i"), SN(32, 4)); + RzILOpEffect *for_38 = REPEAT(op_LT_4, seq_37); + + // seq(i = 0x0; while ((i < 0x4)) { seq(seq(h_tmp565; Rdd = ((st64) ...; + RzILOpEffect *seq_39 = SEQN(2, op_ASSIGN_2, for_38); + + RzILOpEffect *instruction_sequence = seq_39; + return instruction_sequence; +} + +// Rdd = vspliceb(Rss,Rtt,Ii) +RzILOpEffect *hex_il_op_s2_vspliceib(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + + // u = u; + RzILOpEffect *imm_assign_1 = SETL("u", u); + + // Rdd = ((st64) (((ut64) ((u * ((ut32) 0x8) >= ((ut32) 0x40)) ? ((st64) 0x0) : (Rtt << u * ((ut32) 0x8)))) | ((u * ((ut32) 0x8) != ((ut32) 0x0)) ? 
extract64(((ut64) Rss), 0x0, ((st32) u * ((ut32) 0x8))) : ((ut64) 0x0)))); + RzILOpPure *op_MUL_5 = MUL(VARL("u"), CAST(32, IL_FALSE, SN(32, 8))); + RzILOpPure *op_GE_11 = UGE(op_MUL_5, CAST(32, IL_FALSE, SN(32, 0x40))); + RzILOpPure *op_MUL_15 = MUL(VARL("u"), CAST(32, IL_FALSE, SN(32, 8))); + RzILOpPure *op_LSHIFT_16 = SHIFTL0(Rtt, op_MUL_15); + RzILOpPure *cond_18 = ITE(op_GE_11, CAST(64, MSB(SN(32, 0)), SN(32, 0)), op_LSHIFT_16); + RzILOpPure *op_MUL_21 = MUL(VARL("u"), CAST(32, IL_FALSE, SN(32, 8))); + RzILOpPure *op_NE_24 = INV(EQ(op_MUL_21, CAST(32, IL_FALSE, SN(32, 0)))); + RzILOpPure *op_MUL_29 = MUL(VARL("u"), CAST(32, IL_FALSE, SN(32, 8))); + RzILOpPure *cond_35 = ITE(op_NE_24, EXTRACT64(CAST(64, IL_FALSE, Rss), SN(32, 0), CAST(32, IL_FALSE, op_MUL_29)), CAST(64, IL_FALSE, SN(64, 0))); + RzILOpPure *op_OR_37 = LOGOR(CAST(64, IL_FALSE, cond_18), cond_35); + RzILOpEffect *op_ASSIGN_39 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_37)); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_1, op_ASSIGN_39); + return instruction_sequence; +} + +// Rdd = vspliceb(Rss,Rtt,Pu) +RzILOpEffect *hex_il_op_s2_vsplicerb(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Pu_op = ISA2REG(hi, 'u', false); + RzILOpPure *Pu = READ_REG(pkt, Pu_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + + // Rdd = ((st64) (((ut64) (((((st32) Pu) & 0x7) * 0x8 >= 0x40) ? ((st64) 0x0) : (Rtt << (((st32) Pu) & 0x7) * 0x8))) | (((((st32) Pu) & 0x7) * 0x8 != 0x0) ? 
extract64(((ut64) Rss), 0x0, (((st32) Pu) & 0x7) * 0x8) : ((ut64) 0x0)))); + RzILOpPure *op_AND_4 = LOGAND(CAST(32, MSB(Pu), DUP(Pu)), SN(32, 7)); + RzILOpPure *op_MUL_6 = MUL(op_AND_4, SN(32, 8)); + RzILOpPure *op_GE_11 = SGE(op_MUL_6, SN(32, 0x40)); + RzILOpPure *op_AND_15 = LOGAND(CAST(32, MSB(DUP(Pu)), DUP(Pu)), SN(32, 7)); + RzILOpPure *op_MUL_17 = MUL(op_AND_15, SN(32, 8)); + RzILOpPure *op_LSHIFT_18 = SHIFTL0(Rtt, op_MUL_17); + RzILOpPure *cond_20 = ITE(op_GE_11, CAST(64, MSB(SN(32, 0)), SN(32, 0)), op_LSHIFT_18); + RzILOpPure *op_AND_23 = LOGAND(CAST(32, MSB(DUP(Pu)), DUP(Pu)), SN(32, 7)); + RzILOpPure *op_MUL_25 = MUL(op_AND_23, SN(32, 8)); + RzILOpPure *op_NE_27 = INV(EQ(op_MUL_25, SN(32, 0))); + RzILOpPure *op_AND_32 = LOGAND(CAST(32, MSB(DUP(Pu)), DUP(Pu)), SN(32, 7)); + RzILOpPure *op_MUL_34 = MUL(op_AND_32, SN(32, 8)); + RzILOpPure *cond_39 = ITE(op_NE_27, EXTRACT64(CAST(64, IL_FALSE, Rss), SN(32, 0), op_MUL_34), CAST(64, IL_FALSE, SN(64, 0))); + RzILOpPure *op_OR_41 = LOGOR(CAST(64, IL_FALSE, cond_20), cond_39); + RzILOpEffect *op_ASSIGN_43 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_41)); + + RzILOpEffect *instruction_sequence = op_ASSIGN_43; + return instruction_sequence; +} + +// Rdd = vsxtbh(Rs) +RzILOpEffect *hex_il_op_s2_vsxtbh(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: st32 i; + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_2 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_5 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp566 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_7 = SETL("h_tmp566", VARL("i")); + + // seq(h_tmp566 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_8 = SEQN(2, op_ASSIGN_hybrid_tmp_7, op_INC_5); + + // Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * 0x10)))) | (((ut64) (((st32) ((st8) ((Rs >> i 
* 0x8) & 0xff))) & 0xffff)) << i * 0x10))); + RzILOpPure *op_MUL_12 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_13 = SHIFTL0(SN(64, 0xffff), op_MUL_12); + RzILOpPure *op_NOT_14 = LOGNOT(op_LSHIFT_13); + RzILOpPure *op_AND_15 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_14); + RzILOpPure *op_MUL_18 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_RSHIFT_19 = SHIFTRA(Rs, op_MUL_18); + RzILOpPure *op_AND_21 = LOGAND(op_RSHIFT_19, SN(32, 0xff)); + RzILOpPure *op_AND_25 = LOGAND(CAST(32, MSB(CAST(8, MSB(op_AND_21), DUP(op_AND_21))), CAST(8, MSB(DUP(op_AND_21)), DUP(op_AND_21))), SN(32, 0xffff)); + RzILOpPure *op_MUL_28 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_29 = SHIFTL0(CAST(64, IL_FALSE, op_AND_25), op_MUL_28); + RzILOpPure *op_OR_31 = LOGOR(CAST(64, IL_FALSE, op_AND_15), op_LSHIFT_29); + RzILOpEffect *op_ASSIGN_33 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_31)); + + // seq(h_tmp566; Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * 0x1 ...; + RzILOpEffect *seq_35 = op_ASSIGN_33; + + // seq(seq(h_tmp566; Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * ...; + RzILOpEffect *seq_36 = SEQN(2, seq_35, seq_8); + + // while ((i < 0x4)) { seq(seq(h_tmp566; Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * ... 
}; + RzILOpPure *op_LT_4 = SLT(VARL("i"), SN(32, 4)); + RzILOpEffect *for_37 = REPEAT(op_LT_4, seq_36); + + // seq(i = 0x0; while ((i < 0x4)) { seq(seq(h_tmp566; Rdd = ((st64) ...; + RzILOpEffect *seq_38 = SEQN(2, op_ASSIGN_2, for_37); + + RzILOpEffect *instruction_sequence = seq_38; + return instruction_sequence; +} + +// Rdd = vsxthw(Rs) +RzILOpEffect *hex_il_op_s2_vsxthw(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: st32 i; + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_2 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_5 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp567 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_7 = SETL("h_tmp567", VARL("i")); + + // seq(h_tmp567 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_8 = SEQN(2, op_ASSIGN_hybrid_tmp_7, op_INC_5); + + // Rdd = ((Rdd & (~(0xffffffff << i * 0x20))) | ((((st64) ((st32) ((st16) ((Rs >> i * 0x10) & 0xffff)))) & 0xffffffff) << i * 0x20)); + RzILOpPure *op_MUL_12 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_LSHIFT_13 = SHIFTL0(SN(64, 0xffffffff), op_MUL_12); + RzILOpPure *op_NOT_14 = LOGNOT(op_LSHIFT_13); + RzILOpPure *op_AND_15 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_14); + RzILOpPure *op_MUL_18 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_19 = SHIFTRA(Rs, op_MUL_18); + RzILOpPure *op_AND_21 = LOGAND(op_RSHIFT_19, SN(32, 0xffff)); + RzILOpPure *op_AND_26 = LOGAND(CAST(64, MSB(CAST(32, MSB(CAST(16, MSB(op_AND_21), DUP(op_AND_21))), CAST(16, MSB(DUP(op_AND_21)), DUP(op_AND_21)))), CAST(32, MSB(CAST(16, MSB(DUP(op_AND_21)), DUP(op_AND_21))), CAST(16, MSB(DUP(op_AND_21)), DUP(op_AND_21)))), SN(64, 0xffffffff)); + RzILOpPure *op_MUL_28 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_LSHIFT_29 = SHIFTL0(op_AND_26, op_MUL_28); + RzILOpPure *op_OR_30 = LOGOR(op_AND_15, 
op_LSHIFT_29); + RzILOpEffect *op_ASSIGN_31 = WRITE_REG(bundle, Rdd_op, op_OR_30); + + // seq(h_tmp567; Rdd = ((Rdd & (~(0xffffffff << i * 0x20))) | ((((s ...; + RzILOpEffect *seq_33 = op_ASSIGN_31; + + // seq(seq(h_tmp567; Rdd = ((Rdd & (~(0xffffffff << i * 0x20))) | ( ...; + RzILOpEffect *seq_34 = SEQN(2, seq_33, seq_8); + + // while ((i < 0x2)) { seq(seq(h_tmp567; Rdd = ((Rdd & (~(0xffffffff << i * 0x20))) | ( ... }; + RzILOpPure *op_LT_4 = SLT(VARL("i"), SN(32, 2)); + RzILOpEffect *for_35 = REPEAT(op_LT_4, seq_34); + + // seq(i = 0x0; while ((i < 0x2)) { seq(seq(h_tmp567; Rdd = ((Rdd & ...; + RzILOpEffect *seq_36 = SEQN(2, op_ASSIGN_2, for_35); + + RzILOpEffect *instruction_sequence = seq_36; + return instruction_sequence; +} + +// Rd = vtrunehb(Rss) +RzILOpEffect *hex_il_op_s2_vtrunehb(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: st32 i; + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_2 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_5 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp568 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_7 = SETL("h_tmp568", VARL("i")); + + // seq(h_tmp568 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_8 = SEQN(2, op_ASSIGN_hybrid_tmp_7, op_INC_5); + + // Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xff << i * 0x8)))) | (((ut64) (((st64) ((st32) ((st8) ((Rss >> i * 0x2 * 0x8) & ((st64) 0xff))))) & 0xff)) << i * 0x8))); + RzILOpPure *op_MUL_12 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_LSHIFT_13 = SHIFTL0(SN(64, 0xff), op_MUL_12); + RzILOpPure *op_NOT_14 = LOGNOT(op_LSHIFT_13); + RzILOpPure *op_AND_16 = LOGAND(CAST(64, MSB(READ_REG(pkt, Rd_op, true)), READ_REG(pkt, Rd_op, true)), op_NOT_14); + RzILOpPure *op_MUL_19 = MUL(VARL("i"), SN(32, 2)); + RzILOpPure *op_MUL_21 = MUL(op_MUL_19, SN(32, 8)); + RzILOpPure 
*op_RSHIFT_22 = SHIFTRA(Rss, op_MUL_21); + RzILOpPure *op_AND_25 = LOGAND(op_RSHIFT_22, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_AND_30 = LOGAND(CAST(64, MSB(CAST(32, MSB(CAST(8, MSB(op_AND_25), DUP(op_AND_25))), CAST(8, MSB(DUP(op_AND_25)), DUP(op_AND_25)))), CAST(32, MSB(CAST(8, MSB(DUP(op_AND_25)), DUP(op_AND_25))), CAST(8, MSB(DUP(op_AND_25)), DUP(op_AND_25)))), SN(64, 0xff)); + RzILOpPure *op_MUL_33 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_LSHIFT_34 = SHIFTL0(CAST(64, IL_FALSE, op_AND_30), op_MUL_33); + RzILOpPure *op_OR_36 = LOGOR(CAST(64, IL_FALSE, op_AND_16), op_LSHIFT_34); + RzILOpEffect *op_ASSIGN_38 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, op_OR_36)); + + // seq(h_tmp568; Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xff << i ...; + RzILOpEffect *seq_40 = op_ASSIGN_38; + + // seq(seq(h_tmp568; Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xff < ...; + RzILOpEffect *seq_41 = SEQN(2, seq_40, seq_8); + + // while ((i < 0x4)) { seq(seq(h_tmp568; Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xff < ... 
}; + RzILOpPure *op_LT_4 = SLT(VARL("i"), SN(32, 4)); + RzILOpEffect *for_42 = REPEAT(op_LT_4, seq_41); + + // seq(i = 0x0; while ((i < 0x4)) { seq(seq(h_tmp568; Rd = ((st32) ...; + RzILOpEffect *seq_43 = SEQN(2, op_ASSIGN_2, for_42); + + RzILOpEffect *instruction_sequence = seq_43; + return instruction_sequence; +} + +// Rdd = vtrunewh(Rss,Rtt) +RzILOpEffect *hex_il_op_s2_vtrunewh(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + + // Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << 0x0)))) | (((ut64) (((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff)))) & 0xffff)) << 0x0))); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(SN(64, 0xffff), SN(32, 0)); + RzILOpPure *op_NOT_6 = LOGNOT(op_LSHIFT_5); + RzILOpPure *op_AND_7 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_6); + RzILOpPure *op_RSHIFT_12 = SHIFTRA(Rtt, SN(32, 0)); + RzILOpPure *op_AND_15 = LOGAND(op_RSHIFT_12, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_AND_19 = LOGAND(CAST(32, MSB(CAST(16, MSB(op_AND_15), DUP(op_AND_15))), CAST(16, MSB(DUP(op_AND_15)), DUP(op_AND_15))), SN(32, 0xffff)); + RzILOpPure *op_LSHIFT_24 = SHIFTL0(CAST(64, IL_FALSE, op_AND_19), SN(32, 0)); + RzILOpPure *op_OR_26 = LOGOR(CAST(64, IL_FALSE, op_AND_7), op_LSHIFT_24); + RzILOpEffect *op_ASSIGN_28 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_26)); + + // Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << 0x10)))) | (((ut64) (((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff)))) & 0xffff)) << 0x10))); + RzILOpPure *op_LSHIFT_34 = SHIFTL0(SN(64, 0xffff), SN(32, 16)); + RzILOpPure *op_NOT_35 = LOGNOT(op_LSHIFT_34); + RzILOpPure *op_AND_36 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_35); + RzILOpPure *op_RSHIFT_40 = SHIFTRA(DUP(Rtt), SN(32, 
0x20)); + RzILOpPure *op_AND_43 = LOGAND(op_RSHIFT_40, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_AND_47 = LOGAND(CAST(32, MSB(CAST(16, MSB(op_AND_43), DUP(op_AND_43))), CAST(16, MSB(DUP(op_AND_43)), DUP(op_AND_43))), SN(32, 0xffff)); + RzILOpPure *op_LSHIFT_52 = SHIFTL0(CAST(64, IL_FALSE, op_AND_47), SN(32, 16)); + RzILOpPure *op_OR_54 = LOGOR(CAST(64, IL_FALSE, op_AND_36), op_LSHIFT_52); + RzILOpEffect *op_ASSIGN_56 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_54)); + + // Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << 0x20)))) | (((ut64) (((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) & 0xffff)) << 0x20))); + RzILOpPure *op_LSHIFT_62 = SHIFTL0(SN(64, 0xffff), SN(32, 0x20)); + RzILOpPure *op_NOT_63 = LOGNOT(op_LSHIFT_62); + RzILOpPure *op_AND_64 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_63); + RzILOpPure *op_RSHIFT_69 = SHIFTRA(Rss, SN(32, 0)); + RzILOpPure *op_AND_72 = LOGAND(op_RSHIFT_69, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_AND_76 = LOGAND(CAST(32, MSB(CAST(16, MSB(op_AND_72), DUP(op_AND_72))), CAST(16, MSB(DUP(op_AND_72)), DUP(op_AND_72))), SN(32, 0xffff)); + RzILOpPure *op_LSHIFT_81 = SHIFTL0(CAST(64, IL_FALSE, op_AND_76), SN(32, 0x20)); + RzILOpPure *op_OR_83 = LOGOR(CAST(64, IL_FALSE, op_AND_64), op_LSHIFT_81); + RzILOpEffect *op_ASSIGN_85 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_83)); + + // Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << 0x30)))) | (((ut64) (((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) & 0xffff)) << 0x30))); + RzILOpPure *op_LSHIFT_91 = SHIFTL0(SN(64, 0xffff), SN(32, 0x30)); + RzILOpPure *op_NOT_92 = LOGNOT(op_LSHIFT_91); + RzILOpPure *op_AND_93 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_92); + RzILOpPure *op_RSHIFT_97 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_100 = LOGAND(op_RSHIFT_97, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_AND_104 = LOGAND(CAST(32, MSB(CAST(16, MSB(op_AND_100), DUP(op_AND_100))), CAST(16, 
MSB(DUP(op_AND_100)), DUP(op_AND_100))), SN(32, 0xffff)); + RzILOpPure *op_LSHIFT_109 = SHIFTL0(CAST(64, IL_FALSE, op_AND_104), SN(32, 0x30)); + RzILOpPure *op_OR_111 = LOGOR(CAST(64, IL_FALSE, op_AND_93), op_LSHIFT_109); + RzILOpEffect *op_ASSIGN_113 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_111)); + + RzILOpEffect *instruction_sequence = SEQN(4, op_ASSIGN_28, op_ASSIGN_56, op_ASSIGN_85, op_ASSIGN_113); + return instruction_sequence; +} + +// Rd = vtrunohb(Rss) +RzILOpEffect *hex_il_op_s2_vtrunohb(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: st32 i; + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_2 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_5 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp569 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_7 = SETL("h_tmp569", VARL("i")); + + // seq(h_tmp569 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_8 = SEQN(2, op_ASSIGN_hybrid_tmp_7, op_INC_5); + + // Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xff << i * 0x8)))) | (((ut64) (((st64) ((st32) ((st8) ((Rss >> i * 0x2 + 0x1 * 0x8) & ((st64) 0xff))))) & 0xff)) << i * 0x8))); + RzILOpPure *op_MUL_12 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_LSHIFT_13 = SHIFTL0(SN(64, 0xff), op_MUL_12); + RzILOpPure *op_NOT_14 = LOGNOT(op_LSHIFT_13); + RzILOpPure *op_AND_16 = LOGAND(CAST(64, MSB(READ_REG(pkt, Rd_op, true)), READ_REG(pkt, Rd_op, true)), op_NOT_14); + RzILOpPure *op_MUL_19 = MUL(VARL("i"), SN(32, 2)); + RzILOpPure *op_ADD_21 = ADD(op_MUL_19, SN(32, 1)); + RzILOpPure *op_MUL_23 = MUL(op_ADD_21, SN(32, 8)); + RzILOpPure *op_RSHIFT_24 = SHIFTRA(Rss, op_MUL_23); + RzILOpPure *op_AND_27 = LOGAND(op_RSHIFT_24, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_AND_32 = LOGAND(CAST(64, MSB(CAST(32, MSB(CAST(8, MSB(op_AND_27), DUP(op_AND_27))), 
CAST(8, MSB(DUP(op_AND_27)), DUP(op_AND_27)))), CAST(32, MSB(CAST(8, MSB(DUP(op_AND_27)), DUP(op_AND_27))), CAST(8, MSB(DUP(op_AND_27)), DUP(op_AND_27)))), SN(64, 0xff)); + RzILOpPure *op_MUL_35 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_LSHIFT_36 = SHIFTL0(CAST(64, IL_FALSE, op_AND_32), op_MUL_35); + RzILOpPure *op_OR_38 = LOGOR(CAST(64, IL_FALSE, op_AND_16), op_LSHIFT_36); + RzILOpEffect *op_ASSIGN_40 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, op_OR_38)); + + // seq(h_tmp569; Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xff << i ...; + RzILOpEffect *seq_42 = op_ASSIGN_40; + + // seq(seq(h_tmp569; Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xff < ...; + RzILOpEffect *seq_43 = SEQN(2, seq_42, seq_8); + + // while ((i < 0x4)) { seq(seq(h_tmp569; Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xff < ... }; + RzILOpPure *op_LT_4 = SLT(VARL("i"), SN(32, 4)); + RzILOpEffect *for_44 = REPEAT(op_LT_4, seq_43); + + // seq(i = 0x0; while ((i < 0x4)) { seq(seq(h_tmp569; Rd = ((st32) ...; + RzILOpEffect *seq_45 = SEQN(2, op_ASSIGN_2, for_44); + + RzILOpEffect *instruction_sequence = seq_45; + return instruction_sequence; +} + +// Rdd = vtrunowh(Rss,Rtt) +RzILOpEffect *hex_il_op_s2_vtrunowh(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + + // Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << 0x0)))) | (((ut64) (((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff)))) & 0xffff)) << 0x0))); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(SN(64, 0xffff), SN(32, 0)); + RzILOpPure *op_NOT_6 = LOGNOT(op_LSHIFT_5); + RzILOpPure *op_AND_7 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_6); + RzILOpPure *op_RSHIFT_12 = SHIFTRA(Rtt, SN(32, 16)); + RzILOpPure *op_AND_15 = LOGAND(op_RSHIFT_12, CAST(64, MSB(SN(32, 0xffff)), 
SN(32, 0xffff))); + RzILOpPure *op_AND_19 = LOGAND(CAST(32, MSB(CAST(16, MSB(op_AND_15), DUP(op_AND_15))), CAST(16, MSB(DUP(op_AND_15)), DUP(op_AND_15))), SN(32, 0xffff)); + RzILOpPure *op_LSHIFT_24 = SHIFTL0(CAST(64, IL_FALSE, op_AND_19), SN(32, 0)); + RzILOpPure *op_OR_26 = LOGOR(CAST(64, IL_FALSE, op_AND_7), op_LSHIFT_24); + RzILOpEffect *op_ASSIGN_28 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_26)); + + // Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << 0x10)))) | (((ut64) (((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff)))) & 0xffff)) << 0x10))); + RzILOpPure *op_LSHIFT_34 = SHIFTL0(SN(64, 0xffff), SN(32, 16)); + RzILOpPure *op_NOT_35 = LOGNOT(op_LSHIFT_34); + RzILOpPure *op_AND_36 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_35); + RzILOpPure *op_RSHIFT_40 = SHIFTRA(DUP(Rtt), SN(32, 0x30)); + RzILOpPure *op_AND_43 = LOGAND(op_RSHIFT_40, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_AND_47 = LOGAND(CAST(32, MSB(CAST(16, MSB(op_AND_43), DUP(op_AND_43))), CAST(16, MSB(DUP(op_AND_43)), DUP(op_AND_43))), SN(32, 0xffff)); + RzILOpPure *op_LSHIFT_52 = SHIFTL0(CAST(64, IL_FALSE, op_AND_47), SN(32, 16)); + RzILOpPure *op_OR_54 = LOGOR(CAST(64, IL_FALSE, op_AND_36), op_LSHIFT_52); + RzILOpEffect *op_ASSIGN_56 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_54)); + + // Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << 0x20)))) | (((ut64) (((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) & 0xffff)) << 0x20))); + RzILOpPure *op_LSHIFT_62 = SHIFTL0(SN(64, 0xffff), SN(32, 0x20)); + RzILOpPure *op_NOT_63 = LOGNOT(op_LSHIFT_62); + RzILOpPure *op_AND_64 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_63); + RzILOpPure *op_RSHIFT_69 = SHIFTRA(Rss, SN(32, 16)); + RzILOpPure *op_AND_72 = LOGAND(op_RSHIFT_69, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_AND_76 = LOGAND(CAST(32, MSB(CAST(16, MSB(op_AND_72), DUP(op_AND_72))), CAST(16, MSB(DUP(op_AND_72)), DUP(op_AND_72))), SN(32, 0xffff)); + RzILOpPure *op_LSHIFT_81 = SHIFTL0(CAST(64, 
IL_FALSE, op_AND_76), SN(32, 0x20)); + RzILOpPure *op_OR_83 = LOGOR(CAST(64, IL_FALSE, op_AND_64), op_LSHIFT_81); + RzILOpEffect *op_ASSIGN_85 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_83)); + + // Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << 0x30)))) | (((ut64) (((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) & 0xffff)) << 0x30))); + RzILOpPure *op_LSHIFT_91 = SHIFTL0(SN(64, 0xffff), SN(32, 0x30)); + RzILOpPure *op_NOT_92 = LOGNOT(op_LSHIFT_91); + RzILOpPure *op_AND_93 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_92); + RzILOpPure *op_RSHIFT_97 = SHIFTRA(DUP(Rss), SN(32, 0x30)); + RzILOpPure *op_AND_100 = LOGAND(op_RSHIFT_97, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_AND_104 = LOGAND(CAST(32, MSB(CAST(16, MSB(op_AND_100), DUP(op_AND_100))), CAST(16, MSB(DUP(op_AND_100)), DUP(op_AND_100))), SN(32, 0xffff)); + RzILOpPure *op_LSHIFT_109 = SHIFTL0(CAST(64, IL_FALSE, op_AND_104), SN(32, 0x30)); + RzILOpPure *op_OR_111 = LOGOR(CAST(64, IL_FALSE, op_AND_93), op_LSHIFT_109); + RzILOpEffect *op_ASSIGN_113 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_111)); + + RzILOpEffect *instruction_sequence = SEQN(4, op_ASSIGN_28, op_ASSIGN_56, op_ASSIGN_85, op_ASSIGN_113); + return instruction_sequence; +} + +// Rdd = vzxtbh(Rs) +RzILOpEffect *hex_il_op_s2_vzxtbh(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: st32 i; + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_2 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_5 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp570 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_7 = SETL("h_tmp570", VARL("i")); + + // seq(h_tmp570 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_8 = SEQN(2, op_ASSIGN_hybrid_tmp_7, op_INC_5); + + // Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * 0x10)))) | 
(((ut64) (((st32) ((ut8) ((Rs >> i * 0x8) & 0xff))) & 0xffff)) << i * 0x10))); + RzILOpPure *op_MUL_12 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_13 = SHIFTL0(SN(64, 0xffff), op_MUL_12); + RzILOpPure *op_NOT_14 = LOGNOT(op_LSHIFT_13); + RzILOpPure *op_AND_15 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_14); + RzILOpPure *op_MUL_18 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_RSHIFT_19 = SHIFTRA(Rs, op_MUL_18); + RzILOpPure *op_AND_21 = LOGAND(op_RSHIFT_19, SN(32, 0xff)); + RzILOpPure *op_AND_25 = LOGAND(CAST(32, IL_FALSE, CAST(8, IL_FALSE, op_AND_21)), SN(32, 0xffff)); + RzILOpPure *op_MUL_28 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_29 = SHIFTL0(CAST(64, IL_FALSE, op_AND_25), op_MUL_28); + RzILOpPure *op_OR_31 = LOGOR(CAST(64, IL_FALSE, op_AND_15), op_LSHIFT_29); + RzILOpEffect *op_ASSIGN_33 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_31)); + + // seq(h_tmp570; Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * 0x1 ...; + RzILOpEffect *seq_35 = op_ASSIGN_33; + + // seq(seq(h_tmp570; Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * ...; + RzILOpEffect *seq_36 = SEQN(2, seq_35, seq_8); + + // while ((i < 0x4)) { seq(seq(h_tmp570; Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * ... 
}; + RzILOpPure *op_LT_4 = SLT(VARL("i"), SN(32, 4)); + RzILOpEffect *for_37 = REPEAT(op_LT_4, seq_36); + + // seq(i = 0x0; while ((i < 0x4)) { seq(seq(h_tmp570; Rdd = ((st64) ...; + RzILOpEffect *seq_38 = SEQN(2, op_ASSIGN_2, for_37); + + RzILOpEffect *instruction_sequence = seq_38; + return instruction_sequence; +} + +// Rdd = vzxthw(Rs) +RzILOpEffect *hex_il_op_s2_vzxthw(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: st32 i; + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_2 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_5 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp571 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_7 = SETL("h_tmp571", VARL("i")); + + // seq(h_tmp571 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_8 = SEQN(2, op_ASSIGN_hybrid_tmp_7, op_INC_5); + + // Rdd = ((Rdd & (~(0xffffffff << i * 0x20))) | ((((st64) ((st32) ((ut16) ((Rs >> i * 0x10) & 0xffff)))) & 0xffffffff) << i * 0x20)); + RzILOpPure *op_MUL_12 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_LSHIFT_13 = SHIFTL0(SN(64, 0xffffffff), op_MUL_12); + RzILOpPure *op_NOT_14 = LOGNOT(op_LSHIFT_13); + RzILOpPure *op_AND_15 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_14); + RzILOpPure *op_MUL_18 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_19 = SHIFTRA(Rs, op_MUL_18); + RzILOpPure *op_AND_21 = LOGAND(op_RSHIFT_19, SN(32, 0xffff)); + RzILOpPure *op_AND_26 = LOGAND(CAST(64, MSB(CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_AND_21))), CAST(32, IL_FALSE, CAST(16, IL_FALSE, DUP(op_AND_21)))), SN(64, 0xffffffff)); + RzILOpPure *op_MUL_28 = MUL(VARL("i"), SN(32, 0x20)); + RzILOpPure *op_LSHIFT_29 = SHIFTL0(op_AND_26, op_MUL_28); + RzILOpPure *op_OR_30 = LOGOR(op_AND_15, op_LSHIFT_29); + RzILOpEffect *op_ASSIGN_31 = WRITE_REG(bundle, Rdd_op, op_OR_30); + + // seq(h_tmp571; Rdd = 
((Rdd & (~(0xffffffff << i * 0x20))) | ((((s ...; + RzILOpEffect *seq_33 = op_ASSIGN_31; + + // seq(seq(h_tmp571; Rdd = ((Rdd & (~(0xffffffff << i * 0x20))) | ( ...; + RzILOpEffect *seq_34 = SEQN(2, seq_33, seq_8); + + // while ((i < 0x2)) { seq(seq(h_tmp571; Rdd = ((Rdd & (~(0xffffffff << i * 0x20))) | ( ... }; + RzILOpPure *op_LT_4 = SLT(VARL("i"), SN(32, 2)); + RzILOpEffect *for_35 = REPEAT(op_LT_4, seq_34); + + // seq(i = 0x0; while ((i < 0x2)) { seq(seq(h_tmp571; Rdd = ((Rdd & ...; + RzILOpEffect *seq_36 = SEQN(2, op_ASSIGN_2, for_35); + + RzILOpEffect *instruction_sequence = seq_36; + return instruction_sequence; +} + +#include \ No newline at end of file diff --git a/librz/arch/isa/hexagon/il_ops/hexagon_il_S4_ops.c b/librz/arch/isa/hexagon/il_ops/hexagon_il_S4_ops.c new file mode 100644 index 00000000000..56b4b0cd6fd --- /dev/null +++ b/librz/arch/isa/hexagon/il_ops/hexagon_il_S4_ops.c @@ -0,0 +1,6774 @@ +// SPDX-FileCopyrightText: 2021 Rot127 +// SPDX-License-Identifier: LGPL-3.0-only + +// LLVM commit: b6f51787f6c8e77143f0aef6b58ddc7c55741d5c +// LLVM commit date: 2023-11-15 07:10:59 -0800 (ISO 8601 format) +// Date of code generation: 2024-03-16 06:22:39-05:00 +//======================================== +// The following code is generated. +// Do not edit. 
Repository of code generator: +// https://github.com/rizinorg/rz-hexagon + +#include +#include "../hexagon_il.h" +#include +#include + +// Rd = add(Rs,add(Ru,Ii)) +RzILOpEffect *hex_il_op_s4_addaddi(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Ru_op = ISA2REG(hi, 'u', false); + RzILOpPure *Ru = READ_REG(pkt, Ru_op, false); + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + + // s = s; + RzILOpEffect *imm_assign_4 = SETL("s", s); + + // Rd = Rs + Ru + s; + RzILOpPure *op_ADD_3 = ADD(Rs, Ru); + RzILOpPure *op_ADD_6 = ADD(op_ADD_3, VARL("s")); + RzILOpEffect *op_ASSIGN_7 = WRITE_REG(bundle, Rd_op, op_ADD_6); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_4, op_ASSIGN_7); + return instruction_sequence; +} + +// Rx = add(Ii,asl(Rxin,II)) +RzILOpEffect *hex_il_op_s4_addi_asl_ri(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + RzILOpPure *U = UN(32, (ut32)ISA2IMM(hi, 'U')); + + // u = u; + RzILOpEffect *imm_assign_1 = SETL("u", u); + + // U = U; + RzILOpEffect *imm_assign_3 = SETL("U", U); + + // Rx = ((st32) u + ((ut32) (Rx << U))); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(READ_REG(pkt, Rx_op, false), VARL("U")); + RzILOpPure *op_ADD_7 = ADD(VARL("u"), CAST(32, IL_FALSE, op_LSHIFT_5)); + RzILOpEffect *op_ASSIGN_9 = WRITE_REG(bundle, Rx_op, CAST(32, IL_FALSE, op_ADD_7)); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_1, imm_assign_3, op_ASSIGN_9); + return instruction_sequence; +} + +// Rx = add(Ii,lsr(Rxin,II)) +RzILOpEffect *hex_il_op_s4_addi_lsr_ri(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rx_op = 
ISA2REG(hi, 'x', false); + + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + RzILOpPure *U = UN(32, (ut32)ISA2IMM(hi, 'U')); + + // u = u; + RzILOpEffect *imm_assign_1 = SETL("u", u); + + // U = U; + RzILOpEffect *imm_assign_4 = SETL("U", U); + + // Rx = ((st32) u + (((ut32) Rx) >> U)); + RzILOpPure *op_RSHIFT_6 = SHIFTR0(CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false)), VARL("U")); + RzILOpPure *op_ADD_7 = ADD(VARL("u"), op_RSHIFT_6); + RzILOpEffect *op_ASSIGN_9 = WRITE_REG(bundle, Rx_op, CAST(32, IL_FALSE, op_ADD_7)); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_1, imm_assign_4, op_ASSIGN_9); + return instruction_sequence; +} + +// Rx = and(Ii,asl(Rxin,II)) +RzILOpEffect *hex_il_op_s4_andi_asl_ri(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + RzILOpPure *U = UN(32, (ut32)ISA2IMM(hi, 'U')); + + // u = u; + RzILOpEffect *imm_assign_1 = SETL("u", u); + + // U = U; + RzILOpEffect *imm_assign_3 = SETL("U", U); + + // Rx = ((st32) (u & ((ut32) (Rx << U)))); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(READ_REG(pkt, Rx_op, false), VARL("U")); + RzILOpPure *op_AND_7 = LOGAND(VARL("u"), CAST(32, IL_FALSE, op_LSHIFT_5)); + RzILOpEffect *op_ASSIGN_9 = WRITE_REG(bundle, Rx_op, CAST(32, IL_FALSE, op_AND_7)); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_1, imm_assign_3, op_ASSIGN_9); + return instruction_sequence; +} + +// Rx = and(Ii,lsr(Rxin,II)) +RzILOpEffect *hex_il_op_s4_andi_lsr_ri(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + RzILOpPure *U = UN(32, (ut32)ISA2IMM(hi, 'U')); + + // u = u; + RzILOpEffect *imm_assign_1 = SETL("u", u); + + // U = U; + RzILOpEffect *imm_assign_4 = SETL("U", U); + + // Rx = ((st32) (u & (((ut32) Rx) >> U))); + 
RzILOpPure *op_RSHIFT_6 = SHIFTR0(CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false)), VARL("U")); + RzILOpPure *op_AND_7 = LOGAND(VARL("u"), op_RSHIFT_6); + RzILOpEffect *op_ASSIGN_9 = WRITE_REG(bundle, Rx_op, CAST(32, IL_FALSE, op_AND_7)); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_1, imm_assign_4, op_ASSIGN_9); + return instruction_sequence; +} + +// Rd = add(clb(Rs),Ii) +RzILOpEffect *hex_il_op_s4_clbaddi(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + + // clo32(((ut32) Rs)); + RzILOpEffect *clo32_call_3 = hex_clo32(CAST(32, IL_FALSE, Rs)); + + // h_tmp572 = clo32(((ut32) Rs)); + RzILOpEffect *op_ASSIGN_hybrid_tmp_5 = SETL("h_tmp572", UNSIGNED(32, VARL("ret_val"))); + + // seq(clo32(((ut32) Rs)); h_tmp572 = clo32(((ut32) Rs))); + RzILOpEffect *seq_6 = SEQN(2, clo32_call_3, op_ASSIGN_hybrid_tmp_5); + + // clo32(((ut32) (~Rs))); + RzILOpPure *op_NOT_7 = LOGNOT(DUP(Rs)); + RzILOpEffect *clo32_call_9 = hex_clo32(CAST(32, IL_FALSE, op_NOT_7)); + + // h_tmp573 = clo32(((ut32) (~Rs))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_11 = SETL("h_tmp573", UNSIGNED(32, VARL("ret_val"))); + + // seq(clo32(((ut32) (~Rs))); h_tmp573 = clo32(((ut32) (~Rs)))); + RzILOpEffect *seq_12 = SEQN(2, clo32_call_9, op_ASSIGN_hybrid_tmp_11); + + // clo32(((ut32) Rs)); + RzILOpEffect *clo32_call_15 = hex_clo32(CAST(32, IL_FALSE, DUP(Rs))); + + // h_tmp574 = clo32(((ut32) Rs)); + RzILOpEffect *op_ASSIGN_hybrid_tmp_17 = SETL("h_tmp574", UNSIGNED(32, VARL("ret_val"))); + + // seq(clo32(((ut32) Rs)); h_tmp574 = clo32(((ut32) Rs))); + RzILOpEffect *seq_18 = SEQN(2, clo32_call_15, op_ASSIGN_hybrid_tmp_17); + + // clo32(((ut32) (~Rs))); + RzILOpPure *op_NOT_19 = LOGNOT(DUP(Rs)); + RzILOpEffect *clo32_call_21 = hex_clo32(CAST(32, IL_FALSE, 
op_NOT_19)); + + // h_tmp575 = clo32(((ut32) (~Rs))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_23 = SETL("h_tmp575", UNSIGNED(32, VARL("ret_val"))); + + // seq(clo32(((ut32) (~Rs))); h_tmp575 = clo32(((ut32) (~Rs)))); + RzILOpEffect *seq_24 = SEQN(2, clo32_call_21, op_ASSIGN_hybrid_tmp_23); + + // s = s; + RzILOpEffect *imm_assign_26 = SETL("s", s); + + // Rd = ((st32) ((h_tmp572 > h_tmp573) ? h_tmp574 : h_tmp575) + ((ut32) s)); + RzILOpPure *op_GT_13 = UGT(VARL("h_tmp572"), VARL("h_tmp573")); + RzILOpPure *cond_25 = ITE(op_GT_13, VARL("h_tmp574"), VARL("h_tmp575")); + RzILOpPure *op_ADD_29 = ADD(cond_25, CAST(32, IL_FALSE, VARL("s"))); + RzILOpEffect *op_ASSIGN_31 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, op_ADD_29)); + + // seq(seq(clo32(((ut32) Rs)); h_tmp572 = clo32(((ut32) Rs))); seq( ...; + RzILOpEffect *seq_32 = SEQN(5, seq_6, seq_12, seq_18, seq_24, op_ASSIGN_31); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_26, seq_32); + return instruction_sequence; +} + +// Rd = add(clb(Rss),Ii) +RzILOpEffect *hex_il_op_s4_clbpaddi(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + + // clo64(((ut64) Rss)); + RzILOpEffect *clo64_call_3 = hex_clo64(CAST(64, IL_FALSE, Rss)); + + // h_tmp576 = clo64(((ut64) Rss)); + RzILOpEffect *op_ASSIGN_hybrid_tmp_5 = SETL("h_tmp576", UNSIGNED(64, VARL("ret_val"))); + + // seq(clo64(((ut64) Rss)); h_tmp576 = clo64(((ut64) Rss))); + RzILOpEffect *seq_6 = SEQN(2, clo64_call_3, op_ASSIGN_hybrid_tmp_5); + + // clo64(((ut64) (~Rss))); + RzILOpPure *op_NOT_7 = LOGNOT(DUP(Rss)); + RzILOpEffect *clo64_call_9 = hex_clo64(CAST(64, IL_FALSE, op_NOT_7)); + + // h_tmp577 = clo64(((ut64) (~Rss))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_11 = SETL("h_tmp577", UNSIGNED(64, VARL("ret_val"))); + + 
// seq(clo64(((ut64) (~Rss))); h_tmp577 = clo64(((ut64) (~Rss)))); + RzILOpEffect *seq_12 = SEQN(2, clo64_call_9, op_ASSIGN_hybrid_tmp_11); + + // clo64(((ut64) Rss)); + RzILOpEffect *clo64_call_15 = hex_clo64(CAST(64, IL_FALSE, DUP(Rss))); + + // h_tmp578 = clo64(((ut64) Rss)); + RzILOpEffect *op_ASSIGN_hybrid_tmp_17 = SETL("h_tmp578", UNSIGNED(64, VARL("ret_val"))); + + // seq(clo64(((ut64) Rss)); h_tmp578 = clo64(((ut64) Rss))); + RzILOpEffect *seq_18 = SEQN(2, clo64_call_15, op_ASSIGN_hybrid_tmp_17); + + // clo64(((ut64) (~Rss))); + RzILOpPure *op_NOT_19 = LOGNOT(DUP(Rss)); + RzILOpEffect *clo64_call_21 = hex_clo64(CAST(64, IL_FALSE, op_NOT_19)); + + // h_tmp579 = clo64(((ut64) (~Rss))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_23 = SETL("h_tmp579", UNSIGNED(64, VARL("ret_val"))); + + // seq(clo64(((ut64) (~Rss))); h_tmp579 = clo64(((ut64) (~Rss)))); + RzILOpEffect *seq_24 = SEQN(2, clo64_call_21, op_ASSIGN_hybrid_tmp_23); + + // s = s; + RzILOpEffect *imm_assign_26 = SETL("s", s); + + // Rd = ((st32) ((h_tmp576 > h_tmp577) ? 
h_tmp578 : h_tmp579) + ((ut64) s)); + RzILOpPure *op_GT_13 = UGT(VARL("h_tmp576"), VARL("h_tmp577")); + RzILOpPure *cond_25 = ITE(op_GT_13, VARL("h_tmp578"), VARL("h_tmp579")); + RzILOpPure *op_ADD_29 = ADD(cond_25, CAST(64, IL_FALSE, VARL("s"))); + RzILOpEffect *op_ASSIGN_31 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, op_ADD_29)); + + // seq(seq(clo64(((ut64) Rss)); h_tmp576 = clo64(((ut64) Rss))); se ...; + RzILOpEffect *seq_32 = SEQN(5, seq_6, seq_12, seq_18, seq_24, op_ASSIGN_31); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_26, seq_32); + return instruction_sequence; +} + +// Rd = normamt(Rss) +RzILOpEffect *hex_il_op_s4_clbpnorm(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // Rd = 0x0; + RzILOpEffect *op_ASSIGN_6 = WRITE_REG(bundle, Rd_op, SN(32, 0)); + + // clo64(((ut64) Rss)); + RzILOpEffect *clo64_call_8 = hex_clo64(CAST(64, IL_FALSE, Rss)); + + // h_tmp580 = clo64(((ut64) Rss)); + RzILOpEffect *op_ASSIGN_hybrid_tmp_10 = SETL("h_tmp580", UNSIGNED(64, VARL("ret_val"))); + + // seq(clo64(((ut64) Rss)); h_tmp580 = clo64(((ut64) Rss))); + RzILOpEffect *seq_11 = SEQN(2, clo64_call_8, op_ASSIGN_hybrid_tmp_10); + + // clo64(((ut64) (~Rss))); + RzILOpPure *op_NOT_12 = LOGNOT(DUP(Rss)); + RzILOpEffect *clo64_call_14 = hex_clo64(CAST(64, IL_FALSE, op_NOT_12)); + + // h_tmp581 = clo64(((ut64) (~Rss))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_16 = SETL("h_tmp581", UNSIGNED(64, VARL("ret_val"))); + + // seq(clo64(((ut64) (~Rss))); h_tmp581 = clo64(((ut64) (~Rss)))); + RzILOpEffect *seq_17 = SEQN(2, clo64_call_14, op_ASSIGN_hybrid_tmp_16); + + // clo64(((ut64) Rss)); + RzILOpEffect *clo64_call_20 = hex_clo64(CAST(64, IL_FALSE, DUP(Rss))); + + // h_tmp582 = clo64(((ut64) Rss)); + RzILOpEffect *op_ASSIGN_hybrid_tmp_22 = SETL("h_tmp582", UNSIGNED(64, 
VARL("ret_val"))); + + // seq(clo64(((ut64) Rss)); h_tmp582 = clo64(((ut64) Rss))); + RzILOpEffect *seq_23 = SEQN(2, clo64_call_20, op_ASSIGN_hybrid_tmp_22); + + // clo64(((ut64) (~Rss))); + RzILOpPure *op_NOT_24 = LOGNOT(DUP(Rss)); + RzILOpEffect *clo64_call_26 = hex_clo64(CAST(64, IL_FALSE, op_NOT_24)); + + // h_tmp583 = clo64(((ut64) (~Rss))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_28 = SETL("h_tmp583", UNSIGNED(64, VARL("ret_val"))); + + // seq(clo64(((ut64) (~Rss))); h_tmp583 = clo64(((ut64) (~Rss)))); + RzILOpEffect *seq_29 = SEQN(2, clo64_call_26, op_ASSIGN_hybrid_tmp_28); + + // Rd = ((st32) ((h_tmp580 > h_tmp581) ? h_tmp582 : h_tmp583) - ((ut64) 0x1)); + RzILOpPure *op_GT_18 = UGT(VARL("h_tmp580"), VARL("h_tmp581")); + RzILOpPure *cond_30 = ITE(op_GT_18, VARL("h_tmp582"), VARL("h_tmp583")); + RzILOpPure *op_SUB_33 = SUB(cond_30, CAST(64, IL_FALSE, SN(32, 1))); + RzILOpEffect *op_ASSIGN_35 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, op_SUB_33)); + + // seq(seq(clo64(((ut64) Rss)); h_tmp580 = clo64(((ut64) Rss))); se ...; + RzILOpEffect *seq_36 = SEQN(5, seq_11, seq_17, seq_23, seq_29, op_ASSIGN_35); + + // seq(Rd = 0x0); + RzILOpEffect *seq_then_37 = op_ASSIGN_6; + + // seq(seq(seq(clo64(((ut64) Rss)); h_tmp580 = clo64(((ut64) Rss))) ...; + RzILOpEffect *seq_else_38 = seq_36; + + // if ((Rss == ((st64) 0x0))) {seq(Rd = 0x0)} else {seq(seq(seq(clo64(((ut64) Rss)); h_tmp580 = clo64(((ut64) Rss))) ...}; + RzILOpPure *op_EQ_3 = EQ(DUP(Rss), CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpEffect *branch_39 = BRANCH(op_EQ_3, seq_then_37, seq_else_38); + + RzILOpEffect *instruction_sequence = branch_39; + return instruction_sequence; +} + +// Rd = extract(Rs,Ii,II) +RzILOpEffect *hex_il_op_s4_extract(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: st32 width; + RzILOpPure *U = UN(32, (ut32)ISA2IMM(hi, 'U')); + // Declare: st32 offset; + const HexOp 
*Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // width = ((st32) u); + RzILOpEffect *op_ASSIGN_3 = SETL("width", CAST(32, IL_FALSE, VARL("u"))); + + // U = U; + RzILOpEffect *imm_assign_5 = SETL("U", U); + + // offset = ((st32) U); + RzILOpEffect *op_ASSIGN_8 = SETL("offset", CAST(32, IL_FALSE, VARL("U"))); + + // Rd = ((st32) ((width != 0x0) ? sextract64(((ut64) (((ut32) Rs) >> offset)), 0x0, width) : 0x0)); + RzILOpPure *op_NE_12 = INV(EQ(VARL("width"), SN(32, 0))); + RzILOpPure *op_RSHIFT_15 = SHIFTR0(CAST(32, IL_FALSE, Rs), VARL("offset")); + RzILOpPure *cond_20 = ITE(op_NE_12, SEXTRACT64(CAST(64, IL_FALSE, op_RSHIFT_15), SN(32, 0), VARL("width")), SN(64, 0)); + RzILOpEffect *op_ASSIGN_22 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(cond_20), DUP(cond_20))); + + RzILOpEffect *instruction_sequence = SEQN(5, imm_assign_0, imm_assign_5, op_ASSIGN_3, op_ASSIGN_8, op_ASSIGN_22); + return instruction_sequence; +} + +// Rd = extract(Rs,Rtt) +RzILOpEffect *hex_il_op_s4_extract_rp(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + // Declare: st32 width; + // Declare: st32 offset; + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // width = ((st32) extract64(((ut64) ((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff)))), 0x0, 0x6)); + RzILOpPure *op_RSHIFT_7 = SHIFTRA(Rtt, SN(32, 0x20)); + RzILOpPure *op_AND_9 = LOGAND(op_RSHIFT_7, SN(64, 0xffffffff)); + RzILOpEffect *op_ASSIGN_18 = SETL("width", CAST(32, IL_FALSE, EXTRACT64(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(op_AND_9), DUP(op_AND_9))), CAST(32, MSB(DUP(op_AND_9)), DUP(op_AND_9)))), SN(32, 0), SN(32, 6)))); + + // offset = ((st32) 
sextract64(((ut64) ((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff)))), 0x0, 0x7)); + RzILOpPure *op_RSHIFT_26 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_28 = LOGAND(op_RSHIFT_26, SN(64, 0xffffffff)); + RzILOpEffect *op_ASSIGN_37 = SETL("offset", CAST(32, MSB(SEXTRACT64(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(op_AND_28), DUP(op_AND_28))), CAST(32, MSB(DUP(op_AND_28)), DUP(op_AND_28)))), SN(32, 0), SN(32, 7))), SEXTRACT64(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(DUP(op_AND_28)), DUP(op_AND_28))), CAST(32, MSB(DUP(op_AND_28)), DUP(op_AND_28)))), SN(32, 0), SN(32, 7)))); + + // Rd = ((st32) ((width != 0x0) ? sextract64(((offset < 0x0) ? ((((ut64) ((ut32) ((ut64) ((ut32) Rs)))) << (-offset) - 0x1) << 0x1) : (((ut64) ((ut32) ((ut64) ((ut32) Rs)))) >> offset)), 0x0, width) : 0x0)); + RzILOpPure *op_NE_41 = INV(EQ(VARL("width"), SN(32, 0))); + RzILOpPure *op_LT_43 = SLT(VARL("offset"), SN(32, 0)); + RzILOpPure *op_NEG_49 = NEG(VARL("offset")); + RzILOpPure *op_SUB_51 = SUB(op_NEG_49, SN(32, 1)); + RzILOpPure *op_LSHIFT_52 = SHIFTL0(CAST(64, IL_FALSE, CAST(32, IL_FALSE, CAST(64, IL_FALSE, CAST(32, IL_FALSE, Rs)))), op_SUB_51); + RzILOpPure *op_LSHIFT_54 = SHIFTL0(op_LSHIFT_52, SN(32, 1)); + RzILOpPure *op_RSHIFT_59 = SHIFTR0(CAST(64, IL_FALSE, CAST(32, IL_FALSE, CAST(64, IL_FALSE, CAST(32, IL_FALSE, DUP(Rs))))), VARL("offset")); + RzILOpPure *cond_60 = ITE(op_LT_43, op_LSHIFT_54, op_RSHIFT_59); + RzILOpPure *cond_64 = ITE(op_NE_41, SEXTRACT64(cond_60, SN(32, 0), VARL("width")), SN(64, 0)); + RzILOpEffect *op_ASSIGN_66 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(cond_64), DUP(cond_64))); + + RzILOpEffect *instruction_sequence = SEQN(3, op_ASSIGN_18, op_ASSIGN_37, op_ASSIGN_66); + return instruction_sequence; +} + +// Rdd = extract(Rss,Ii,II) +RzILOpEffect *hex_il_op_s4_extractp(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: st32 width; + 
RzILOpPure *U = UN(32, (ut32)ISA2IMM(hi, 'U')); + // Declare: st32 offset; + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // width = ((st32) u); + RzILOpEffect *op_ASSIGN_3 = SETL("width", CAST(32, IL_FALSE, VARL("u"))); + + // U = U; + RzILOpEffect *imm_assign_5 = SETL("U", U); + + // offset = ((st32) U); + RzILOpEffect *op_ASSIGN_8 = SETL("offset", CAST(32, IL_FALSE, VARL("U"))); + + // Rdd = ((width != 0x0) ? sextract64((((ut64) Rss) >> offset), 0x0, width) : 0x0); + RzILOpPure *op_NE_12 = INV(EQ(VARL("width"), SN(32, 0))); + RzILOpPure *op_RSHIFT_15 = SHIFTR0(CAST(64, IL_FALSE, Rss), VARL("offset")); + RzILOpPure *cond_19 = ITE(op_NE_12, SEXTRACT64(op_RSHIFT_15, SN(32, 0), VARL("width")), SN(64, 0)); + RzILOpEffect *op_ASSIGN_20 = WRITE_REG(bundle, Rdd_op, cond_19); + + RzILOpEffect *instruction_sequence = SEQN(5, imm_assign_0, imm_assign_5, op_ASSIGN_3, op_ASSIGN_8, op_ASSIGN_20); + return instruction_sequence; +} + +// Rdd = extract(Rss,Rtt) +RzILOpEffect *hex_il_op_s4_extractp_rp(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + // Declare: st32 width; + // Declare: st32 offset; + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + + // width = ((st32) extract64(((ut64) ((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff)))), 0x0, 0x6)); + RzILOpPure *op_RSHIFT_7 = SHIFTRA(Rtt, SN(32, 0x20)); + RzILOpPure *op_AND_9 = LOGAND(op_RSHIFT_7, SN(64, 0xffffffff)); + RzILOpEffect *op_ASSIGN_18 = SETL("width", CAST(32, IL_FALSE, EXTRACT64(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(op_AND_9), DUP(op_AND_9))), CAST(32, MSB(DUP(op_AND_9)), DUP(op_AND_9)))), SN(32, 0), 
SN(32, 6)))); + + // offset = ((st32) sextract64(((ut64) ((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff)))), 0x0, 0x7)); + RzILOpPure *op_RSHIFT_26 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_28 = LOGAND(op_RSHIFT_26, SN(64, 0xffffffff)); + RzILOpEffect *op_ASSIGN_37 = SETL("offset", CAST(32, MSB(SEXTRACT64(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(op_AND_28), DUP(op_AND_28))), CAST(32, MSB(DUP(op_AND_28)), DUP(op_AND_28)))), SN(32, 0), SN(32, 7))), SEXTRACT64(CAST(64, IL_FALSE, CAST(64, MSB(CAST(32, MSB(DUP(op_AND_28)), DUP(op_AND_28))), CAST(32, MSB(DUP(op_AND_28)), DUP(op_AND_28)))), SN(32, 0), SN(32, 7)))); + + // Rdd = ((width != 0x0) ? sextract64(((offset < 0x0) ? ((((ut64) Rss) << (-offset) - 0x1) << 0x1) : (((ut64) Rss) >> offset)), 0x0, width) : 0x0); + RzILOpPure *op_NE_41 = INV(EQ(VARL("width"), SN(32, 0))); + RzILOpPure *op_LT_43 = SLT(VARL("offset"), SN(32, 0)); + RzILOpPure *op_NEG_46 = NEG(VARL("offset")); + RzILOpPure *op_SUB_48 = SUB(op_NEG_46, SN(32, 1)); + RzILOpPure *op_LSHIFT_49 = SHIFTL0(CAST(64, IL_FALSE, Rss), op_SUB_48); + RzILOpPure *op_LSHIFT_51 = SHIFTL0(op_LSHIFT_49, SN(32, 1)); + RzILOpPure *op_RSHIFT_53 = SHIFTR0(CAST(64, IL_FALSE, DUP(Rss)), VARL("offset")); + RzILOpPure *cond_54 = ITE(op_LT_43, op_LSHIFT_51, op_RSHIFT_53); + RzILOpPure *cond_58 = ITE(op_NE_41, SEXTRACT64(cond_54, SN(32, 0), VARL("width")), SN(64, 0)); + RzILOpEffect *op_ASSIGN_59 = WRITE_REG(bundle, Rdd_op, cond_58); + + RzILOpEffect *instruction_sequence = SEQN(3, op_ASSIGN_18, op_ASSIGN_37, op_ASSIGN_59); + return instruction_sequence; +} + +// Rd = lsl(Ii,Rt) +RzILOpEffect *hex_il_op_s4_lsli(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + // Declare: st32 shamt; + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + + // shamt = ((st32) sextract64(((ut64) Rt), 
0x0, 0x7)); + RzILOpEffect *op_ASSIGN_10 = SETL("shamt", CAST(32, MSB(SEXTRACT64(CAST(64, IL_FALSE, Rt), SN(32, 0), SN(32, 7))), SEXTRACT64(CAST(64, IL_FALSE, DUP(Rt)), SN(32, 0), SN(32, 7)))); + + // s = s; + RzILOpEffect *imm_assign_15 = SETL("s", s); + + // Rd = ((st32) ((shamt < 0x0) ? ((((ut64) ((ut32) s)) >> (-shamt) - 0x1) >> 0x1) : (((ut64) ((ut32) s)) << shamt))); + RzILOpPure *op_LT_14 = SLT(VARL("shamt"), SN(32, 0)); + RzILOpPure *op_NEG_19 = NEG(VARL("shamt")); + RzILOpPure *op_SUB_21 = SUB(op_NEG_19, SN(32, 1)); + RzILOpPure *op_RSHIFT_22 = SHIFTR0(CAST(64, IL_FALSE, CAST(32, IL_FALSE, VARL("s"))), op_SUB_21); + RzILOpPure *op_RSHIFT_24 = SHIFTR0(op_RSHIFT_22, SN(32, 1)); + RzILOpPure *op_LSHIFT_27 = SHIFTL0(CAST(64, IL_FALSE, CAST(32, IL_FALSE, VARL("s"))), VARL("shamt")); + RzILOpPure *cond_28 = ITE(op_LT_14, op_RSHIFT_24, op_LSHIFT_27); + RzILOpEffect *op_ASSIGN_30 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, cond_28)); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_15, op_ASSIGN_10, op_ASSIGN_30); + return instruction_sequence; +} + +// Pd = !tstbit(Rs,Ii) +RzILOpEffect *hex_il_op_s4_ntstbit_i(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Pd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + + // u = u; + RzILOpEffect *imm_assign_3 = SETL("u", u); + + // Pd = ((st8) (((Rs & (0x1 << u)) == 0x0) ? 
0xff : 0x0)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(SN(32, 1), VARL("u")); + RzILOpPure *op_AND_6 = LOGAND(Rs, op_LSHIFT_5); + RzILOpPure *op_EQ_8 = EQ(op_AND_6, SN(32, 0)); + RzILOpPure *cond_11 = ITE(op_EQ_8, SN(32, 0xff), SN(32, 0)); + RzILOpEffect *op_ASSIGN_13 = WRITE_REG(bundle, Pd_op, CAST(8, MSB(cond_11), DUP(cond_11))); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_3, op_ASSIGN_13); + return instruction_sequence; +} + +// Pd = !tstbit(Rs,Rt) +RzILOpEffect *hex_il_op_s4_ntstbit_r(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Pd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // Pd = ((st8) (((((ut64) ((ut32) Rs)) & ((sextract64(((ut64) Rt), 0x0, 0x7) < ((st64) 0x0)) ? ((((ut64) ((ut32) 0x1)) >> (-sextract64(((ut64) Rt), 0x0, 0x7)) - ((st64) 0x1)) >> 0x1) : (((ut64) ((ut32) 0x1)) << sextract64(((ut64) Rt), 0x0, 0x7)))) == ((ut64) 0x0)) ? 
0xff : 0x0)); + RzILOpPure *op_LT_15 = SLT(SEXTRACT64(CAST(64, IL_FALSE, Rt), SN(32, 0), SN(32, 7)), CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_NEG_27 = NEG(SEXTRACT64(CAST(64, IL_FALSE, DUP(Rt)), SN(32, 0), SN(32, 7))); + RzILOpPure *op_SUB_30 = SUB(op_NEG_27, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *op_RSHIFT_31 = SHIFTR0(CAST(64, IL_FALSE, CAST(32, IL_FALSE, SN(32, 1))), op_SUB_30); + RzILOpPure *op_RSHIFT_33 = SHIFTR0(op_RSHIFT_31, SN(32, 1)); + RzILOpPure *op_LSHIFT_45 = SHIFTL0(CAST(64, IL_FALSE, CAST(32, IL_FALSE, SN(32, 1))), SEXTRACT64(CAST(64, IL_FALSE, DUP(Rt)), SN(32, 0), SN(32, 7))); + RzILOpPure *cond_46 = ITE(op_LT_15, op_RSHIFT_33, op_LSHIFT_45); + RzILOpPure *op_AND_47 = LOGAND(CAST(64, IL_FALSE, CAST(32, IL_FALSE, Rs)), cond_46); + RzILOpPure *op_EQ_50 = EQ(op_AND_47, CAST(64, IL_FALSE, SN(32, 0))); + RzILOpPure *cond_53 = ITE(op_EQ_50, SN(32, 0xff), SN(32, 0)); + RzILOpEffect *op_ASSIGN_55 = WRITE_REG(bundle, Pd_op, CAST(8, MSB(cond_53), DUP(cond_53))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_55; + return instruction_sequence; +} + +// Rx |= and(Rs,Ii) +RzILOpEffect *hex_il_op_s4_or_andi(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + + // s = s; + RzILOpEffect *imm_assign_2 = SETL("s", s); + + // Rx = (Rx | (Rs & s)); + RzILOpPure *op_AND_4 = LOGAND(Rs, VARL("s")); + RzILOpPure *op_OR_5 = LOGOR(READ_REG(pkt, Rx_op, false), op_AND_4); + RzILOpEffect *op_ASSIGN_6 = WRITE_REG(bundle, Rx_op, op_OR_5); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_2, op_ASSIGN_6); + return instruction_sequence; +} + +// Rx = or(Ru,and(Rxin,Ii)) +RzILOpEffect *hex_il_op_s4_or_andix(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; 
+ // READ + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Ru_op = ISA2REG(hi, 'u', false); + RzILOpPure *Ru = READ_REG(pkt, Ru_op, false); + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + + // s = s; + RzILOpEffect *imm_assign_2 = SETL("s", s); + + // Rx = (Ru | (Rx & s)); + RzILOpPure *op_AND_4 = LOGAND(READ_REG(pkt, Rx_op, false), VARL("s")); + RzILOpPure *op_OR_5 = LOGOR(Ru, op_AND_4); + RzILOpEffect *op_ASSIGN_6 = WRITE_REG(bundle, Rx_op, op_OR_5); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_2, op_ASSIGN_6); + return instruction_sequence; +} + +// Rx |= or(Rs,Ii) +RzILOpEffect *hex_il_op_s4_or_ori(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + + // s = s; + RzILOpEffect *imm_assign_2 = SETL("s", s); + + // Rx = (Rx | (Rs | s)); + RzILOpPure *op_OR_4 = LOGOR(Rs, VARL("s")); + RzILOpPure *op_OR_5 = LOGOR(READ_REG(pkt, Rx_op, false), op_OR_4); + RzILOpEffect *op_ASSIGN_6 = WRITE_REG(bundle, Rx_op, op_OR_5); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_2, op_ASSIGN_6); + return instruction_sequence; +} + +// Rx = or(Ii,asl(Rxin,II)) +RzILOpEffect *hex_il_op_s4_ori_asl_ri(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + RzILOpPure *U = UN(32, (ut32)ISA2IMM(hi, 'U')); + + // u = u; + RzILOpEffect *imm_assign_1 = SETL("u", u); + + // U = U; + RzILOpEffect *imm_assign_3 = SETL("U", U); + + // Rx = ((st32) (u | ((ut32) (Rx << U)))); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(READ_REG(pkt, Rx_op, false), VARL("U")); + RzILOpPure *op_OR_7 = LOGOR(VARL("u"), CAST(32, IL_FALSE, op_LSHIFT_5)); + RzILOpEffect *op_ASSIGN_9 = 
WRITE_REG(bundle, Rx_op, CAST(32, IL_FALSE, op_OR_7)); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_1, imm_assign_3, op_ASSIGN_9); + return instruction_sequence; +} + +// Rx = or(Ii,lsr(Rxin,II)) +RzILOpEffect *hex_il_op_s4_ori_lsr_ri(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + RzILOpPure *U = UN(32, (ut32)ISA2IMM(hi, 'U')); + + // u = u; + RzILOpEffect *imm_assign_1 = SETL("u", u); + + // U = U; + RzILOpEffect *imm_assign_4 = SETL("U", U); + + // Rx = ((st32) (u | (((ut32) Rx) >> U))); + RzILOpPure *op_RSHIFT_6 = SHIFTR0(CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false)), VARL("U")); + RzILOpPure *op_OR_7 = LOGOR(VARL("u"), op_RSHIFT_6); + RzILOpEffect *op_ASSIGN_9 = WRITE_REG(bundle, Rx_op, CAST(32, IL_FALSE, op_OR_7)); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_1, imm_assign_4, op_ASSIGN_9); + return instruction_sequence; +} + +// Rd = parity(Rs,Rt) +RzILOpEffect *hex_il_op_s4_parity(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// if (!Pv) memb(Ii) = Rt +RzILOpEffect *hex_il_op_s4_pstorerbf_abs(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Pv_op = ISA2REG(hi, 'v', false); + RzILOpPure *Pv = READ_REG(pkt, Pv_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = u; + RzILOpEffect *op_ASSIGN_3 = SETL("EA", VARL("u")); + + // mem_store_ut8(EA, ((ut8) ((st8) ((Rt >> 0x0) & 0xff)))); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0xff)); + RzILOpEffect *ms_cast_ut8_18_19 = STOREW(VARL("EA"), CAST(8, IL_FALSE, CAST(8, MSB(op_AND_16), 
DUP(op_AND_16)))); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_20 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(mem_store_ut8(EA, ((ut8) ((st8) ((Rt >> 0x0) & 0xff))))); + RzILOpEffect *seq_then_21 = ms_cast_ut8_18_19; + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_22 = c_call_20; + + // if (! (((st32) Pv) & 0x1)) {seq(mem_store_ut8(EA, ((ut8) ((st8) ((Rt >> 0x0) & 0xff)))))} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_8 = LOGAND(CAST(32, MSB(Pv), DUP(Pv)), SN(32, 1)); + RzILOpPure *op_INV_9 = INV(NON_ZERO(op_AND_8)); + RzILOpEffect *branch_23 = BRANCH(op_INV_9, seq_then_21, seq_else_22); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_3, branch_23); + return instruction_sequence; +} + +// if (!Pv) memb(Rs+Ru<insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Ru_op = ISA2REG(hi, 'u', false); + RzILOpPure *Ru = READ_REG(pkt, Ru_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Pv_op = ISA2REG(hi, 'v', false); + RzILOpPure *Pv = READ_REG(pkt, Pv_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // u = u; + RzILOpEffect *imm_assign_3 = SETL("u", u); + + // EA = ((ut32) Rs + (Ru << u)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(Ru, VARL("u")); + RzILOpPure *op_ADD_6 = ADD(Rs, op_LSHIFT_5); + RzILOpEffect *op_ASSIGN_8 = SETL("EA", CAST(32, IL_FALSE, op_ADD_6)); + + // mem_store_ut8(EA, ((ut8) ((st8) ((Rt >> 0x0) & 0xff)))); + RzILOpPure *op_RSHIFT_19 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_21 = LOGAND(op_RSHIFT_19, SN(32, 0xff)); + RzILOpEffect *ms_cast_ut8_23_24 = STOREW(VARL("EA"), CAST(8, IL_FALSE, CAST(8, MSB(op_AND_21), DUP(op_AND_21)))); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_25 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(mem_store_ut8(EA, ((ut8) ((st8) ((Rt >> 0x0) & 
0xff))))); + RzILOpEffect *seq_then_26 = ms_cast_ut8_23_24; + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_27 = c_call_25; + + // if (! (((st32) Pv) & 0x1)) {seq(mem_store_ut8(EA, ((ut8) ((st8) ((Rt >> 0x0) & 0xff)))))} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_13 = LOGAND(CAST(32, MSB(Pv), DUP(Pv)), SN(32, 1)); + RzILOpPure *op_INV_14 = INV(NON_ZERO(op_AND_13)); + RzILOpEffect *branch_28 = BRANCH(op_INV_14, seq_then_26, seq_else_27); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_3, op_ASSIGN_8, branch_28); + return instruction_sequence; +} + +// if (!Pv.new) memb(Ii) = Rt +RzILOpEffect *hex_il_op_s4_pstorerbfnew_abs(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Pv_new_op = ISA2REG(hi, 'v', true); + RzILOpPure *Pv_new = READ_REG(pkt, Pv_new_op, true); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = u; + RzILOpEffect *op_ASSIGN_3 = SETL("EA", VARL("u")); + + // mem_store_ut8(EA, ((ut8) ((st8) ((Rt >> 0x0) & 0xff)))); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0xff)); + RzILOpEffect *ms_cast_ut8_18_19 = STOREW(VARL("EA"), CAST(8, IL_FALSE, CAST(8, MSB(op_AND_16), DUP(op_AND_16)))); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_20 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(mem_store_ut8(EA, ((ut8) ((st8) ((Rt >> 0x0) & 0xff))))); + RzILOpEffect *seq_then_21 = ms_cast_ut8_18_19; + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_22 = c_call_20; + + // if (! 
(((st32) Pv_new) & 0x1)) {seq(mem_store_ut8(EA, ((ut8) ((st8) ((Rt >> 0x0) & 0xff)))))} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_8 = LOGAND(CAST(32, MSB(Pv_new), DUP(Pv_new)), SN(32, 1)); + RzILOpPure *op_INV_9 = INV(NON_ZERO(op_AND_8)); + RzILOpEffect *branch_23 = BRANCH(op_INV_9, seq_then_21, seq_else_22); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_3, branch_23); + return instruction_sequence; +} + +// if (!Pv.new) memb(Rs+Ii) = Rt +RzILOpEffect *hex_il_op_s4_pstorerbfnew_io(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Pv_new_op = ISA2REG(hi, 'v', true); + RzILOpPure *Pv_new = READ_REG(pkt, Pv_new_op, true); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = ((ut32) Rs) + u; + RzILOpPure *op_ADD_5 = ADD(CAST(32, IL_FALSE, Rs), VARL("u")); + RzILOpEffect *op_ASSIGN_6 = SETL("EA", op_ADD_5); + + // mem_store_ut8(EA, ((ut8) ((st8) ((Rt >> 0x0) & 0xff)))); + RzILOpPure *op_RSHIFT_17 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_19 = LOGAND(op_RSHIFT_17, SN(32, 0xff)); + RzILOpEffect *ms_cast_ut8_21_22 = STOREW(VARL("EA"), CAST(8, IL_FALSE, CAST(8, MSB(op_AND_19), DUP(op_AND_19)))); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_23 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(mem_store_ut8(EA, ((ut8) ((st8) ((Rt >> 0x0) & 0xff))))); + RzILOpEffect *seq_then_24 = ms_cast_ut8_21_22; + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_25 = c_call_23; + + // if (! 
(((st32) Pv_new) & 0x1)) {seq(mem_store_ut8(EA, ((ut8) ((st8) ((Rt >> 0x0) & 0xff)))))} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_11 = LOGAND(CAST(32, MSB(Pv_new), DUP(Pv_new)), SN(32, 1)); + RzILOpPure *op_INV_12 = INV(NON_ZERO(op_AND_11)); + RzILOpEffect *branch_26 = BRANCH(op_INV_12, seq_then_24, seq_else_25); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_6, branch_26); + return instruction_sequence; +} + +// if (!Pv.new) memb(Rs+Ru<insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Ru_op = ISA2REG(hi, 'u', false); + RzILOpPure *Ru = READ_REG(pkt, Ru_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Pv_new_op = ISA2REG(hi, 'v', true); + RzILOpPure *Pv_new = READ_REG(pkt, Pv_new_op, true); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // u = u; + RzILOpEffect *imm_assign_3 = SETL("u", u); + + // EA = ((ut32) Rs + (Ru << u)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(Ru, VARL("u")); + RzILOpPure *op_ADD_6 = ADD(Rs, op_LSHIFT_5); + RzILOpEffect *op_ASSIGN_8 = SETL("EA", CAST(32, IL_FALSE, op_ADD_6)); + + // mem_store_ut8(EA, ((ut8) ((st8) ((Rt >> 0x0) & 0xff)))); + RzILOpPure *op_RSHIFT_19 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_21 = LOGAND(op_RSHIFT_19, SN(32, 0xff)); + RzILOpEffect *ms_cast_ut8_23_24 = STOREW(VARL("EA"), CAST(8, IL_FALSE, CAST(8, MSB(op_AND_21), DUP(op_AND_21)))); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_25 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(mem_store_ut8(EA, ((ut8) ((st8) ((Rt >> 0x0) & 0xff))))); + RzILOpEffect *seq_then_26 = ms_cast_ut8_23_24; + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_27 = c_call_25; + + // if (! 
(((st32) Pv_new) & 0x1)) {seq(mem_store_ut8(EA, ((ut8) ((st8) ((Rt >> 0x0) & 0xff)))))} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_13 = LOGAND(CAST(32, MSB(Pv_new), DUP(Pv_new)), SN(32, 1)); + RzILOpPure *op_INV_14 = INV(NON_ZERO(op_AND_13)); + RzILOpEffect *branch_28 = BRANCH(op_INV_14, seq_then_26, seq_else_27); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_3, op_ASSIGN_8, branch_28); + return instruction_sequence; +} + +// if (!Pv) memb(Ii) = Nt.new +RzILOpEffect *hex_il_op_s4_pstorerbnewf_abs(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Pv_op = ISA2REG(hi, 'v', false); + RzILOpPure *Pv = READ_REG(pkt, Pv_op, false); + const HexOp Nt_new_op = NREG2OP(bundle, 't'); + RzILOpPure *Nt_new = READ_REG(pkt, &Nt_new_op, true); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = u; + RzILOpEffect *op_ASSIGN_3 = SETL("EA", VARL("u")); + + // mem_store_ut8(EA, ((ut8) ((st8) ((Nt_new >> 0x0) & 0xff)))); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(Nt_new, SN(32, 0)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0xff)); + RzILOpEffect *ms_cast_ut8_18_19 = STOREW(VARL("EA"), CAST(8, IL_FALSE, CAST(8, MSB(op_AND_16), DUP(op_AND_16)))); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_20 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(mem_store_ut8(EA, ((ut8) ((st8) ((Nt_new >> 0x0) & 0xff))))); + RzILOpEffect *seq_then_21 = ms_cast_ut8_18_19; + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_22 = c_call_20; + + // if (! 
(((st32) Pv) & 0x1)) {seq(mem_store_ut8(EA, ((ut8) ((st8) ((Nt_new >> 0x0) & 0xff)))))} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_8 = LOGAND(CAST(32, MSB(Pv), DUP(Pv)), SN(32, 1)); + RzILOpPure *op_INV_9 = INV(NON_ZERO(op_AND_8)); + RzILOpEffect *branch_23 = BRANCH(op_INV_9, seq_then_21, seq_else_22); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_3, branch_23); + return instruction_sequence; +} + +// if (!Pv) memb(Rs+Ru<insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Ru_op = ISA2REG(hi, 'u', false); + RzILOpPure *Ru = READ_REG(pkt, Ru_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Pv_op = ISA2REG(hi, 'v', false); + RzILOpPure *Pv = READ_REG(pkt, Pv_op, false); + const HexOp Nt_new_op = NREG2OP(bundle, 't'); + RzILOpPure *Nt_new = READ_REG(pkt, &Nt_new_op, true); + + // u = u; + RzILOpEffect *imm_assign_3 = SETL("u", u); + + // EA = ((ut32) Rs + (Ru << u)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(Ru, VARL("u")); + RzILOpPure *op_ADD_6 = ADD(Rs, op_LSHIFT_5); + RzILOpEffect *op_ASSIGN_8 = SETL("EA", CAST(32, IL_FALSE, op_ADD_6)); + + // mem_store_ut8(EA, ((ut8) ((st8) ((Nt_new >> 0x0) & 0xff)))); + RzILOpPure *op_RSHIFT_19 = SHIFTRA(Nt_new, SN(32, 0)); + RzILOpPure *op_AND_21 = LOGAND(op_RSHIFT_19, SN(32, 0xff)); + RzILOpEffect *ms_cast_ut8_23_24 = STOREW(VARL("EA"), CAST(8, IL_FALSE, CAST(8, MSB(op_AND_21), DUP(op_AND_21)))); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_25 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(mem_store_ut8(EA, ((ut8) ((st8) ((Nt_new >> 0x0) & 0xff))))); + RzILOpEffect *seq_then_26 = ms_cast_ut8_23_24; + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_27 = c_call_25; + + // if (! 
(((st32) Pv) & 0x1)) {seq(mem_store_ut8(EA, ((ut8) ((st8) ((Nt_new >> 0x0) & 0xff)))))} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_13 = LOGAND(CAST(32, MSB(Pv), DUP(Pv)), SN(32, 1)); + RzILOpPure *op_INV_14 = INV(NON_ZERO(op_AND_13)); + RzILOpEffect *branch_28 = BRANCH(op_INV_14, seq_then_26, seq_else_27); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_3, op_ASSIGN_8, branch_28); + return instruction_sequence; +} + +// if (!Pv.new) memb(Ii) = Nt.new +RzILOpEffect *hex_il_op_s4_pstorerbnewfnew_abs(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Pv_new_op = ISA2REG(hi, 'v', true); + RzILOpPure *Pv_new = READ_REG(pkt, Pv_new_op, true); + const HexOp Nt_new_op = NREG2OP(bundle, 't'); + RzILOpPure *Nt_new = READ_REG(pkt, &Nt_new_op, true); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = u; + RzILOpEffect *op_ASSIGN_3 = SETL("EA", VARL("u")); + + // mem_store_ut8(EA, ((ut8) ((st8) ((Nt_new >> 0x0) & 0xff)))); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(Nt_new, SN(32, 0)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0xff)); + RzILOpEffect *ms_cast_ut8_18_19 = STOREW(VARL("EA"), CAST(8, IL_FALSE, CAST(8, MSB(op_AND_16), DUP(op_AND_16)))); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_20 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(mem_store_ut8(EA, ((ut8) ((st8) ((Nt_new >> 0x0) & 0xff))))); + RzILOpEffect *seq_then_21 = ms_cast_ut8_18_19; + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_22 = c_call_20; + + // if (! 
(((st32) Pv_new) & 0x1)) {seq(mem_store_ut8(EA, ((ut8) ((st8) ((Nt_new >> 0x0) & 0xff)))))} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_8 = LOGAND(CAST(32, MSB(Pv_new), DUP(Pv_new)), SN(32, 1)); + RzILOpPure *op_INV_9 = INV(NON_ZERO(op_AND_8)); + RzILOpEffect *branch_23 = BRANCH(op_INV_9, seq_then_21, seq_else_22); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_3, branch_23); + return instruction_sequence; +} + +// if (!Pv.new) memb(Rs+Ii) = Nt.new +RzILOpEffect *hex_il_op_s4_pstorerbnewfnew_io(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Pv_new_op = ISA2REG(hi, 'v', true); + RzILOpPure *Pv_new = READ_REG(pkt, Pv_new_op, true); + const HexOp Nt_new_op = NREG2OP(bundle, 't'); + RzILOpPure *Nt_new = READ_REG(pkt, &Nt_new_op, true); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = ((ut32) Rs) + u; + RzILOpPure *op_ADD_5 = ADD(CAST(32, IL_FALSE, Rs), VARL("u")); + RzILOpEffect *op_ASSIGN_6 = SETL("EA", op_ADD_5); + + // mem_store_ut8(EA, ((ut8) ((st8) ((Nt_new >> 0x0) & 0xff)))); + RzILOpPure *op_RSHIFT_17 = SHIFTRA(Nt_new, SN(32, 0)); + RzILOpPure *op_AND_19 = LOGAND(op_RSHIFT_17, SN(32, 0xff)); + RzILOpEffect *ms_cast_ut8_21_22 = STOREW(VARL("EA"), CAST(8, IL_FALSE, CAST(8, MSB(op_AND_19), DUP(op_AND_19)))); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_23 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(mem_store_ut8(EA, ((ut8) ((st8) ((Nt_new >> 0x0) & 0xff))))); + RzILOpEffect *seq_then_24 = ms_cast_ut8_21_22; + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_25 = c_call_23; + + // if (! 
(((st32) Pv_new) & 0x1)) {seq(mem_store_ut8(EA, ((ut8) ((st8) ((Nt_new >> 0x0) & 0xff)))))} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_11 = LOGAND(CAST(32, MSB(Pv_new), DUP(Pv_new)), SN(32, 1)); + RzILOpPure *op_INV_12 = INV(NON_ZERO(op_AND_11)); + RzILOpEffect *branch_26 = BRANCH(op_INV_12, seq_then_24, seq_else_25); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_6, branch_26); + return instruction_sequence; +} + +// if (!Pv.new) memb(Rs+Ru<insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Ru_op = ISA2REG(hi, 'u', false); + RzILOpPure *Ru = READ_REG(pkt, Ru_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Pv_new_op = ISA2REG(hi, 'v', true); + RzILOpPure *Pv_new = READ_REG(pkt, Pv_new_op, true); + const HexOp Nt_new_op = NREG2OP(bundle, 't'); + RzILOpPure *Nt_new = READ_REG(pkt, &Nt_new_op, true); + + // u = u; + RzILOpEffect *imm_assign_3 = SETL("u", u); + + // EA = ((ut32) Rs + (Ru << u)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(Ru, VARL("u")); + RzILOpPure *op_ADD_6 = ADD(Rs, op_LSHIFT_5); + RzILOpEffect *op_ASSIGN_8 = SETL("EA", CAST(32, IL_FALSE, op_ADD_6)); + + // mem_store_ut8(EA, ((ut8) ((st8) ((Nt_new >> 0x0) & 0xff)))); + RzILOpPure *op_RSHIFT_19 = SHIFTRA(Nt_new, SN(32, 0)); + RzILOpPure *op_AND_21 = LOGAND(op_RSHIFT_19, SN(32, 0xff)); + RzILOpEffect *ms_cast_ut8_23_24 = STOREW(VARL("EA"), CAST(8, IL_FALSE, CAST(8, MSB(op_AND_21), DUP(op_AND_21)))); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_25 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(mem_store_ut8(EA, ((ut8) ((st8) ((Nt_new >> 0x0) & 0xff))))); + RzILOpEffect *seq_then_26 = ms_cast_ut8_23_24; + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_27 = c_call_25; + + // if (! 
(((st32) Pv_new) & 0x1)) {seq(mem_store_ut8(EA, ((ut8) ((st8) ((Nt_new >> 0x0) & 0xff)))))} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_13 = LOGAND(CAST(32, MSB(Pv_new), DUP(Pv_new)), SN(32, 1)); + RzILOpPure *op_INV_14 = INV(NON_ZERO(op_AND_13)); + RzILOpEffect *branch_28 = BRANCH(op_INV_14, seq_then_26, seq_else_27); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_3, op_ASSIGN_8, branch_28); + return instruction_sequence; +} + +// if (Pv) memb(Ii) = Nt.new +RzILOpEffect *hex_il_op_s4_pstorerbnewt_abs(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Pv_op = ISA2REG(hi, 'v', false); + RzILOpPure *Pv = READ_REG(pkt, Pv_op, false); + const HexOp Nt_new_op = NREG2OP(bundle, 't'); + RzILOpPure *Nt_new = READ_REG(pkt, &Nt_new_op, true); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = u; + RzILOpEffect *op_ASSIGN_3 = SETL("EA", VARL("u")); + + // mem_store_ut8(EA, ((ut8) ((st8) ((Nt_new >> 0x0) & 0xff)))); + RzILOpPure *op_RSHIFT_13 = SHIFTRA(Nt_new, SN(32, 0)); + RzILOpPure *op_AND_15 = LOGAND(op_RSHIFT_13, SN(32, 0xff)); + RzILOpEffect *ms_cast_ut8_17_18 = STOREW(VARL("EA"), CAST(8, IL_FALSE, CAST(8, MSB(op_AND_15), DUP(op_AND_15)))); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_19 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(mem_store_ut8(EA, ((ut8) ((st8) ((Nt_new >> 0x0) & 0xff))))); + RzILOpEffect *seq_then_20 = ms_cast_ut8_17_18; + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_21 = c_call_19; + + // if ((((st32) Pv) & 0x1)) {seq(mem_store_ut8(EA, ((ut8) ((st8) ((Nt_new >> 0x0) & 0xff)))))} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_8 = LOGAND(CAST(32, MSB(Pv), DUP(Pv)), SN(32, 1)); + RzILOpEffect *branch_22 = BRANCH(NON_ZERO(op_AND_8), seq_then_20, seq_else_21); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_3, 
branch_22); + return instruction_sequence; +} + +// if (Pv) memb(Rs+Ru<insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Ru_op = ISA2REG(hi, 'u', false); + RzILOpPure *Ru = READ_REG(pkt, Ru_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Pv_op = ISA2REG(hi, 'v', false); + RzILOpPure *Pv = READ_REG(pkt, Pv_op, false); + const HexOp Nt_new_op = NREG2OP(bundle, 't'); + RzILOpPure *Nt_new = READ_REG(pkt, &Nt_new_op, true); + + // u = u; + RzILOpEffect *imm_assign_3 = SETL("u", u); + + // EA = ((ut32) Rs + (Ru << u)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(Ru, VARL("u")); + RzILOpPure *op_ADD_6 = ADD(Rs, op_LSHIFT_5); + RzILOpEffect *op_ASSIGN_8 = SETL("EA", CAST(32, IL_FALSE, op_ADD_6)); + + // mem_store_ut8(EA, ((ut8) ((st8) ((Nt_new >> 0x0) & 0xff)))); + RzILOpPure *op_RSHIFT_18 = SHIFTRA(Nt_new, SN(32, 0)); + RzILOpPure *op_AND_20 = LOGAND(op_RSHIFT_18, SN(32, 0xff)); + RzILOpEffect *ms_cast_ut8_22_23 = STOREW(VARL("EA"), CAST(8, IL_FALSE, CAST(8, MSB(op_AND_20), DUP(op_AND_20)))); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_24 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(mem_store_ut8(EA, ((ut8) ((st8) ((Nt_new >> 0x0) & 0xff))))); + RzILOpEffect *seq_then_25 = ms_cast_ut8_22_23; + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_26 = c_call_24; + + // if ((((st32) Pv) & 0x1)) {seq(mem_store_ut8(EA, ((ut8) ((st8) ((Nt_new >> 0x0) & 0xff)))))} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_13 = LOGAND(CAST(32, MSB(Pv), DUP(Pv)), SN(32, 1)); + RzILOpEffect *branch_27 = BRANCH(NON_ZERO(op_AND_13), seq_then_25, seq_else_26); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_3, op_ASSIGN_8, branch_27); + return instruction_sequence; +} + +// if (Pv.new) memb(Ii) = Nt.new +RzILOpEffect *hex_il_op_s4_pstorerbnewtnew_abs(HexInsnPktBundle *bundle) { + const HexInsn *hi = 
bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Pv_new_op = ISA2REG(hi, 'v', true); + RzILOpPure *Pv_new = READ_REG(pkt, Pv_new_op, true); + const HexOp Nt_new_op = NREG2OP(bundle, 't'); + RzILOpPure *Nt_new = READ_REG(pkt, &Nt_new_op, true); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = u; + RzILOpEffect *op_ASSIGN_3 = SETL("EA", VARL("u")); + + // mem_store_ut8(EA, ((ut8) ((st8) ((Nt_new >> 0x0) & 0xff)))); + RzILOpPure *op_RSHIFT_13 = SHIFTRA(Nt_new, SN(32, 0)); + RzILOpPure *op_AND_15 = LOGAND(op_RSHIFT_13, SN(32, 0xff)); + RzILOpEffect *ms_cast_ut8_17_18 = STOREW(VARL("EA"), CAST(8, IL_FALSE, CAST(8, MSB(op_AND_15), DUP(op_AND_15)))); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_19 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(mem_store_ut8(EA, ((ut8) ((st8) ((Nt_new >> 0x0) & 0xff))))); + RzILOpEffect *seq_then_20 = ms_cast_ut8_17_18; + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_21 = c_call_19; + + // if ((((st32) Pv_new) & 0x1)) {seq(mem_store_ut8(EA, ((ut8) ((st8) ((Nt_new >> 0x0) & 0xff)))))} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_8 = LOGAND(CAST(32, MSB(Pv_new), DUP(Pv_new)), SN(32, 1)); + RzILOpEffect *branch_22 = BRANCH(NON_ZERO(op_AND_8), seq_then_20, seq_else_21); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_3, branch_22); + return instruction_sequence; +} + +// if (Pv.new) memb(Rs+Ii) = Nt.new +RzILOpEffect *hex_il_op_s4_pstorerbnewtnew_io(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Pv_new_op = ISA2REG(hi, 'v', true); + RzILOpPure *Pv_new = READ_REG(pkt, Pv_new_op, true); + const HexOp Nt_new_op = NREG2OP(bundle, 't'); + 
RzILOpPure *Nt_new = READ_REG(pkt, &Nt_new_op, true); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = ((ut32) Rs) + u; + RzILOpPure *op_ADD_5 = ADD(CAST(32, IL_FALSE, Rs), VARL("u")); + RzILOpEffect *op_ASSIGN_6 = SETL("EA", op_ADD_5); + + // mem_store_ut8(EA, ((ut8) ((st8) ((Nt_new >> 0x0) & 0xff)))); + RzILOpPure *op_RSHIFT_16 = SHIFTRA(Nt_new, SN(32, 0)); + RzILOpPure *op_AND_18 = LOGAND(op_RSHIFT_16, SN(32, 0xff)); + RzILOpEffect *ms_cast_ut8_20_21 = STOREW(VARL("EA"), CAST(8, IL_FALSE, CAST(8, MSB(op_AND_18), DUP(op_AND_18)))); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_22 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(mem_store_ut8(EA, ((ut8) ((st8) ((Nt_new >> 0x0) & 0xff))))); + RzILOpEffect *seq_then_23 = ms_cast_ut8_20_21; + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_24 = c_call_22; + + // if ((((st32) Pv_new) & 0x1)) {seq(mem_store_ut8(EA, ((ut8) ((st8) ((Nt_new >> 0x0) & 0xff)))))} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_11 = LOGAND(CAST(32, MSB(Pv_new), DUP(Pv_new)), SN(32, 1)); + RzILOpEffect *branch_25 = BRANCH(NON_ZERO(op_AND_11), seq_then_23, seq_else_24); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_6, branch_25); + return instruction_sequence; +} + +// if (Pv.new) memb(Rs+Ru<insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Ru_op = ISA2REG(hi, 'u', false); + RzILOpPure *Ru = READ_REG(pkt, Ru_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Pv_new_op = ISA2REG(hi, 'v', true); + RzILOpPure *Pv_new = READ_REG(pkt, Pv_new_op, true); + const HexOp Nt_new_op = NREG2OP(bundle, 't'); + RzILOpPure *Nt_new = READ_REG(pkt, &Nt_new_op, true); + + // u = u; + RzILOpEffect *imm_assign_3 = SETL("u", u); + + // EA = ((ut32) Rs + (Ru << u)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(Ru, VARL("u")); + RzILOpPure *op_ADD_6 = 
ADD(Rs, op_LSHIFT_5); + RzILOpEffect *op_ASSIGN_8 = SETL("EA", CAST(32, IL_FALSE, op_ADD_6)); + + // mem_store_ut8(EA, ((ut8) ((st8) ((Nt_new >> 0x0) & 0xff)))); + RzILOpPure *op_RSHIFT_18 = SHIFTRA(Nt_new, SN(32, 0)); + RzILOpPure *op_AND_20 = LOGAND(op_RSHIFT_18, SN(32, 0xff)); + RzILOpEffect *ms_cast_ut8_22_23 = STOREW(VARL("EA"), CAST(8, IL_FALSE, CAST(8, MSB(op_AND_20), DUP(op_AND_20)))); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_24 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(mem_store_ut8(EA, ((ut8) ((st8) ((Nt_new >> 0x0) & 0xff))))); + RzILOpEffect *seq_then_25 = ms_cast_ut8_22_23; + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_26 = c_call_24; + + // if ((((st32) Pv_new) & 0x1)) {seq(mem_store_ut8(EA, ((ut8) ((st8) ((Nt_new >> 0x0) & 0xff)))))} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_13 = LOGAND(CAST(32, MSB(Pv_new), DUP(Pv_new)), SN(32, 1)); + RzILOpEffect *branch_27 = BRANCH(NON_ZERO(op_AND_13), seq_then_25, seq_else_26); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_3, op_ASSIGN_8, branch_27); + return instruction_sequence; +} + +// if (Pv) memb(Ii) = Rt +RzILOpEffect *hex_il_op_s4_pstorerbt_abs(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Pv_op = ISA2REG(hi, 'v', false); + RzILOpPure *Pv = READ_REG(pkt, Pv_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = u; + RzILOpEffect *op_ASSIGN_3 = SETL("EA", VARL("u")); + + // mem_store_ut8(EA, ((ut8) ((st8) ((Rt >> 0x0) & 0xff)))); + RzILOpPure *op_RSHIFT_13 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_15 = LOGAND(op_RSHIFT_13, SN(32, 0xff)); + RzILOpEffect *ms_cast_ut8_17_18 = STOREW(VARL("EA"), CAST(8, IL_FALSE, CAST(8, MSB(op_AND_15), DUP(op_AND_15)))); + + // HYB(call_pkt, slot); 
+ RzILOpEffect *c_call_19 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(mem_store_ut8(EA, ((ut8) ((st8) ((Rt >> 0x0) & 0xff))))); + RzILOpEffect *seq_then_20 = ms_cast_ut8_17_18; + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_21 = c_call_19; + + // if ((((st32) Pv) & 0x1)) {seq(mem_store_ut8(EA, ((ut8) ((st8) ((Rt >> 0x0) & 0xff)))))} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_8 = LOGAND(CAST(32, MSB(Pv), DUP(Pv)), SN(32, 1)); + RzILOpEffect *branch_22 = BRANCH(NON_ZERO(op_AND_8), seq_then_20, seq_else_21); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_3, branch_22); + return instruction_sequence; +} + +// if (Pv) memb(Rs+Ru<insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Ru_op = ISA2REG(hi, 'u', false); + RzILOpPure *Ru = READ_REG(pkt, Ru_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Pv_op = ISA2REG(hi, 'v', false); + RzILOpPure *Pv = READ_REG(pkt, Pv_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // u = u; + RzILOpEffect *imm_assign_3 = SETL("u", u); + + // EA = ((ut32) Rs + (Ru << u)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(Ru, VARL("u")); + RzILOpPure *op_ADD_6 = ADD(Rs, op_LSHIFT_5); + RzILOpEffect *op_ASSIGN_8 = SETL("EA", CAST(32, IL_FALSE, op_ADD_6)); + + // mem_store_ut8(EA, ((ut8) ((st8) ((Rt >> 0x0) & 0xff)))); + RzILOpPure *op_RSHIFT_18 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_20 = LOGAND(op_RSHIFT_18, SN(32, 0xff)); + RzILOpEffect *ms_cast_ut8_22_23 = STOREW(VARL("EA"), CAST(8, IL_FALSE, CAST(8, MSB(op_AND_20), DUP(op_AND_20)))); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_24 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(mem_store_ut8(EA, ((ut8) ((st8) ((Rt >> 0x0) & 0xff))))); + RzILOpEffect *seq_then_25 = ms_cast_ut8_22_23; + + // seq(HYB(call_pkt, 
slot)); + RzILOpEffect *seq_else_26 = c_call_24; + + // if ((((st32) Pv) & 0x1)) {seq(mem_store_ut8(EA, ((ut8) ((st8) ((Rt >> 0x0) & 0xff)))))} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_13 = LOGAND(CAST(32, MSB(Pv), DUP(Pv)), SN(32, 1)); + RzILOpEffect *branch_27 = BRANCH(NON_ZERO(op_AND_13), seq_then_25, seq_else_26); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_3, op_ASSIGN_8, branch_27); + return instruction_sequence; +} + +// if (Pv.new) memb(Ii) = Rt +RzILOpEffect *hex_il_op_s4_pstorerbtnew_abs(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Pv_new_op = ISA2REG(hi, 'v', true); + RzILOpPure *Pv_new = READ_REG(pkt, Pv_new_op, true); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = u; + RzILOpEffect *op_ASSIGN_3 = SETL("EA", VARL("u")); + + // mem_store_ut8(EA, ((ut8) ((st8) ((Rt >> 0x0) & 0xff)))); + RzILOpPure *op_RSHIFT_13 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_15 = LOGAND(op_RSHIFT_13, SN(32, 0xff)); + RzILOpEffect *ms_cast_ut8_17_18 = STOREW(VARL("EA"), CAST(8, IL_FALSE, CAST(8, MSB(op_AND_15), DUP(op_AND_15)))); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_19 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(mem_store_ut8(EA, ((ut8) ((st8) ((Rt >> 0x0) & 0xff))))); + RzILOpEffect *seq_then_20 = ms_cast_ut8_17_18; + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_21 = c_call_19; + + // if ((((st32) Pv_new) & 0x1)) {seq(mem_store_ut8(EA, ((ut8) ((st8) ((Rt >> 0x0) & 0xff)))))} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_8 = LOGAND(CAST(32, MSB(Pv_new), DUP(Pv_new)), SN(32, 1)); + RzILOpEffect *branch_22 = BRANCH(NON_ZERO(op_AND_8), seq_then_20, seq_else_21); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_3, 
branch_22); + return instruction_sequence; +} + +// if (Pv.new) memb(Rs+Ii) = Rt +RzILOpEffect *hex_il_op_s4_pstorerbtnew_io(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Pv_new_op = ISA2REG(hi, 'v', true); + RzILOpPure *Pv_new = READ_REG(pkt, Pv_new_op, true); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = ((ut32) Rs) + u; + RzILOpPure *op_ADD_5 = ADD(CAST(32, IL_FALSE, Rs), VARL("u")); + RzILOpEffect *op_ASSIGN_6 = SETL("EA", op_ADD_5); + + // mem_store_ut8(EA, ((ut8) ((st8) ((Rt >> 0x0) & 0xff)))); + RzILOpPure *op_RSHIFT_16 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_18 = LOGAND(op_RSHIFT_16, SN(32, 0xff)); + RzILOpEffect *ms_cast_ut8_20_21 = STOREW(VARL("EA"), CAST(8, IL_FALSE, CAST(8, MSB(op_AND_18), DUP(op_AND_18)))); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_22 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(mem_store_ut8(EA, ((ut8) ((st8) ((Rt >> 0x0) & 0xff))))); + RzILOpEffect *seq_then_23 = ms_cast_ut8_20_21; + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_24 = c_call_22; + + // if ((((st32) Pv_new) & 0x1)) {seq(mem_store_ut8(EA, ((ut8) ((st8) ((Rt >> 0x0) & 0xff)))))} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_11 = LOGAND(CAST(32, MSB(Pv_new), DUP(Pv_new)), SN(32, 1)); + RzILOpEffect *branch_25 = BRANCH(NON_ZERO(op_AND_11), seq_then_23, seq_else_24); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_6, branch_25); + return instruction_sequence; +} + +// if (Pv.new) memb(Rs+Ru<insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); 
+ const HexOp *Ru_op = ISA2REG(hi, 'u', false); + RzILOpPure *Ru = READ_REG(pkt, Ru_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Pv_new_op = ISA2REG(hi, 'v', true); + RzILOpPure *Pv_new = READ_REG(pkt, Pv_new_op, true); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // u = u; + RzILOpEffect *imm_assign_3 = SETL("u", u); + + // EA = ((ut32) Rs + (Ru << u)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(Ru, VARL("u")); + RzILOpPure *op_ADD_6 = ADD(Rs, op_LSHIFT_5); + RzILOpEffect *op_ASSIGN_8 = SETL("EA", CAST(32, IL_FALSE, op_ADD_6)); + + // mem_store_ut8(EA, ((ut8) ((st8) ((Rt >> 0x0) & 0xff)))); + RzILOpPure *op_RSHIFT_18 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_20 = LOGAND(op_RSHIFT_18, SN(32, 0xff)); + RzILOpEffect *ms_cast_ut8_22_23 = STOREW(VARL("EA"), CAST(8, IL_FALSE, CAST(8, MSB(op_AND_20), DUP(op_AND_20)))); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_24 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(mem_store_ut8(EA, ((ut8) ((st8) ((Rt >> 0x0) & 0xff))))); + RzILOpEffect *seq_then_25 = ms_cast_ut8_22_23; + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_26 = c_call_24; + + // if ((((st32) Pv_new) & 0x1)) {seq(mem_store_ut8(EA, ((ut8) ((st8) ((Rt >> 0x0) & 0xff)))))} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_13 = LOGAND(CAST(32, MSB(Pv_new), DUP(Pv_new)), SN(32, 1)); + RzILOpEffect *branch_27 = BRANCH(NON_ZERO(op_AND_13), seq_then_25, seq_else_26); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_3, op_ASSIGN_8, branch_27); + return instruction_sequence; +} + +// if (!Pv) memd(Ii) = Rtt +RzILOpEffect *hex_il_op_s4_pstorerdf_abs(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Pv_op = ISA2REG(hi, 'v', false); + RzILOpPure *Pv = READ_REG(pkt, Pv_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 
't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = u; + RzILOpEffect *op_ASSIGN_3 = SETL("EA", VARL("u")); + + // mem_store_ut64(EA, ((ut64) Rtt)); + RzILOpEffect *ms_cast_ut64_11_12 = STOREW(VARL("EA"), CAST(64, IL_FALSE, Rtt)); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_13 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(mem_store_ut64(EA, ((ut64) Rtt))); + RzILOpEffect *seq_then_14 = ms_cast_ut64_11_12; + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_15 = c_call_13; + + // if (! (((st32) Pv) & 0x1)) {seq(mem_store_ut64(EA, ((ut64) Rtt)))} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_8 = LOGAND(CAST(32, MSB(Pv), DUP(Pv)), SN(32, 1)); + RzILOpPure *op_INV_9 = INV(NON_ZERO(op_AND_8)); + RzILOpEffect *branch_16 = BRANCH(op_INV_9, seq_then_14, seq_else_15); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_3, branch_16); + return instruction_sequence; +} + +// if (!Pv) memd(Rs+Ru<insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Ru_op = ISA2REG(hi, 'u', false); + RzILOpPure *Ru = READ_REG(pkt, Ru_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Pv_op = ISA2REG(hi, 'v', false); + RzILOpPure *Pv = READ_REG(pkt, Pv_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // u = u; + RzILOpEffect *imm_assign_3 = SETL("u", u); + + // EA = ((ut32) Rs + (Ru << u)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(Ru, VARL("u")); + RzILOpPure *op_ADD_6 = ADD(Rs, op_LSHIFT_5); + RzILOpEffect *op_ASSIGN_8 = SETL("EA", CAST(32, IL_FALSE, op_ADD_6)); + + // mem_store_ut64(EA, ((ut64) Rtt)); + RzILOpEffect *ms_cast_ut64_16_17 = STOREW(VARL("EA"), CAST(64, IL_FALSE, Rtt)); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_18 = 
HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(mem_store_ut64(EA, ((ut64) Rtt))); + RzILOpEffect *seq_then_19 = ms_cast_ut64_16_17; + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_20 = c_call_18; + + // if (! (((st32) Pv) & 0x1)) {seq(mem_store_ut64(EA, ((ut64) Rtt)))} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_13 = LOGAND(CAST(32, MSB(Pv), DUP(Pv)), SN(32, 1)); + RzILOpPure *op_INV_14 = INV(NON_ZERO(op_AND_13)); + RzILOpEffect *branch_21 = BRANCH(op_INV_14, seq_then_19, seq_else_20); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_3, op_ASSIGN_8, branch_21); + return instruction_sequence; +} + +// if (!Pv.new) memd(Ii) = Rtt +RzILOpEffect *hex_il_op_s4_pstorerdfnew_abs(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Pv_new_op = ISA2REG(hi, 'v', true); + RzILOpPure *Pv_new = READ_REG(pkt, Pv_new_op, true); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = u; + RzILOpEffect *op_ASSIGN_3 = SETL("EA", VARL("u")); + + // mem_store_ut64(EA, ((ut64) Rtt)); + RzILOpEffect *ms_cast_ut64_11_12 = STOREW(VARL("EA"), CAST(64, IL_FALSE, Rtt)); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_13 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(mem_store_ut64(EA, ((ut64) Rtt))); + RzILOpEffect *seq_then_14 = ms_cast_ut64_11_12; + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_15 = c_call_13; + + // if (! 
(((st32) Pv_new) & 0x1)) {seq(mem_store_ut64(EA, ((ut64) Rtt)))} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_8 = LOGAND(CAST(32, MSB(Pv_new), DUP(Pv_new)), SN(32, 1)); + RzILOpPure *op_INV_9 = INV(NON_ZERO(op_AND_8)); + RzILOpEffect *branch_16 = BRANCH(op_INV_9, seq_then_14, seq_else_15); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_3, branch_16); + return instruction_sequence; +} + +// if (!Pv.new) memd(Rs+Ii) = Rtt +RzILOpEffect *hex_il_op_s4_pstorerdfnew_io(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Pv_new_op = ISA2REG(hi, 'v', true); + RzILOpPure *Pv_new = READ_REG(pkt, Pv_new_op, true); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = ((ut32) Rs) + u; + RzILOpPure *op_ADD_5 = ADD(CAST(32, IL_FALSE, Rs), VARL("u")); + RzILOpEffect *op_ASSIGN_6 = SETL("EA", op_ADD_5); + + // mem_store_ut64(EA, ((ut64) Rtt)); + RzILOpEffect *ms_cast_ut64_14_15 = STOREW(VARL("EA"), CAST(64, IL_FALSE, Rtt)); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_16 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(mem_store_ut64(EA, ((ut64) Rtt))); + RzILOpEffect *seq_then_17 = ms_cast_ut64_14_15; + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_18 = c_call_16; + + // if (! 
(((st32) Pv_new) & 0x1)) {seq(mem_store_ut64(EA, ((ut64) Rtt)))} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_11 = LOGAND(CAST(32, MSB(Pv_new), DUP(Pv_new)), SN(32, 1)); + RzILOpPure *op_INV_12 = INV(NON_ZERO(op_AND_11)); + RzILOpEffect *branch_19 = BRANCH(op_INV_12, seq_then_17, seq_else_18); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_6, branch_19); + return instruction_sequence; +} + +// if (!Pv.new) memd(Rs+Ru<insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Ru_op = ISA2REG(hi, 'u', false); + RzILOpPure *Ru = READ_REG(pkt, Ru_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Pv_new_op = ISA2REG(hi, 'v', true); + RzILOpPure *Pv_new = READ_REG(pkt, Pv_new_op, true); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // u = u; + RzILOpEffect *imm_assign_3 = SETL("u", u); + + // EA = ((ut32) Rs + (Ru << u)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(Ru, VARL("u")); + RzILOpPure *op_ADD_6 = ADD(Rs, op_LSHIFT_5); + RzILOpEffect *op_ASSIGN_8 = SETL("EA", CAST(32, IL_FALSE, op_ADD_6)); + + // mem_store_ut64(EA, ((ut64) Rtt)); + RzILOpEffect *ms_cast_ut64_16_17 = STOREW(VARL("EA"), CAST(64, IL_FALSE, Rtt)); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_18 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(mem_store_ut64(EA, ((ut64) Rtt))); + RzILOpEffect *seq_then_19 = ms_cast_ut64_16_17; + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_20 = c_call_18; + + // if (! 
(((st32) Pv_new) & 0x1)) {seq(mem_store_ut64(EA, ((ut64) Rtt)))} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_13 = LOGAND(CAST(32, MSB(Pv_new), DUP(Pv_new)), SN(32, 1)); + RzILOpPure *op_INV_14 = INV(NON_ZERO(op_AND_13)); + RzILOpEffect *branch_21 = BRANCH(op_INV_14, seq_then_19, seq_else_20); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_3, op_ASSIGN_8, branch_21); + return instruction_sequence; +} + +// if (Pv) memd(Ii) = Rtt +RzILOpEffect *hex_il_op_s4_pstorerdt_abs(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Pv_op = ISA2REG(hi, 'v', false); + RzILOpPure *Pv = READ_REG(pkt, Pv_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = u; + RzILOpEffect *op_ASSIGN_3 = SETL("EA", VARL("u")); + + // mem_store_ut64(EA, ((ut64) Rtt)); + RzILOpEffect *ms_cast_ut64_10_11 = STOREW(VARL("EA"), CAST(64, IL_FALSE, Rtt)); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_12 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(mem_store_ut64(EA, ((ut64) Rtt))); + RzILOpEffect *seq_then_13 = ms_cast_ut64_10_11; + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_14 = c_call_12; + + // if ((((st32) Pv) & 0x1)) {seq(mem_store_ut64(EA, ((ut64) Rtt)))} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_8 = LOGAND(CAST(32, MSB(Pv), DUP(Pv)), SN(32, 1)); + RzILOpEffect *branch_15 = BRANCH(NON_ZERO(op_AND_8), seq_then_13, seq_else_14); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_3, branch_15); + return instruction_sequence; +} + +// if (Pv) memd(Rs+Ru<insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Ru_op = ISA2REG(hi, 'u', 
false); + RzILOpPure *Ru = READ_REG(pkt, Ru_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Pv_op = ISA2REG(hi, 'v', false); + RzILOpPure *Pv = READ_REG(pkt, Pv_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // u = u; + RzILOpEffect *imm_assign_3 = SETL("u", u); + + // EA = ((ut32) Rs + (Ru << u)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(Ru, VARL("u")); + RzILOpPure *op_ADD_6 = ADD(Rs, op_LSHIFT_5); + RzILOpEffect *op_ASSIGN_8 = SETL("EA", CAST(32, IL_FALSE, op_ADD_6)); + + // mem_store_ut64(EA, ((ut64) Rtt)); + RzILOpEffect *ms_cast_ut64_15_16 = STOREW(VARL("EA"), CAST(64, IL_FALSE, Rtt)); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_17 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(mem_store_ut64(EA, ((ut64) Rtt))); + RzILOpEffect *seq_then_18 = ms_cast_ut64_15_16; + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_19 = c_call_17; + + // if ((((st32) Pv) & 0x1)) {seq(mem_store_ut64(EA, ((ut64) Rtt)))} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_13 = LOGAND(CAST(32, MSB(Pv), DUP(Pv)), SN(32, 1)); + RzILOpEffect *branch_20 = BRANCH(NON_ZERO(op_AND_13), seq_then_18, seq_else_19); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_3, op_ASSIGN_8, branch_20); + return instruction_sequence; +} + +// if (Pv.new) memd(Ii) = Rtt +RzILOpEffect *hex_il_op_s4_pstorerdtnew_abs(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Pv_new_op = ISA2REG(hi, 'v', true); + RzILOpPure *Pv_new = READ_REG(pkt, Pv_new_op, true); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = u; + RzILOpEffect *op_ASSIGN_3 = SETL("EA", VARL("u")); + + // mem_store_ut64(EA, ((ut64) Rtt)); + RzILOpEffect 
*ms_cast_ut64_10_11 = STOREW(VARL("EA"), CAST(64, IL_FALSE, Rtt)); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_12 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(mem_store_ut64(EA, ((ut64) Rtt))); + RzILOpEffect *seq_then_13 = ms_cast_ut64_10_11; + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_14 = c_call_12; + + // if ((((st32) Pv_new) & 0x1)) {seq(mem_store_ut64(EA, ((ut64) Rtt)))} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_8 = LOGAND(CAST(32, MSB(Pv_new), DUP(Pv_new)), SN(32, 1)); + RzILOpEffect *branch_15 = BRANCH(NON_ZERO(op_AND_8), seq_then_13, seq_else_14); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_3, branch_15); + return instruction_sequence; +} + +// if (Pv.new) memd(Rs+Ii) = Rtt +RzILOpEffect *hex_il_op_s4_pstorerdtnew_io(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Pv_new_op = ISA2REG(hi, 'v', true); + RzILOpPure *Pv_new = READ_REG(pkt, Pv_new_op, true); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = ((ut32) Rs) + u; + RzILOpPure *op_ADD_5 = ADD(CAST(32, IL_FALSE, Rs), VARL("u")); + RzILOpEffect *op_ASSIGN_6 = SETL("EA", op_ADD_5); + + // mem_store_ut64(EA, ((ut64) Rtt)); + RzILOpEffect *ms_cast_ut64_13_14 = STOREW(VARL("EA"), CAST(64, IL_FALSE, Rtt)); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_15 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(mem_store_ut64(EA, ((ut64) Rtt))); + RzILOpEffect *seq_then_16 = ms_cast_ut64_13_14; + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_17 = c_call_15; + + // if ((((st32) Pv_new) & 0x1)) {seq(mem_store_ut64(EA, ((ut64) Rtt)))} else {seq(HYB(call_pkt, slot))}; + 
RzILOpPure *op_AND_11 = LOGAND(CAST(32, MSB(Pv_new), DUP(Pv_new)), SN(32, 1)); + RzILOpEffect *branch_18 = BRANCH(NON_ZERO(op_AND_11), seq_then_16, seq_else_17); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_6, branch_18); + return instruction_sequence; +} + +// if (Pv.new) memd(Rs+Ru<insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Ru_op = ISA2REG(hi, 'u', false); + RzILOpPure *Ru = READ_REG(pkt, Ru_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Pv_new_op = ISA2REG(hi, 'v', true); + RzILOpPure *Pv_new = READ_REG(pkt, Pv_new_op, true); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // u = u; + RzILOpEffect *imm_assign_3 = SETL("u", u); + + // EA = ((ut32) Rs + (Ru << u)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(Ru, VARL("u")); + RzILOpPure *op_ADD_6 = ADD(Rs, op_LSHIFT_5); + RzILOpEffect *op_ASSIGN_8 = SETL("EA", CAST(32, IL_FALSE, op_ADD_6)); + + // mem_store_ut64(EA, ((ut64) Rtt)); + RzILOpEffect *ms_cast_ut64_15_16 = STOREW(VARL("EA"), CAST(64, IL_FALSE, Rtt)); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_17 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(mem_store_ut64(EA, ((ut64) Rtt))); + RzILOpEffect *seq_then_18 = ms_cast_ut64_15_16; + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_19 = c_call_17; + + // if ((((st32) Pv_new) & 0x1)) {seq(mem_store_ut64(EA, ((ut64) Rtt)))} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_13 = LOGAND(CAST(32, MSB(Pv_new), DUP(Pv_new)), SN(32, 1)); + RzILOpEffect *branch_20 = BRANCH(NON_ZERO(op_AND_13), seq_then_18, seq_else_19); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_3, op_ASSIGN_8, branch_20); + return instruction_sequence; +} + +// if (!Pv) memh(Ii) = Rt.h +RzILOpEffect *hex_il_op_s4_pstorerff_abs(HexInsnPktBundle *bundle) { + const HexInsn 
*hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Pv_op = ISA2REG(hi, 'v', false); + RzILOpPure *Pv = READ_REG(pkt, Pv_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = u; + RzILOpEffect *op_ASSIGN_3 = SETL("EA", VARL("u")); + + // mem_store_ut16(EA, ((ut16) ((st16) ((Rt >> 0x10) & 0xffff)))); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(Rt, SN(32, 16)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0xffff)); + RzILOpEffect *ms_cast_ut16_18_19 = STOREW(VARL("EA"), CAST(16, IL_FALSE, CAST(16, MSB(op_AND_16), DUP(op_AND_16)))); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_20 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(mem_store_ut16(EA, ((ut16) ((st16) ((Rt >> 0x10) & 0xffff))) ...; + RzILOpEffect *seq_then_21 = ms_cast_ut16_18_19; + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_22 = c_call_20; + + // if (! 
(((st32) Pv) & 0x1)) {seq(mem_store_ut16(EA, ((ut16) ((st16) ((Rt >> 0x10) & 0xffff))) ...} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_8 = LOGAND(CAST(32, MSB(Pv), DUP(Pv)), SN(32, 1)); + RzILOpPure *op_INV_9 = INV(NON_ZERO(op_AND_8)); + RzILOpEffect *branch_23 = BRANCH(op_INV_9, seq_then_21, seq_else_22); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_3, branch_23); + return instruction_sequence; +} + +// if (!Pv) memh(Rs+Ru<insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Ru_op = ISA2REG(hi, 'u', false); + RzILOpPure *Ru = READ_REG(pkt, Ru_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Pv_op = ISA2REG(hi, 'v', false); + RzILOpPure *Pv = READ_REG(pkt, Pv_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // u = u; + RzILOpEffect *imm_assign_3 = SETL("u", u); + + // EA = ((ut32) Rs + (Ru << u)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(Ru, VARL("u")); + RzILOpPure *op_ADD_6 = ADD(Rs, op_LSHIFT_5); + RzILOpEffect *op_ASSIGN_8 = SETL("EA", CAST(32, IL_FALSE, op_ADD_6)); + + // mem_store_ut16(EA, ((ut16) ((st16) ((Rt >> 0x10) & 0xffff)))); + RzILOpPure *op_RSHIFT_19 = SHIFTRA(Rt, SN(32, 16)); + RzILOpPure *op_AND_21 = LOGAND(op_RSHIFT_19, SN(32, 0xffff)); + RzILOpEffect *ms_cast_ut16_23_24 = STOREW(VARL("EA"), CAST(16, IL_FALSE, CAST(16, MSB(op_AND_21), DUP(op_AND_21)))); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_25 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(mem_store_ut16(EA, ((ut16) ((st16) ((Rt >> 0x10) & 0xffff))) ...; + RzILOpEffect *seq_then_26 = ms_cast_ut16_23_24; + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_27 = c_call_25; + + // if (! 
(((st32) Pv) & 0x1)) {seq(mem_store_ut16(EA, ((ut16) ((st16) ((Rt >> 0x10) & 0xffff))) ...} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_13 = LOGAND(CAST(32, MSB(Pv), DUP(Pv)), SN(32, 1)); + RzILOpPure *op_INV_14 = INV(NON_ZERO(op_AND_13)); + RzILOpEffect *branch_28 = BRANCH(op_INV_14, seq_then_26, seq_else_27); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_3, op_ASSIGN_8, branch_28); + return instruction_sequence; +} + +// if (!Pv.new) memh(Ii) = Rt.h +RzILOpEffect *hex_il_op_s4_pstorerffnew_abs(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Pv_new_op = ISA2REG(hi, 'v', true); + RzILOpPure *Pv_new = READ_REG(pkt, Pv_new_op, true); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = u; + RzILOpEffect *op_ASSIGN_3 = SETL("EA", VARL("u")); + + // mem_store_ut16(EA, ((ut16) ((st16) ((Rt >> 0x10) & 0xffff)))); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(Rt, SN(32, 16)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0xffff)); + RzILOpEffect *ms_cast_ut16_18_19 = STOREW(VARL("EA"), CAST(16, IL_FALSE, CAST(16, MSB(op_AND_16), DUP(op_AND_16)))); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_20 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(mem_store_ut16(EA, ((ut16) ((st16) ((Rt >> 0x10) & 0xffff))) ...; + RzILOpEffect *seq_then_21 = ms_cast_ut16_18_19; + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_22 = c_call_20; + + // if (! 
(((st32) Pv_new) & 0x1)) {seq(mem_store_ut16(EA, ((ut16) ((st16) ((Rt >> 0x10) & 0xffff))) ...} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_8 = LOGAND(CAST(32, MSB(Pv_new), DUP(Pv_new)), SN(32, 1)); + RzILOpPure *op_INV_9 = INV(NON_ZERO(op_AND_8)); + RzILOpEffect *branch_23 = BRANCH(op_INV_9, seq_then_21, seq_else_22); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_3, branch_23); + return instruction_sequence; +} + +// if (!Pv.new) memh(Rs+Ii) = Rt.h +RzILOpEffect *hex_il_op_s4_pstorerffnew_io(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Pv_new_op = ISA2REG(hi, 'v', true); + RzILOpPure *Pv_new = READ_REG(pkt, Pv_new_op, true); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = ((ut32) Rs) + u; + RzILOpPure *op_ADD_5 = ADD(CAST(32, IL_FALSE, Rs), VARL("u")); + RzILOpEffect *op_ASSIGN_6 = SETL("EA", op_ADD_5); + + // mem_store_ut16(EA, ((ut16) ((st16) ((Rt >> 0x10) & 0xffff)))); + RzILOpPure *op_RSHIFT_17 = SHIFTRA(Rt, SN(32, 16)); + RzILOpPure *op_AND_19 = LOGAND(op_RSHIFT_17, SN(32, 0xffff)); + RzILOpEffect *ms_cast_ut16_21_22 = STOREW(VARL("EA"), CAST(16, IL_FALSE, CAST(16, MSB(op_AND_19), DUP(op_AND_19)))); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_23 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(mem_store_ut16(EA, ((ut16) ((st16) ((Rt >> 0x10) & 0xffff))) ...; + RzILOpEffect *seq_then_24 = ms_cast_ut16_21_22; + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_25 = c_call_23; + + // if (! 
(((st32) Pv_new) & 0x1)) {seq(mem_store_ut16(EA, ((ut16) ((st16) ((Rt >> 0x10) & 0xffff))) ...} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_11 = LOGAND(CAST(32, MSB(Pv_new), DUP(Pv_new)), SN(32, 1)); + RzILOpPure *op_INV_12 = INV(NON_ZERO(op_AND_11)); + RzILOpEffect *branch_26 = BRANCH(op_INV_12, seq_then_24, seq_else_25); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_6, branch_26); + return instruction_sequence; +} + +// if (!Pv.new) memh(Rs+Ru<insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Ru_op = ISA2REG(hi, 'u', false); + RzILOpPure *Ru = READ_REG(pkt, Ru_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Pv_new_op = ISA2REG(hi, 'v', true); + RzILOpPure *Pv_new = READ_REG(pkt, Pv_new_op, true); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // u = u; + RzILOpEffect *imm_assign_3 = SETL("u", u); + + // EA = ((ut32) Rs + (Ru << u)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(Ru, VARL("u")); + RzILOpPure *op_ADD_6 = ADD(Rs, op_LSHIFT_5); + RzILOpEffect *op_ASSIGN_8 = SETL("EA", CAST(32, IL_FALSE, op_ADD_6)); + + // mem_store_ut16(EA, ((ut16) ((st16) ((Rt >> 0x10) & 0xffff)))); + RzILOpPure *op_RSHIFT_19 = SHIFTRA(Rt, SN(32, 16)); + RzILOpPure *op_AND_21 = LOGAND(op_RSHIFT_19, SN(32, 0xffff)); + RzILOpEffect *ms_cast_ut16_23_24 = STOREW(VARL("EA"), CAST(16, IL_FALSE, CAST(16, MSB(op_AND_21), DUP(op_AND_21)))); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_25 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(mem_store_ut16(EA, ((ut16) ((st16) ((Rt >> 0x10) & 0xffff))) ...; + RzILOpEffect *seq_then_26 = ms_cast_ut16_23_24; + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_27 = c_call_25; + + // if (! 
(((st32) Pv_new) & 0x1)) {seq(mem_store_ut16(EA, ((ut16) ((st16) ((Rt >> 0x10) & 0xffff))) ...} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_13 = LOGAND(CAST(32, MSB(Pv_new), DUP(Pv_new)), SN(32, 1)); + RzILOpPure *op_INV_14 = INV(NON_ZERO(op_AND_13)); + RzILOpEffect *branch_28 = BRANCH(op_INV_14, seq_then_26, seq_else_27); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_3, op_ASSIGN_8, branch_28); + return instruction_sequence; +} + +// if (Pv) memh(Ii) = Rt.h +RzILOpEffect *hex_il_op_s4_pstorerft_abs(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Pv_op = ISA2REG(hi, 'v', false); + RzILOpPure *Pv = READ_REG(pkt, Pv_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = u; + RzILOpEffect *op_ASSIGN_3 = SETL("EA", VARL("u")); + + // mem_store_ut16(EA, ((ut16) ((st16) ((Rt >> 0x10) & 0xffff)))); + RzILOpPure *op_RSHIFT_13 = SHIFTRA(Rt, SN(32, 16)); + RzILOpPure *op_AND_15 = LOGAND(op_RSHIFT_13, SN(32, 0xffff)); + RzILOpEffect *ms_cast_ut16_17_18 = STOREW(VARL("EA"), CAST(16, IL_FALSE, CAST(16, MSB(op_AND_15), DUP(op_AND_15)))); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_19 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(mem_store_ut16(EA, ((ut16) ((st16) ((Rt >> 0x10) & 0xffff))) ...; + RzILOpEffect *seq_then_20 = ms_cast_ut16_17_18; + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_21 = c_call_19; + + // if ((((st32) Pv) & 0x1)) {seq(mem_store_ut16(EA, ((ut16) ((st16) ((Rt >> 0x10) & 0xffff))) ...} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_8 = LOGAND(CAST(32, MSB(Pv), DUP(Pv)), SN(32, 1)); + RzILOpEffect *branch_22 = BRANCH(NON_ZERO(op_AND_8), seq_then_20, seq_else_21); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_3, 
branch_22); + return instruction_sequence; +} + +// if (Pv) memh(Rs+Ru<insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Ru_op = ISA2REG(hi, 'u', false); + RzILOpPure *Ru = READ_REG(pkt, Ru_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Pv_op = ISA2REG(hi, 'v', false); + RzILOpPure *Pv = READ_REG(pkt, Pv_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // u = u; + RzILOpEffect *imm_assign_3 = SETL("u", u); + + // EA = ((ut32) Rs + (Ru << u)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(Ru, VARL("u")); + RzILOpPure *op_ADD_6 = ADD(Rs, op_LSHIFT_5); + RzILOpEffect *op_ASSIGN_8 = SETL("EA", CAST(32, IL_FALSE, op_ADD_6)); + + // mem_store_ut16(EA, ((ut16) ((st16) ((Rt >> 0x10) & 0xffff)))); + RzILOpPure *op_RSHIFT_18 = SHIFTRA(Rt, SN(32, 16)); + RzILOpPure *op_AND_20 = LOGAND(op_RSHIFT_18, SN(32, 0xffff)); + RzILOpEffect *ms_cast_ut16_22_23 = STOREW(VARL("EA"), CAST(16, IL_FALSE, CAST(16, MSB(op_AND_20), DUP(op_AND_20)))); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_24 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(mem_store_ut16(EA, ((ut16) ((st16) ((Rt >> 0x10) & 0xffff))) ...; + RzILOpEffect *seq_then_25 = ms_cast_ut16_22_23; + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_26 = c_call_24; + + // if ((((st32) Pv) & 0x1)) {seq(mem_store_ut16(EA, ((ut16) ((st16) ((Rt >> 0x10) & 0xffff))) ...} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_13 = LOGAND(CAST(32, MSB(Pv), DUP(Pv)), SN(32, 1)); + RzILOpEffect *branch_27 = BRANCH(NON_ZERO(op_AND_13), seq_then_25, seq_else_26); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_3, op_ASSIGN_8, branch_27); + return instruction_sequence; +} + +// if (Pv.new) memh(Ii) = Rt.h +RzILOpEffect *hex_il_op_s4_pstorerftnew_abs(HexInsnPktBundle *bundle) { + const HexInsn *hi = 
bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Pv_new_op = ISA2REG(hi, 'v', true); + RzILOpPure *Pv_new = READ_REG(pkt, Pv_new_op, true); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = u; + RzILOpEffect *op_ASSIGN_3 = SETL("EA", VARL("u")); + + // mem_store_ut16(EA, ((ut16) ((st16) ((Rt >> 0x10) & 0xffff)))); + RzILOpPure *op_RSHIFT_13 = SHIFTRA(Rt, SN(32, 16)); + RzILOpPure *op_AND_15 = LOGAND(op_RSHIFT_13, SN(32, 0xffff)); + RzILOpEffect *ms_cast_ut16_17_18 = STOREW(VARL("EA"), CAST(16, IL_FALSE, CAST(16, MSB(op_AND_15), DUP(op_AND_15)))); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_19 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(mem_store_ut16(EA, ((ut16) ((st16) ((Rt >> 0x10) & 0xffff))) ...; + RzILOpEffect *seq_then_20 = ms_cast_ut16_17_18; + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_21 = c_call_19; + + // if ((((st32) Pv_new) & 0x1)) {seq(mem_store_ut16(EA, ((ut16) ((st16) ((Rt >> 0x10) & 0xffff))) ...} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_8 = LOGAND(CAST(32, MSB(Pv_new), DUP(Pv_new)), SN(32, 1)); + RzILOpEffect *branch_22 = BRANCH(NON_ZERO(op_AND_8), seq_then_20, seq_else_21); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_3, branch_22); + return instruction_sequence; +} + +// if (Pv.new) memh(Rs+Ii) = Rt.h +RzILOpEffect *hex_il_op_s4_pstorerftnew_io(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Pv_new_op = ISA2REG(hi, 'v', true); + RzILOpPure *Pv_new = READ_REG(pkt, Pv_new_op, true); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + 
RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = ((ut32) Rs) + u; + RzILOpPure *op_ADD_5 = ADD(CAST(32, IL_FALSE, Rs), VARL("u")); + RzILOpEffect *op_ASSIGN_6 = SETL("EA", op_ADD_5); + + // mem_store_ut16(EA, ((ut16) ((st16) ((Rt >> 0x10) & 0xffff)))); + RzILOpPure *op_RSHIFT_16 = SHIFTRA(Rt, SN(32, 16)); + RzILOpPure *op_AND_18 = LOGAND(op_RSHIFT_16, SN(32, 0xffff)); + RzILOpEffect *ms_cast_ut16_20_21 = STOREW(VARL("EA"), CAST(16, IL_FALSE, CAST(16, MSB(op_AND_18), DUP(op_AND_18)))); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_22 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(mem_store_ut16(EA, ((ut16) ((st16) ((Rt >> 0x10) & 0xffff))) ...; + RzILOpEffect *seq_then_23 = ms_cast_ut16_20_21; + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_24 = c_call_22; + + // if ((((st32) Pv_new) & 0x1)) {seq(mem_store_ut16(EA, ((ut16) ((st16) ((Rt >> 0x10) & 0xffff))) ...} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_11 = LOGAND(CAST(32, MSB(Pv_new), DUP(Pv_new)), SN(32, 1)); + RzILOpEffect *branch_25 = BRANCH(NON_ZERO(op_AND_11), seq_then_23, seq_else_24); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_6, branch_25); + return instruction_sequence; +} + +// if (Pv.new) memh(Rs+Ru<insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Ru_op = ISA2REG(hi, 'u', false); + RzILOpPure *Ru = READ_REG(pkt, Ru_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Pv_new_op = ISA2REG(hi, 'v', true); + RzILOpPure *Pv_new = READ_REG(pkt, Pv_new_op, true); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // u = u; + RzILOpEffect *imm_assign_3 = SETL("u", u); + + // EA = ((ut32) Rs + (Ru << u)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(Ru, VARL("u")); + RzILOpPure *op_ADD_6 = 
ADD(Rs, op_LSHIFT_5); + RzILOpEffect *op_ASSIGN_8 = SETL("EA", CAST(32, IL_FALSE, op_ADD_6)); + + // mem_store_ut16(EA, ((ut16) ((st16) ((Rt >> 0x10) & 0xffff)))); + RzILOpPure *op_RSHIFT_18 = SHIFTRA(Rt, SN(32, 16)); + RzILOpPure *op_AND_20 = LOGAND(op_RSHIFT_18, SN(32, 0xffff)); + RzILOpEffect *ms_cast_ut16_22_23 = STOREW(VARL("EA"), CAST(16, IL_FALSE, CAST(16, MSB(op_AND_20), DUP(op_AND_20)))); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_24 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(mem_store_ut16(EA, ((ut16) ((st16) ((Rt >> 0x10) & 0xffff))) ...; + RzILOpEffect *seq_then_25 = ms_cast_ut16_22_23; + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_26 = c_call_24; + + // if ((((st32) Pv_new) & 0x1)) {seq(mem_store_ut16(EA, ((ut16) ((st16) ((Rt >> 0x10) & 0xffff))) ...} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_13 = LOGAND(CAST(32, MSB(Pv_new), DUP(Pv_new)), SN(32, 1)); + RzILOpEffect *branch_27 = BRANCH(NON_ZERO(op_AND_13), seq_then_25, seq_else_26); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_3, op_ASSIGN_8, branch_27); + return instruction_sequence; +} + +// if (!Pv) memh(Ii) = Rt +RzILOpEffect *hex_il_op_s4_pstorerhf_abs(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Pv_op = ISA2REG(hi, 'v', false); + RzILOpPure *Pv = READ_REG(pkt, Pv_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = u; + RzILOpEffect *op_ASSIGN_3 = SETL("EA", VARL("u")); + + // mem_store_ut16(EA, ((ut16) ((st16) ((Rt >> 0x0) & 0xffff)))); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0xffff)); + RzILOpEffect *ms_cast_ut16_18_19 = STOREW(VARL("EA"), CAST(16, IL_FALSE, CAST(16, MSB(op_AND_16), DUP(op_AND_16)))); + + 
// HYB(call_pkt, slot); + RzILOpEffect *c_call_20 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(mem_store_ut16(EA, ((ut16) ((st16) ((Rt >> 0x0) & 0xffff))))); + RzILOpEffect *seq_then_21 = ms_cast_ut16_18_19; + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_22 = c_call_20; + + // if (! (((st32) Pv) & 0x1)) {seq(mem_store_ut16(EA, ((ut16) ((st16) ((Rt >> 0x0) & 0xffff)))))} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_8 = LOGAND(CAST(32, MSB(Pv), DUP(Pv)), SN(32, 1)); + RzILOpPure *op_INV_9 = INV(NON_ZERO(op_AND_8)); + RzILOpEffect *branch_23 = BRANCH(op_INV_9, seq_then_21, seq_else_22); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_3, branch_23); + return instruction_sequence; +} + +// if (!Pv) memh(Rs+Ru<insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Ru_op = ISA2REG(hi, 'u', false); + RzILOpPure *Ru = READ_REG(pkt, Ru_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Pv_op = ISA2REG(hi, 'v', false); + RzILOpPure *Pv = READ_REG(pkt, Pv_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // u = u; + RzILOpEffect *imm_assign_3 = SETL("u", u); + + // EA = ((ut32) Rs + (Ru << u)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(Ru, VARL("u")); + RzILOpPure *op_ADD_6 = ADD(Rs, op_LSHIFT_5); + RzILOpEffect *op_ASSIGN_8 = SETL("EA", CAST(32, IL_FALSE, op_ADD_6)); + + // mem_store_ut16(EA, ((ut16) ((st16) ((Rt >> 0x0) & 0xffff)))); + RzILOpPure *op_RSHIFT_19 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_21 = LOGAND(op_RSHIFT_19, SN(32, 0xffff)); + RzILOpEffect *ms_cast_ut16_23_24 = STOREW(VARL("EA"), CAST(16, IL_FALSE, CAST(16, MSB(op_AND_21), DUP(op_AND_21)))); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_25 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(mem_store_ut16(EA, ((ut16) ((st16) ((Rt >> 0x0) & 
0xffff))))); + RzILOpEffect *seq_then_26 = ms_cast_ut16_23_24; + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_27 = c_call_25; + + // if (! (((st32) Pv) & 0x1)) {seq(mem_store_ut16(EA, ((ut16) ((st16) ((Rt >> 0x0) & 0xffff)))))} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_13 = LOGAND(CAST(32, MSB(Pv), DUP(Pv)), SN(32, 1)); + RzILOpPure *op_INV_14 = INV(NON_ZERO(op_AND_13)); + RzILOpEffect *branch_28 = BRANCH(op_INV_14, seq_then_26, seq_else_27); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_3, op_ASSIGN_8, branch_28); + return instruction_sequence; +} + +// if (!Pv.new) memh(Ii) = Rt +RzILOpEffect *hex_il_op_s4_pstorerhfnew_abs(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Pv_new_op = ISA2REG(hi, 'v', true); + RzILOpPure *Pv_new = READ_REG(pkt, Pv_new_op, true); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = u; + RzILOpEffect *op_ASSIGN_3 = SETL("EA", VARL("u")); + + // mem_store_ut16(EA, ((ut16) ((st16) ((Rt >> 0x0) & 0xffff)))); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0xffff)); + RzILOpEffect *ms_cast_ut16_18_19 = STOREW(VARL("EA"), CAST(16, IL_FALSE, CAST(16, MSB(op_AND_16), DUP(op_AND_16)))); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_20 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(mem_store_ut16(EA, ((ut16) ((st16) ((Rt >> 0x0) & 0xffff))))); + RzILOpEffect *seq_then_21 = ms_cast_ut16_18_19; + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_22 = c_call_20; + + // if (! 
(((st32) Pv_new) & 0x1)) {seq(mem_store_ut16(EA, ((ut16) ((st16) ((Rt >> 0x0) & 0xffff)))))} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_8 = LOGAND(CAST(32, MSB(Pv_new), DUP(Pv_new)), SN(32, 1)); + RzILOpPure *op_INV_9 = INV(NON_ZERO(op_AND_8)); + RzILOpEffect *branch_23 = BRANCH(op_INV_9, seq_then_21, seq_else_22); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_3, branch_23); + return instruction_sequence; +} + +// if (!Pv.new) memh(Rs+Ii) = Rt +RzILOpEffect *hex_il_op_s4_pstorerhfnew_io(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Pv_new_op = ISA2REG(hi, 'v', true); + RzILOpPure *Pv_new = READ_REG(pkt, Pv_new_op, true); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = ((ut32) Rs) + u; + RzILOpPure *op_ADD_5 = ADD(CAST(32, IL_FALSE, Rs), VARL("u")); + RzILOpEffect *op_ASSIGN_6 = SETL("EA", op_ADD_5); + + // mem_store_ut16(EA, ((ut16) ((st16) ((Rt >> 0x0) & 0xffff)))); + RzILOpPure *op_RSHIFT_17 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_19 = LOGAND(op_RSHIFT_17, SN(32, 0xffff)); + RzILOpEffect *ms_cast_ut16_21_22 = STOREW(VARL("EA"), CAST(16, IL_FALSE, CAST(16, MSB(op_AND_19), DUP(op_AND_19)))); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_23 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(mem_store_ut16(EA, ((ut16) ((st16) ((Rt >> 0x0) & 0xffff))))); + RzILOpEffect *seq_then_24 = ms_cast_ut16_21_22; + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_25 = c_call_23; + + // if (! 
(((st32) Pv_new) & 0x1)) {seq(mem_store_ut16(EA, ((ut16) ((st16) ((Rt >> 0x0) & 0xffff)))))} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_11 = LOGAND(CAST(32, MSB(Pv_new), DUP(Pv_new)), SN(32, 1)); + RzILOpPure *op_INV_12 = INV(NON_ZERO(op_AND_11)); + RzILOpEffect *branch_26 = BRANCH(op_INV_12, seq_then_24, seq_else_25); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_6, branch_26); + return instruction_sequence; +} + +// if (!Pv.new) memh(Rs+Ru<insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Ru_op = ISA2REG(hi, 'u', false); + RzILOpPure *Ru = READ_REG(pkt, Ru_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Pv_new_op = ISA2REG(hi, 'v', true); + RzILOpPure *Pv_new = READ_REG(pkt, Pv_new_op, true); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // u = u; + RzILOpEffect *imm_assign_3 = SETL("u", u); + + // EA = ((ut32) Rs + (Ru << u)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(Ru, VARL("u")); + RzILOpPure *op_ADD_6 = ADD(Rs, op_LSHIFT_5); + RzILOpEffect *op_ASSIGN_8 = SETL("EA", CAST(32, IL_FALSE, op_ADD_6)); + + // mem_store_ut16(EA, ((ut16) ((st16) ((Rt >> 0x0) & 0xffff)))); + RzILOpPure *op_RSHIFT_19 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_21 = LOGAND(op_RSHIFT_19, SN(32, 0xffff)); + RzILOpEffect *ms_cast_ut16_23_24 = STOREW(VARL("EA"), CAST(16, IL_FALSE, CAST(16, MSB(op_AND_21), DUP(op_AND_21)))); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_25 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(mem_store_ut16(EA, ((ut16) ((st16) ((Rt >> 0x0) & 0xffff))))); + RzILOpEffect *seq_then_26 = ms_cast_ut16_23_24; + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_27 = c_call_25; + + // if (! 
(((st32) Pv_new) & 0x1)) {seq(mem_store_ut16(EA, ((ut16) ((st16) ((Rt >> 0x0) & 0xffff)))))} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_13 = LOGAND(CAST(32, MSB(Pv_new), DUP(Pv_new)), SN(32, 1)); + RzILOpPure *op_INV_14 = INV(NON_ZERO(op_AND_13)); + RzILOpEffect *branch_28 = BRANCH(op_INV_14, seq_then_26, seq_else_27); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_3, op_ASSIGN_8, branch_28); + return instruction_sequence; +} + +// if (!Pv) memh(Ii) = Nt.new +RzILOpEffect *hex_il_op_s4_pstorerhnewf_abs(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Pv_op = ISA2REG(hi, 'v', false); + RzILOpPure *Pv = READ_REG(pkt, Pv_op, false); + const HexOp Nt_new_op = NREG2OP(bundle, 't'); + RzILOpPure *Nt_new = READ_REG(pkt, &Nt_new_op, true); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = u; + RzILOpEffect *op_ASSIGN_3 = SETL("EA", VARL("u")); + + // mem_store_ut16(EA, ((ut16) ((st16) ((Nt_new >> 0x0) & 0xffff)))); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(Nt_new, SN(32, 0)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0xffff)); + RzILOpEffect *ms_cast_ut16_18_19 = STOREW(VARL("EA"), CAST(16, IL_FALSE, CAST(16, MSB(op_AND_16), DUP(op_AND_16)))); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_20 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(mem_store_ut16(EA, ((ut16) ((st16) ((Nt_new >> 0x0) & 0xffff ...; + RzILOpEffect *seq_then_21 = ms_cast_ut16_18_19; + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_22 = c_call_20; + + // if (! 
(((st32) Pv) & 0x1)) {seq(mem_store_ut16(EA, ((ut16) ((st16) ((Nt_new >> 0x0) & 0xffff ...} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_8 = LOGAND(CAST(32, MSB(Pv), DUP(Pv)), SN(32, 1)); + RzILOpPure *op_INV_9 = INV(NON_ZERO(op_AND_8)); + RzILOpEffect *branch_23 = BRANCH(op_INV_9, seq_then_21, seq_else_22); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_3, branch_23); + return instruction_sequence; +} + +// if (!Pv) memh(Rs+Ru<insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Ru_op = ISA2REG(hi, 'u', false); + RzILOpPure *Ru = READ_REG(pkt, Ru_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Pv_op = ISA2REG(hi, 'v', false); + RzILOpPure *Pv = READ_REG(pkt, Pv_op, false); + const HexOp Nt_new_op = NREG2OP(bundle, 't'); + RzILOpPure *Nt_new = READ_REG(pkt, &Nt_new_op, true); + + // u = u; + RzILOpEffect *imm_assign_3 = SETL("u", u); + + // EA = ((ut32) Rs + (Ru << u)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(Ru, VARL("u")); + RzILOpPure *op_ADD_6 = ADD(Rs, op_LSHIFT_5); + RzILOpEffect *op_ASSIGN_8 = SETL("EA", CAST(32, IL_FALSE, op_ADD_6)); + + // mem_store_ut16(EA, ((ut16) ((st16) ((Nt_new >> 0x0) & 0xffff)))); + RzILOpPure *op_RSHIFT_19 = SHIFTRA(Nt_new, SN(32, 0)); + RzILOpPure *op_AND_21 = LOGAND(op_RSHIFT_19, SN(32, 0xffff)); + RzILOpEffect *ms_cast_ut16_23_24 = STOREW(VARL("EA"), CAST(16, IL_FALSE, CAST(16, MSB(op_AND_21), DUP(op_AND_21)))); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_25 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(mem_store_ut16(EA, ((ut16) ((st16) ((Nt_new >> 0x0) & 0xffff ...; + RzILOpEffect *seq_then_26 = ms_cast_ut16_23_24; + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_27 = c_call_25; + + // if (! 
(((st32) Pv) & 0x1)) {seq(mem_store_ut16(EA, ((ut16) ((st16) ((Nt_new >> 0x0) & 0xffff ...} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_13 = LOGAND(CAST(32, MSB(Pv), DUP(Pv)), SN(32, 1)); + RzILOpPure *op_INV_14 = INV(NON_ZERO(op_AND_13)); + RzILOpEffect *branch_28 = BRANCH(op_INV_14, seq_then_26, seq_else_27); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_3, op_ASSIGN_8, branch_28); + return instruction_sequence; +} + +// if (!Pv.new) memh(Ii) = Nt.new +RzILOpEffect *hex_il_op_s4_pstorerhnewfnew_abs(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Pv_new_op = ISA2REG(hi, 'v', true); + RzILOpPure *Pv_new = READ_REG(pkt, Pv_new_op, true); + const HexOp Nt_new_op = NREG2OP(bundle, 't'); + RzILOpPure *Nt_new = READ_REG(pkt, &Nt_new_op, true); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = u; + RzILOpEffect *op_ASSIGN_3 = SETL("EA", VARL("u")); + + // mem_store_ut16(EA, ((ut16) ((st16) ((Nt_new >> 0x0) & 0xffff)))); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(Nt_new, SN(32, 0)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0xffff)); + RzILOpEffect *ms_cast_ut16_18_19 = STOREW(VARL("EA"), CAST(16, IL_FALSE, CAST(16, MSB(op_AND_16), DUP(op_AND_16)))); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_20 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(mem_store_ut16(EA, ((ut16) ((st16) ((Nt_new >> 0x0) & 0xffff ...; + RzILOpEffect *seq_then_21 = ms_cast_ut16_18_19; + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_22 = c_call_20; + + // if (! 
(((st32) Pv_new) & 0x1)) {seq(mem_store_ut16(EA, ((ut16) ((st16) ((Nt_new >> 0x0) & 0xffff ...} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_8 = LOGAND(CAST(32, MSB(Pv_new), DUP(Pv_new)), SN(32, 1)); + RzILOpPure *op_INV_9 = INV(NON_ZERO(op_AND_8)); + RzILOpEffect *branch_23 = BRANCH(op_INV_9, seq_then_21, seq_else_22); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_3, branch_23); + return instruction_sequence; +} + +// if (!Pv.new) memh(Rs+Ii) = Nt.new +RzILOpEffect *hex_il_op_s4_pstorerhnewfnew_io(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Pv_new_op = ISA2REG(hi, 'v', true); + RzILOpPure *Pv_new = READ_REG(pkt, Pv_new_op, true); + const HexOp Nt_new_op = NREG2OP(bundle, 't'); + RzILOpPure *Nt_new = READ_REG(pkt, &Nt_new_op, true); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = ((ut32) Rs) + u; + RzILOpPure *op_ADD_5 = ADD(CAST(32, IL_FALSE, Rs), VARL("u")); + RzILOpEffect *op_ASSIGN_6 = SETL("EA", op_ADD_5); + + // mem_store_ut16(EA, ((ut16) ((st16) ((Nt_new >> 0x0) & 0xffff)))); + RzILOpPure *op_RSHIFT_17 = SHIFTRA(Nt_new, SN(32, 0)); + RzILOpPure *op_AND_19 = LOGAND(op_RSHIFT_17, SN(32, 0xffff)); + RzILOpEffect *ms_cast_ut16_21_22 = STOREW(VARL("EA"), CAST(16, IL_FALSE, CAST(16, MSB(op_AND_19), DUP(op_AND_19)))); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_23 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(mem_store_ut16(EA, ((ut16) ((st16) ((Nt_new >> 0x0) & 0xffff ...; + RzILOpEffect *seq_then_24 = ms_cast_ut16_21_22; + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_25 = c_call_23; + + // if (! 
(((st32) Pv_new) & 0x1)) {seq(mem_store_ut16(EA, ((ut16) ((st16) ((Nt_new >> 0x0) & 0xffff ...} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_11 = LOGAND(CAST(32, MSB(Pv_new), DUP(Pv_new)), SN(32, 1)); + RzILOpPure *op_INV_12 = INV(NON_ZERO(op_AND_11)); + RzILOpEffect *branch_26 = BRANCH(op_INV_12, seq_then_24, seq_else_25); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_6, branch_26); + return instruction_sequence; +} + +// if (!Pv.new) memh(Rs+Ru<insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Ru_op = ISA2REG(hi, 'u', false); + RzILOpPure *Ru = READ_REG(pkt, Ru_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Pv_new_op = ISA2REG(hi, 'v', true); + RzILOpPure *Pv_new = READ_REG(pkt, Pv_new_op, true); + const HexOp Nt_new_op = NREG2OP(bundle, 't'); + RzILOpPure *Nt_new = READ_REG(pkt, &Nt_new_op, true); + + // u = u; + RzILOpEffect *imm_assign_3 = SETL("u", u); + + // EA = ((ut32) Rs + (Ru << u)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(Ru, VARL("u")); + RzILOpPure *op_ADD_6 = ADD(Rs, op_LSHIFT_5); + RzILOpEffect *op_ASSIGN_8 = SETL("EA", CAST(32, IL_FALSE, op_ADD_6)); + + // mem_store_ut16(EA, ((ut16) ((st16) ((Nt_new >> 0x0) & 0xffff)))); + RzILOpPure *op_RSHIFT_19 = SHIFTRA(Nt_new, SN(32, 0)); + RzILOpPure *op_AND_21 = LOGAND(op_RSHIFT_19, SN(32, 0xffff)); + RzILOpEffect *ms_cast_ut16_23_24 = STOREW(VARL("EA"), CAST(16, IL_FALSE, CAST(16, MSB(op_AND_21), DUP(op_AND_21)))); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_25 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(mem_store_ut16(EA, ((ut16) ((st16) ((Nt_new >> 0x0) & 0xffff ...; + RzILOpEffect *seq_then_26 = ms_cast_ut16_23_24; + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_27 = c_call_25; + + // if (! 
(((st32) Pv_new) & 0x1)) {seq(mem_store_ut16(EA, ((ut16) ((st16) ((Nt_new >> 0x0) & 0xffff ...} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_13 = LOGAND(CAST(32, MSB(Pv_new), DUP(Pv_new)), SN(32, 1)); + RzILOpPure *op_INV_14 = INV(NON_ZERO(op_AND_13)); + RzILOpEffect *branch_28 = BRANCH(op_INV_14, seq_then_26, seq_else_27); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_3, op_ASSIGN_8, branch_28); + return instruction_sequence; +} + +// if (Pv) memh(Ii) = Nt.new +RzILOpEffect *hex_il_op_s4_pstorerhnewt_abs(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Pv_op = ISA2REG(hi, 'v', false); + RzILOpPure *Pv = READ_REG(pkt, Pv_op, false); + const HexOp Nt_new_op = NREG2OP(bundle, 't'); + RzILOpPure *Nt_new = READ_REG(pkt, &Nt_new_op, true); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = u; + RzILOpEffect *op_ASSIGN_3 = SETL("EA", VARL("u")); + + // mem_store_ut16(EA, ((ut16) ((st16) ((Nt_new >> 0x0) & 0xffff)))); + RzILOpPure *op_RSHIFT_13 = SHIFTRA(Nt_new, SN(32, 0)); + RzILOpPure *op_AND_15 = LOGAND(op_RSHIFT_13, SN(32, 0xffff)); + RzILOpEffect *ms_cast_ut16_17_18 = STOREW(VARL("EA"), CAST(16, IL_FALSE, CAST(16, MSB(op_AND_15), DUP(op_AND_15)))); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_19 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(mem_store_ut16(EA, ((ut16) ((st16) ((Nt_new >> 0x0) & 0xffff ...; + RzILOpEffect *seq_then_20 = ms_cast_ut16_17_18; + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_21 = c_call_19; + + // if ((((st32) Pv) & 0x1)) {seq(mem_store_ut16(EA, ((ut16) ((st16) ((Nt_new >> 0x0) & 0xffff ...} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_8 = LOGAND(CAST(32, MSB(Pv), DUP(Pv)), SN(32, 1)); + RzILOpEffect *branch_22 = BRANCH(NON_ZERO(op_AND_8), seq_then_20, seq_else_21); + + RzILOpEffect *instruction_sequence = SEQN(3, 
imm_assign_0, op_ASSIGN_3, branch_22); + return instruction_sequence; +} + +// if (Pv) memh(Rs+Ru<insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Ru_op = ISA2REG(hi, 'u', false); + RzILOpPure *Ru = READ_REG(pkt, Ru_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Pv_op = ISA2REG(hi, 'v', false); + RzILOpPure *Pv = READ_REG(pkt, Pv_op, false); + const HexOp Nt_new_op = NREG2OP(bundle, 't'); + RzILOpPure *Nt_new = READ_REG(pkt, &Nt_new_op, true); + + // u = u; + RzILOpEffect *imm_assign_3 = SETL("u", u); + + // EA = ((ut32) Rs + (Ru << u)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(Ru, VARL("u")); + RzILOpPure *op_ADD_6 = ADD(Rs, op_LSHIFT_5); + RzILOpEffect *op_ASSIGN_8 = SETL("EA", CAST(32, IL_FALSE, op_ADD_6)); + + // mem_store_ut16(EA, ((ut16) ((st16) ((Nt_new >> 0x0) & 0xffff)))); + RzILOpPure *op_RSHIFT_18 = SHIFTRA(Nt_new, SN(32, 0)); + RzILOpPure *op_AND_20 = LOGAND(op_RSHIFT_18, SN(32, 0xffff)); + RzILOpEffect *ms_cast_ut16_22_23 = STOREW(VARL("EA"), CAST(16, IL_FALSE, CAST(16, MSB(op_AND_20), DUP(op_AND_20)))); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_24 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(mem_store_ut16(EA, ((ut16) ((st16) ((Nt_new >> 0x0) & 0xffff ...; + RzILOpEffect *seq_then_25 = ms_cast_ut16_22_23; + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_26 = c_call_24; + + // if ((((st32) Pv) & 0x1)) {seq(mem_store_ut16(EA, ((ut16) ((st16) ((Nt_new >> 0x0) & 0xffff ...} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_13 = LOGAND(CAST(32, MSB(Pv), DUP(Pv)), SN(32, 1)); + RzILOpEffect *branch_27 = BRANCH(NON_ZERO(op_AND_13), seq_then_25, seq_else_26); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_3, op_ASSIGN_8, branch_27); + return instruction_sequence; +} + +// if (Pv.new) memh(Ii) = Nt.new +RzILOpEffect 
*hex_il_op_s4_pstorerhnewtnew_abs(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Pv_new_op = ISA2REG(hi, 'v', true); + RzILOpPure *Pv_new = READ_REG(pkt, Pv_new_op, true); + const HexOp Nt_new_op = NREG2OP(bundle, 't'); + RzILOpPure *Nt_new = READ_REG(pkt, &Nt_new_op, true); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = u; + RzILOpEffect *op_ASSIGN_3 = SETL("EA", VARL("u")); + + // mem_store_ut16(EA, ((ut16) ((st16) ((Nt_new >> 0x0) & 0xffff)))); + RzILOpPure *op_RSHIFT_13 = SHIFTRA(Nt_new, SN(32, 0)); + RzILOpPure *op_AND_15 = LOGAND(op_RSHIFT_13, SN(32, 0xffff)); + RzILOpEffect *ms_cast_ut16_17_18 = STOREW(VARL("EA"), CAST(16, IL_FALSE, CAST(16, MSB(op_AND_15), DUP(op_AND_15)))); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_19 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(mem_store_ut16(EA, ((ut16) ((st16) ((Nt_new >> 0x0) & 0xffff ...; + RzILOpEffect *seq_then_20 = ms_cast_ut16_17_18; + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_21 = c_call_19; + + // if ((((st32) Pv_new) & 0x1)) {seq(mem_store_ut16(EA, ((ut16) ((st16) ((Nt_new >> 0x0) & 0xffff ...} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_8 = LOGAND(CAST(32, MSB(Pv_new), DUP(Pv_new)), SN(32, 1)); + RzILOpEffect *branch_22 = BRANCH(NON_ZERO(op_AND_8), seq_then_20, seq_else_21); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_3, branch_22); + return instruction_sequence; +} + +// if (Pv.new) memh(Rs+Ii) = Nt.new +RzILOpEffect *hex_il_op_s4_pstorerhnewtnew_io(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Pv_new_op = ISA2REG(hi, 'v', true); + 
RzILOpPure *Pv_new = READ_REG(pkt, Pv_new_op, true); + const HexOp Nt_new_op = NREG2OP(bundle, 't'); + RzILOpPure *Nt_new = READ_REG(pkt, &Nt_new_op, true); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = ((ut32) Rs) + u; + RzILOpPure *op_ADD_5 = ADD(CAST(32, IL_FALSE, Rs), VARL("u")); + RzILOpEffect *op_ASSIGN_6 = SETL("EA", op_ADD_5); + + // mem_store_ut16(EA, ((ut16) ((st16) ((Nt_new >> 0x0) & 0xffff)))); + RzILOpPure *op_RSHIFT_16 = SHIFTRA(Nt_new, SN(32, 0)); + RzILOpPure *op_AND_18 = LOGAND(op_RSHIFT_16, SN(32, 0xffff)); + RzILOpEffect *ms_cast_ut16_20_21 = STOREW(VARL("EA"), CAST(16, IL_FALSE, CAST(16, MSB(op_AND_18), DUP(op_AND_18)))); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_22 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(mem_store_ut16(EA, ((ut16) ((st16) ((Nt_new >> 0x0) & 0xffff ...; + RzILOpEffect *seq_then_23 = ms_cast_ut16_20_21; + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_24 = c_call_22; + + // if ((((st32) Pv_new) & 0x1)) {seq(mem_store_ut16(EA, ((ut16) ((st16) ((Nt_new >> 0x0) & 0xffff ...} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_11 = LOGAND(CAST(32, MSB(Pv_new), DUP(Pv_new)), SN(32, 1)); + RzILOpEffect *branch_25 = BRANCH(NON_ZERO(op_AND_11), seq_then_23, seq_else_24); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_6, branch_25); + return instruction_sequence; +} + +// if (Pv.new) memh(Rs+Ru<insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Ru_op = ISA2REG(hi, 'u', false); + RzILOpPure *Ru = READ_REG(pkt, Ru_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Pv_new_op = ISA2REG(hi, 'v', true); + RzILOpPure *Pv_new = READ_REG(pkt, Pv_new_op, true); + const HexOp Nt_new_op = NREG2OP(bundle, 't'); + RzILOpPure *Nt_new = READ_REG(pkt, &Nt_new_op, true); + + // u = u; + RzILOpEffect *imm_assign_3 = 
SETL("u", u); + + // EA = ((ut32) Rs + (Ru << u)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(Ru, VARL("u")); + RzILOpPure *op_ADD_6 = ADD(Rs, op_LSHIFT_5); + RzILOpEffect *op_ASSIGN_8 = SETL("EA", CAST(32, IL_FALSE, op_ADD_6)); + + // mem_store_ut16(EA, ((ut16) ((st16) ((Nt_new >> 0x0) & 0xffff)))); + RzILOpPure *op_RSHIFT_18 = SHIFTRA(Nt_new, SN(32, 0)); + RzILOpPure *op_AND_20 = LOGAND(op_RSHIFT_18, SN(32, 0xffff)); + RzILOpEffect *ms_cast_ut16_22_23 = STOREW(VARL("EA"), CAST(16, IL_FALSE, CAST(16, MSB(op_AND_20), DUP(op_AND_20)))); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_24 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(mem_store_ut16(EA, ((ut16) ((st16) ((Nt_new >> 0x0) & 0xffff ...; + RzILOpEffect *seq_then_25 = ms_cast_ut16_22_23; + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_26 = c_call_24; + + // if ((((st32) Pv_new) & 0x1)) {seq(mem_store_ut16(EA, ((ut16) ((st16) ((Nt_new >> 0x0) & 0xffff ...} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_13 = LOGAND(CAST(32, MSB(Pv_new), DUP(Pv_new)), SN(32, 1)); + RzILOpEffect *branch_27 = BRANCH(NON_ZERO(op_AND_13), seq_then_25, seq_else_26); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_3, op_ASSIGN_8, branch_27); + return instruction_sequence; +} + +// if (Pv) memh(Ii) = Rt +RzILOpEffect *hex_il_op_s4_pstorerht_abs(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Pv_op = ISA2REG(hi, 'v', false); + RzILOpPure *Pv = READ_REG(pkt, Pv_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = u; + RzILOpEffect *op_ASSIGN_3 = SETL("EA", VARL("u")); + + // mem_store_ut16(EA, ((ut16) ((st16) ((Rt >> 0x0) & 0xffff)))); + RzILOpPure *op_RSHIFT_13 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_15 = LOGAND(op_RSHIFT_13, SN(32, 
0xffff)); + RzILOpEffect *ms_cast_ut16_17_18 = STOREW(VARL("EA"), CAST(16, IL_FALSE, CAST(16, MSB(op_AND_15), DUP(op_AND_15)))); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_19 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(mem_store_ut16(EA, ((ut16) ((st16) ((Rt >> 0x0) & 0xffff))))); + RzILOpEffect *seq_then_20 = ms_cast_ut16_17_18; + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_21 = c_call_19; + + // if ((((st32) Pv) & 0x1)) {seq(mem_store_ut16(EA, ((ut16) ((st16) ((Rt >> 0x0) & 0xffff)))))} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_8 = LOGAND(CAST(32, MSB(Pv), DUP(Pv)), SN(32, 1)); + RzILOpEffect *branch_22 = BRANCH(NON_ZERO(op_AND_8), seq_then_20, seq_else_21); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_3, branch_22); + return instruction_sequence; +} + +// if (Pv) memh(Rs+Ru<insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Ru_op = ISA2REG(hi, 'u', false); + RzILOpPure *Ru = READ_REG(pkt, Ru_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Pv_op = ISA2REG(hi, 'v', false); + RzILOpPure *Pv = READ_REG(pkt, Pv_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // u = u; + RzILOpEffect *imm_assign_3 = SETL("u", u); + + // EA = ((ut32) Rs + (Ru << u)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(Ru, VARL("u")); + RzILOpPure *op_ADD_6 = ADD(Rs, op_LSHIFT_5); + RzILOpEffect *op_ASSIGN_8 = SETL("EA", CAST(32, IL_FALSE, op_ADD_6)); + + // mem_store_ut16(EA, ((ut16) ((st16) ((Rt >> 0x0) & 0xffff)))); + RzILOpPure *op_RSHIFT_18 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_20 = LOGAND(op_RSHIFT_18, SN(32, 0xffff)); + RzILOpEffect *ms_cast_ut16_22_23 = STOREW(VARL("EA"), CAST(16, IL_FALSE, CAST(16, MSB(op_AND_20), DUP(op_AND_20)))); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_24 = 
HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(mem_store_ut16(EA, ((ut16) ((st16) ((Rt >> 0x0) & 0xffff))))); + RzILOpEffect *seq_then_25 = ms_cast_ut16_22_23; + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_26 = c_call_24; + + // if ((((st32) Pv) & 0x1)) {seq(mem_store_ut16(EA, ((ut16) ((st16) ((Rt >> 0x0) & 0xffff)))))} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_13 = LOGAND(CAST(32, MSB(Pv), DUP(Pv)), SN(32, 1)); + RzILOpEffect *branch_27 = BRANCH(NON_ZERO(op_AND_13), seq_then_25, seq_else_26); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_3, op_ASSIGN_8, branch_27); + return instruction_sequence; +} + +// if (Pv.new) memh(Ii) = Rt +RzILOpEffect *hex_il_op_s4_pstorerhtnew_abs(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Pv_new_op = ISA2REG(hi, 'v', true); + RzILOpPure *Pv_new = READ_REG(pkt, Pv_new_op, true); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = u; + RzILOpEffect *op_ASSIGN_3 = SETL("EA", VARL("u")); + + // mem_store_ut16(EA, ((ut16) ((st16) ((Rt >> 0x0) & 0xffff)))); + RzILOpPure *op_RSHIFT_13 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_15 = LOGAND(op_RSHIFT_13, SN(32, 0xffff)); + RzILOpEffect *ms_cast_ut16_17_18 = STOREW(VARL("EA"), CAST(16, IL_FALSE, CAST(16, MSB(op_AND_15), DUP(op_AND_15)))); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_19 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(mem_store_ut16(EA, ((ut16) ((st16) ((Rt >> 0x0) & 0xffff))))); + RzILOpEffect *seq_then_20 = ms_cast_ut16_17_18; + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_21 = c_call_19; + + // if ((((st32) Pv_new) & 0x1)) {seq(mem_store_ut16(EA, ((ut16) ((st16) ((Rt >> 0x0) & 0xffff)))))} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_8 = 
LOGAND(CAST(32, MSB(Pv_new), DUP(Pv_new)), SN(32, 1)); + RzILOpEffect *branch_22 = BRANCH(NON_ZERO(op_AND_8), seq_then_20, seq_else_21); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_3, branch_22); + return instruction_sequence; +} + +// if (Pv.new) memh(Rs+Ii) = Rt +RzILOpEffect *hex_il_op_s4_pstorerhtnew_io(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Pv_new_op = ISA2REG(hi, 'v', true); + RzILOpPure *Pv_new = READ_REG(pkt, Pv_new_op, true); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = ((ut32) Rs) + u; + RzILOpPure *op_ADD_5 = ADD(CAST(32, IL_FALSE, Rs), VARL("u")); + RzILOpEffect *op_ASSIGN_6 = SETL("EA", op_ADD_5); + + // mem_store_ut16(EA, ((ut16) ((st16) ((Rt >> 0x0) & 0xffff)))); + RzILOpPure *op_RSHIFT_16 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_18 = LOGAND(op_RSHIFT_16, SN(32, 0xffff)); + RzILOpEffect *ms_cast_ut16_20_21 = STOREW(VARL("EA"), CAST(16, IL_FALSE, CAST(16, MSB(op_AND_18), DUP(op_AND_18)))); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_22 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(mem_store_ut16(EA, ((ut16) ((st16) ((Rt >> 0x0) & 0xffff))))); + RzILOpEffect *seq_then_23 = ms_cast_ut16_20_21; + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_24 = c_call_22; + + // if ((((st32) Pv_new) & 0x1)) {seq(mem_store_ut16(EA, ((ut16) ((st16) ((Rt >> 0x0) & 0xffff)))))} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_11 = LOGAND(CAST(32, MSB(Pv_new), DUP(Pv_new)), SN(32, 1)); + RzILOpEffect *branch_25 = BRANCH(NON_ZERO(op_AND_11), seq_then_23, seq_else_24); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_6, 
branch_25); + return instruction_sequence; +} + +// if (Pv.new) memh(Rs+Ru<insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Ru_op = ISA2REG(hi, 'u', false); + RzILOpPure *Ru = READ_REG(pkt, Ru_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Pv_new_op = ISA2REG(hi, 'v', true); + RzILOpPure *Pv_new = READ_REG(pkt, Pv_new_op, true); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // u = u; + RzILOpEffect *imm_assign_3 = SETL("u", u); + + // EA = ((ut32) Rs + (Ru << u)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(Ru, VARL("u")); + RzILOpPure *op_ADD_6 = ADD(Rs, op_LSHIFT_5); + RzILOpEffect *op_ASSIGN_8 = SETL("EA", CAST(32, IL_FALSE, op_ADD_6)); + + // mem_store_ut16(EA, ((ut16) ((st16) ((Rt >> 0x0) & 0xffff)))); + RzILOpPure *op_RSHIFT_18 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_20 = LOGAND(op_RSHIFT_18, SN(32, 0xffff)); + RzILOpEffect *ms_cast_ut16_22_23 = STOREW(VARL("EA"), CAST(16, IL_FALSE, CAST(16, MSB(op_AND_20), DUP(op_AND_20)))); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_24 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(mem_store_ut16(EA, ((ut16) ((st16) ((Rt >> 0x0) & 0xffff))))); + RzILOpEffect *seq_then_25 = ms_cast_ut16_22_23; + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_26 = c_call_24; + + // if ((((st32) Pv_new) & 0x1)) {seq(mem_store_ut16(EA, ((ut16) ((st16) ((Rt >> 0x0) & 0xffff)))))} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_13 = LOGAND(CAST(32, MSB(Pv_new), DUP(Pv_new)), SN(32, 1)); + RzILOpEffect *branch_27 = BRANCH(NON_ZERO(op_AND_13), seq_then_25, seq_else_26); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_3, op_ASSIGN_8, branch_27); + return instruction_sequence; +} + +// if (!Pv) memw(Ii) = Rt +RzILOpEffect *hex_il_op_s4_pstorerif_abs(HexInsnPktBundle *bundle) { + const HexInsn *hi = 
bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Pv_op = ISA2REG(hi, 'v', false); + RzILOpPure *Pv = READ_REG(pkt, Pv_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = u; + RzILOpEffect *op_ASSIGN_3 = SETL("EA", VARL("u")); + + // mem_store_ut32(EA, ((ut32) Rt)); + RzILOpEffect *ms_cast_ut32_11_12 = STOREW(VARL("EA"), CAST(32, IL_FALSE, Rt)); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_13 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(mem_store_ut32(EA, ((ut32) Rt))); + RzILOpEffect *seq_then_14 = ms_cast_ut32_11_12; + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_15 = c_call_13; + + // if (! (((st32) Pv) & 0x1)) {seq(mem_store_ut32(EA, ((ut32) Rt)))} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_8 = LOGAND(CAST(32, MSB(Pv), DUP(Pv)), SN(32, 1)); + RzILOpPure *op_INV_9 = INV(NON_ZERO(op_AND_8)); + RzILOpEffect *branch_16 = BRANCH(op_INV_9, seq_then_14, seq_else_15); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_3, branch_16); + return instruction_sequence; +} + +// if (!Pv) memw(Rs+Ru<insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Ru_op = ISA2REG(hi, 'u', false); + RzILOpPure *Ru = READ_REG(pkt, Ru_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Pv_op = ISA2REG(hi, 'v', false); + RzILOpPure *Pv = READ_REG(pkt, Pv_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // u = u; + RzILOpEffect *imm_assign_3 = SETL("u", u); + + // EA = ((ut32) Rs + (Ru << u)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(Ru, VARL("u")); + RzILOpPure *op_ADD_6 = ADD(Rs, op_LSHIFT_5); + RzILOpEffect 
*op_ASSIGN_8 = SETL("EA", CAST(32, IL_FALSE, op_ADD_6)); + + // mem_store_ut32(EA, ((ut32) Rt)); + RzILOpEffect *ms_cast_ut32_16_17 = STOREW(VARL("EA"), CAST(32, IL_FALSE, Rt)); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_18 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(mem_store_ut32(EA, ((ut32) Rt))); + RzILOpEffect *seq_then_19 = ms_cast_ut32_16_17; + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_20 = c_call_18; + + // if (! (((st32) Pv) & 0x1)) {seq(mem_store_ut32(EA, ((ut32) Rt)))} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_13 = LOGAND(CAST(32, MSB(Pv), DUP(Pv)), SN(32, 1)); + RzILOpPure *op_INV_14 = INV(NON_ZERO(op_AND_13)); + RzILOpEffect *branch_21 = BRANCH(op_INV_14, seq_then_19, seq_else_20); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_3, op_ASSIGN_8, branch_21); + return instruction_sequence; +} + +// if (!Pv.new) memw(Ii) = Rt +RzILOpEffect *hex_il_op_s4_pstorerifnew_abs(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Pv_new_op = ISA2REG(hi, 'v', true); + RzILOpPure *Pv_new = READ_REG(pkt, Pv_new_op, true); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = u; + RzILOpEffect *op_ASSIGN_3 = SETL("EA", VARL("u")); + + // mem_store_ut32(EA, ((ut32) Rt)); + RzILOpEffect *ms_cast_ut32_11_12 = STOREW(VARL("EA"), CAST(32, IL_FALSE, Rt)); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_13 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(mem_store_ut32(EA, ((ut32) Rt))); + RzILOpEffect *seq_then_14 = ms_cast_ut32_11_12; + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_15 = c_call_13; + + // if (! 
(((st32) Pv_new) & 0x1)) {seq(mem_store_ut32(EA, ((ut32) Rt)))} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_8 = LOGAND(CAST(32, MSB(Pv_new), DUP(Pv_new)), SN(32, 1)); + RzILOpPure *op_INV_9 = INV(NON_ZERO(op_AND_8)); + RzILOpEffect *branch_16 = BRANCH(op_INV_9, seq_then_14, seq_else_15); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_3, branch_16); + return instruction_sequence; +} + +// if (!Pv.new) memw(Rs+Ii) = Rt +RzILOpEffect *hex_il_op_s4_pstorerifnew_io(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Pv_new_op = ISA2REG(hi, 'v', true); + RzILOpPure *Pv_new = READ_REG(pkt, Pv_new_op, true); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = ((ut32) Rs) + u; + RzILOpPure *op_ADD_5 = ADD(CAST(32, IL_FALSE, Rs), VARL("u")); + RzILOpEffect *op_ASSIGN_6 = SETL("EA", op_ADD_5); + + // mem_store_ut32(EA, ((ut32) Rt)); + RzILOpEffect *ms_cast_ut32_14_15 = STOREW(VARL("EA"), CAST(32, IL_FALSE, Rt)); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_16 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(mem_store_ut32(EA, ((ut32) Rt))); + RzILOpEffect *seq_then_17 = ms_cast_ut32_14_15; + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_18 = c_call_16; + + // if (! 
(((st32) Pv_new) & 0x1)) {seq(mem_store_ut32(EA, ((ut32) Rt)))} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_11 = LOGAND(CAST(32, MSB(Pv_new), DUP(Pv_new)), SN(32, 1)); + RzILOpPure *op_INV_12 = INV(NON_ZERO(op_AND_11)); + RzILOpEffect *branch_19 = BRANCH(op_INV_12, seq_then_17, seq_else_18); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_6, branch_19); + return instruction_sequence; +} + +// if (!Pv.new) memw(Rs+Ru<insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Ru_op = ISA2REG(hi, 'u', false); + RzILOpPure *Ru = READ_REG(pkt, Ru_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Pv_new_op = ISA2REG(hi, 'v', true); + RzILOpPure *Pv_new = READ_REG(pkt, Pv_new_op, true); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // u = u; + RzILOpEffect *imm_assign_3 = SETL("u", u); + + // EA = ((ut32) Rs + (Ru << u)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(Ru, VARL("u")); + RzILOpPure *op_ADD_6 = ADD(Rs, op_LSHIFT_5); + RzILOpEffect *op_ASSIGN_8 = SETL("EA", CAST(32, IL_FALSE, op_ADD_6)); + + // mem_store_ut32(EA, ((ut32) Rt)); + RzILOpEffect *ms_cast_ut32_16_17 = STOREW(VARL("EA"), CAST(32, IL_FALSE, Rt)); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_18 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(mem_store_ut32(EA, ((ut32) Rt))); + RzILOpEffect *seq_then_19 = ms_cast_ut32_16_17; + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_20 = c_call_18; + + // if (! 
(((st32) Pv_new) & 0x1)) {seq(mem_store_ut32(EA, ((ut32) Rt)))} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_13 = LOGAND(CAST(32, MSB(Pv_new), DUP(Pv_new)), SN(32, 1)); + RzILOpPure *op_INV_14 = INV(NON_ZERO(op_AND_13)); + RzILOpEffect *branch_21 = BRANCH(op_INV_14, seq_then_19, seq_else_20); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_3, op_ASSIGN_8, branch_21); + return instruction_sequence; +} + +// if (!Pv) memw(Ii) = Nt.new +RzILOpEffect *hex_il_op_s4_pstorerinewf_abs(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Pv_op = ISA2REG(hi, 'v', false); + RzILOpPure *Pv = READ_REG(pkt, Pv_op, false); + const HexOp Nt_new_op = NREG2OP(bundle, 't'); + RzILOpPure *Nt_new = READ_REG(pkt, &Nt_new_op, true); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = u; + RzILOpEffect *op_ASSIGN_3 = SETL("EA", VARL("u")); + + // mem_store_ut32(EA, ((ut32) Nt_new)); + RzILOpEffect *ms_cast_ut32_11_12 = STOREW(VARL("EA"), CAST(32, IL_FALSE, Nt_new)); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_13 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(mem_store_ut32(EA, ((ut32) Nt_new))); + RzILOpEffect *seq_then_14 = ms_cast_ut32_11_12; + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_15 = c_call_13; + + // if (! 
(((st32) Pv) & 0x1)) {seq(mem_store_ut32(EA, ((ut32) Nt_new)))} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_8 = LOGAND(CAST(32, MSB(Pv), DUP(Pv)), SN(32, 1)); + RzILOpPure *op_INV_9 = INV(NON_ZERO(op_AND_8)); + RzILOpEffect *branch_16 = BRANCH(op_INV_9, seq_then_14, seq_else_15); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_3, branch_16); + return instruction_sequence; +} + +// if (!Pv) memw(Rs+Ru<insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Ru_op = ISA2REG(hi, 'u', false); + RzILOpPure *Ru = READ_REG(pkt, Ru_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Pv_op = ISA2REG(hi, 'v', false); + RzILOpPure *Pv = READ_REG(pkt, Pv_op, false); + const HexOp Nt_new_op = NREG2OP(bundle, 't'); + RzILOpPure *Nt_new = READ_REG(pkt, &Nt_new_op, true); + + // u = u; + RzILOpEffect *imm_assign_3 = SETL("u", u); + + // EA = ((ut32) Rs + (Ru << u)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(Ru, VARL("u")); + RzILOpPure *op_ADD_6 = ADD(Rs, op_LSHIFT_5); + RzILOpEffect *op_ASSIGN_8 = SETL("EA", CAST(32, IL_FALSE, op_ADD_6)); + + // mem_store_ut32(EA, ((ut32) Nt_new)); + RzILOpEffect *ms_cast_ut32_16_17 = STOREW(VARL("EA"), CAST(32, IL_FALSE, Nt_new)); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_18 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(mem_store_ut32(EA, ((ut32) Nt_new))); + RzILOpEffect *seq_then_19 = ms_cast_ut32_16_17; + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_20 = c_call_18; + + // if (! 
(((st32) Pv) & 0x1)) {seq(mem_store_ut32(EA, ((ut32) Nt_new)))} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_13 = LOGAND(CAST(32, MSB(Pv), DUP(Pv)), SN(32, 1)); + RzILOpPure *op_INV_14 = INV(NON_ZERO(op_AND_13)); + RzILOpEffect *branch_21 = BRANCH(op_INV_14, seq_then_19, seq_else_20); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_3, op_ASSIGN_8, branch_21); + return instruction_sequence; +} + +// if (!Pv.new) memw(Ii) = Nt.new +RzILOpEffect *hex_il_op_s4_pstorerinewfnew_abs(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Pv_new_op = ISA2REG(hi, 'v', true); + RzILOpPure *Pv_new = READ_REG(pkt, Pv_new_op, true); + const HexOp Nt_new_op = NREG2OP(bundle, 't'); + RzILOpPure *Nt_new = READ_REG(pkt, &Nt_new_op, true); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = u; + RzILOpEffect *op_ASSIGN_3 = SETL("EA", VARL("u")); + + // mem_store_ut32(EA, ((ut32) Nt_new)); + RzILOpEffect *ms_cast_ut32_11_12 = STOREW(VARL("EA"), CAST(32, IL_FALSE, Nt_new)); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_13 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(mem_store_ut32(EA, ((ut32) Nt_new))); + RzILOpEffect *seq_then_14 = ms_cast_ut32_11_12; + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_15 = c_call_13; + + // if (! 
(((st32) Pv_new) & 0x1)) {seq(mem_store_ut32(EA, ((ut32) Nt_new)))} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_8 = LOGAND(CAST(32, MSB(Pv_new), DUP(Pv_new)), SN(32, 1)); + RzILOpPure *op_INV_9 = INV(NON_ZERO(op_AND_8)); + RzILOpEffect *branch_16 = BRANCH(op_INV_9, seq_then_14, seq_else_15); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_3, branch_16); + return instruction_sequence; +} + +// if (!Pv.new) memw(Rs+Ii) = Nt.new +RzILOpEffect *hex_il_op_s4_pstorerinewfnew_io(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Pv_new_op = ISA2REG(hi, 'v', true); + RzILOpPure *Pv_new = READ_REG(pkt, Pv_new_op, true); + const HexOp Nt_new_op = NREG2OP(bundle, 't'); + RzILOpPure *Nt_new = READ_REG(pkt, &Nt_new_op, true); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = ((ut32) Rs) + u; + RzILOpPure *op_ADD_5 = ADD(CAST(32, IL_FALSE, Rs), VARL("u")); + RzILOpEffect *op_ASSIGN_6 = SETL("EA", op_ADD_5); + + // mem_store_ut32(EA, ((ut32) Nt_new)); + RzILOpEffect *ms_cast_ut32_14_15 = STOREW(VARL("EA"), CAST(32, IL_FALSE, Nt_new)); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_16 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(mem_store_ut32(EA, ((ut32) Nt_new))); + RzILOpEffect *seq_then_17 = ms_cast_ut32_14_15; + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_18 = c_call_16; + + // if (! 
(((st32) Pv_new) & 0x1)) {seq(mem_store_ut32(EA, ((ut32) Nt_new)))} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_11 = LOGAND(CAST(32, MSB(Pv_new), DUP(Pv_new)), SN(32, 1)); + RzILOpPure *op_INV_12 = INV(NON_ZERO(op_AND_11)); + RzILOpEffect *branch_19 = BRANCH(op_INV_12, seq_then_17, seq_else_18); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_6, branch_19); + return instruction_sequence; +} + +// if (!Pv.new) memw(Rs+Ru<insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Ru_op = ISA2REG(hi, 'u', false); + RzILOpPure *Ru = READ_REG(pkt, Ru_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Pv_new_op = ISA2REG(hi, 'v', true); + RzILOpPure *Pv_new = READ_REG(pkt, Pv_new_op, true); + const HexOp Nt_new_op = NREG2OP(bundle, 't'); + RzILOpPure *Nt_new = READ_REG(pkt, &Nt_new_op, true); + + // u = u; + RzILOpEffect *imm_assign_3 = SETL("u", u); + + // EA = ((ut32) Rs + (Ru << u)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(Ru, VARL("u")); + RzILOpPure *op_ADD_6 = ADD(Rs, op_LSHIFT_5); + RzILOpEffect *op_ASSIGN_8 = SETL("EA", CAST(32, IL_FALSE, op_ADD_6)); + + // mem_store_ut32(EA, ((ut32) Nt_new)); + RzILOpEffect *ms_cast_ut32_16_17 = STOREW(VARL("EA"), CAST(32, IL_FALSE, Nt_new)); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_18 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(mem_store_ut32(EA, ((ut32) Nt_new))); + RzILOpEffect *seq_then_19 = ms_cast_ut32_16_17; + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_20 = c_call_18; + + // if (! 
(((st32) Pv_new) & 0x1)) {seq(mem_store_ut32(EA, ((ut32) Nt_new)))} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_13 = LOGAND(CAST(32, MSB(Pv_new), DUP(Pv_new)), SN(32, 1)); + RzILOpPure *op_INV_14 = INV(NON_ZERO(op_AND_13)); + RzILOpEffect *branch_21 = BRANCH(op_INV_14, seq_then_19, seq_else_20); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_3, op_ASSIGN_8, branch_21); + return instruction_sequence; +} + +// if (Pv) memw(Ii) = Nt.new +RzILOpEffect *hex_il_op_s4_pstorerinewt_abs(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Pv_op = ISA2REG(hi, 'v', false); + RzILOpPure *Pv = READ_REG(pkt, Pv_op, false); + const HexOp Nt_new_op = NREG2OP(bundle, 't'); + RzILOpPure *Nt_new = READ_REG(pkt, &Nt_new_op, true); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = u; + RzILOpEffect *op_ASSIGN_3 = SETL("EA", VARL("u")); + + // mem_store_ut32(EA, ((ut32) Nt_new)); + RzILOpEffect *ms_cast_ut32_10_11 = STOREW(VARL("EA"), CAST(32, IL_FALSE, Nt_new)); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_12 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(mem_store_ut32(EA, ((ut32) Nt_new))); + RzILOpEffect *seq_then_13 = ms_cast_ut32_10_11; + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_14 = c_call_12; + + // if ((((st32) Pv) & 0x1)) {seq(mem_store_ut32(EA, ((ut32) Nt_new)))} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_8 = LOGAND(CAST(32, MSB(Pv), DUP(Pv)), SN(32, 1)); + RzILOpEffect *branch_15 = BRANCH(NON_ZERO(op_AND_8), seq_then_13, seq_else_14); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_3, branch_15); + return instruction_sequence; +} + +// if (Pv) memw(Rs+Ru<insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp 
*Ru_op = ISA2REG(hi, 'u', false); + RzILOpPure *Ru = READ_REG(pkt, Ru_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Pv_op = ISA2REG(hi, 'v', false); + RzILOpPure *Pv = READ_REG(pkt, Pv_op, false); + const HexOp Nt_new_op = NREG2OP(bundle, 't'); + RzILOpPure *Nt_new = READ_REG(pkt, &Nt_new_op, true); + + // u = u; + RzILOpEffect *imm_assign_3 = SETL("u", u); + + // EA = ((ut32) Rs + (Ru << u)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(Ru, VARL("u")); + RzILOpPure *op_ADD_6 = ADD(Rs, op_LSHIFT_5); + RzILOpEffect *op_ASSIGN_8 = SETL("EA", CAST(32, IL_FALSE, op_ADD_6)); + + // mem_store_ut32(EA, ((ut32) Nt_new)); + RzILOpEffect *ms_cast_ut32_15_16 = STOREW(VARL("EA"), CAST(32, IL_FALSE, Nt_new)); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_17 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(mem_store_ut32(EA, ((ut32) Nt_new))); + RzILOpEffect *seq_then_18 = ms_cast_ut32_15_16; + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_19 = c_call_17; + + // if ((((st32) Pv) & 0x1)) {seq(mem_store_ut32(EA, ((ut32) Nt_new)))} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_13 = LOGAND(CAST(32, MSB(Pv), DUP(Pv)), SN(32, 1)); + RzILOpEffect *branch_20 = BRANCH(NON_ZERO(op_AND_13), seq_then_18, seq_else_19); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_3, op_ASSIGN_8, branch_20); + return instruction_sequence; +} + +// if (Pv.new) memw(Ii) = Nt.new +RzILOpEffect *hex_il_op_s4_pstorerinewtnew_abs(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Pv_new_op = ISA2REG(hi, 'v', true); + RzILOpPure *Pv_new = READ_REG(pkt, Pv_new_op, true); + const HexOp Nt_new_op = NREG2OP(bundle, 't'); + RzILOpPure *Nt_new = READ_REG(pkt, &Nt_new_op, true); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = u; + RzILOpEffect *op_ASSIGN_3 = SETL("EA", VARL("u")); + + // 
mem_store_ut32(EA, ((ut32) Nt_new)); + RzILOpEffect *ms_cast_ut32_10_11 = STOREW(VARL("EA"), CAST(32, IL_FALSE, Nt_new)); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_12 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(mem_store_ut32(EA, ((ut32) Nt_new))); + RzILOpEffect *seq_then_13 = ms_cast_ut32_10_11; + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_14 = c_call_12; + + // if ((((st32) Pv_new) & 0x1)) {seq(mem_store_ut32(EA, ((ut32) Nt_new)))} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_8 = LOGAND(CAST(32, MSB(Pv_new), DUP(Pv_new)), SN(32, 1)); + RzILOpEffect *branch_15 = BRANCH(NON_ZERO(op_AND_8), seq_then_13, seq_else_14); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_3, branch_15); + return instruction_sequence; +} + +// if (Pv.new) memw(Rs+Ii) = Nt.new +RzILOpEffect *hex_il_op_s4_pstorerinewtnew_io(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Pv_new_op = ISA2REG(hi, 'v', true); + RzILOpPure *Pv_new = READ_REG(pkt, Pv_new_op, true); + const HexOp Nt_new_op = NREG2OP(bundle, 't'); + RzILOpPure *Nt_new = READ_REG(pkt, &Nt_new_op, true); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = ((ut32) Rs) + u; + RzILOpPure *op_ADD_5 = ADD(CAST(32, IL_FALSE, Rs), VARL("u")); + RzILOpEffect *op_ASSIGN_6 = SETL("EA", op_ADD_5); + + // mem_store_ut32(EA, ((ut32) Nt_new)); + RzILOpEffect *ms_cast_ut32_13_14 = STOREW(VARL("EA"), CAST(32, IL_FALSE, Nt_new)); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_15 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(mem_store_ut32(EA, ((ut32) Nt_new))); + RzILOpEffect *seq_then_16 = ms_cast_ut32_13_14; + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_17 = c_call_15; + + // if ((((st32) Pv_new) & 0x1)) 
{seq(mem_store_ut32(EA, ((ut32) Nt_new)))} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_11 = LOGAND(CAST(32, MSB(Pv_new), DUP(Pv_new)), SN(32, 1)); + RzILOpEffect *branch_18 = BRANCH(NON_ZERO(op_AND_11), seq_then_16, seq_else_17); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_6, branch_18); + return instruction_sequence; +} + +// if (Pv.new) memw(Rs+Ru<insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Ru_op = ISA2REG(hi, 'u', false); + RzILOpPure *Ru = READ_REG(pkt, Ru_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Pv_new_op = ISA2REG(hi, 'v', true); + RzILOpPure *Pv_new = READ_REG(pkt, Pv_new_op, true); + const HexOp Nt_new_op = NREG2OP(bundle, 't'); + RzILOpPure *Nt_new = READ_REG(pkt, &Nt_new_op, true); + + // u = u; + RzILOpEffect *imm_assign_3 = SETL("u", u); + + // EA = ((ut32) Rs + (Ru << u)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(Ru, VARL("u")); + RzILOpPure *op_ADD_6 = ADD(Rs, op_LSHIFT_5); + RzILOpEffect *op_ASSIGN_8 = SETL("EA", CAST(32, IL_FALSE, op_ADD_6)); + + // mem_store_ut32(EA, ((ut32) Nt_new)); + RzILOpEffect *ms_cast_ut32_15_16 = STOREW(VARL("EA"), CAST(32, IL_FALSE, Nt_new)); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_17 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(mem_store_ut32(EA, ((ut32) Nt_new))); + RzILOpEffect *seq_then_18 = ms_cast_ut32_15_16; + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_19 = c_call_17; + + // if ((((st32) Pv_new) & 0x1)) {seq(mem_store_ut32(EA, ((ut32) Nt_new)))} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_13 = LOGAND(CAST(32, MSB(Pv_new), DUP(Pv_new)), SN(32, 1)); + RzILOpEffect *branch_20 = BRANCH(NON_ZERO(op_AND_13), seq_then_18, seq_else_19); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_3, op_ASSIGN_8, branch_20); + return instruction_sequence; +} + +// if (Pv) memw(Ii) = 
Rt +RzILOpEffect *hex_il_op_s4_pstorerit_abs(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Pv_op = ISA2REG(hi, 'v', false); + RzILOpPure *Pv = READ_REG(pkt, Pv_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = u; + RzILOpEffect *op_ASSIGN_3 = SETL("EA", VARL("u")); + + // mem_store_ut32(EA, ((ut32) Rt)); + RzILOpEffect *ms_cast_ut32_10_11 = STOREW(VARL("EA"), CAST(32, IL_FALSE, Rt)); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_12 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(mem_store_ut32(EA, ((ut32) Rt))); + RzILOpEffect *seq_then_13 = ms_cast_ut32_10_11; + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_14 = c_call_12; + + // if ((((st32) Pv) & 0x1)) {seq(mem_store_ut32(EA, ((ut32) Rt)))} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_8 = LOGAND(CAST(32, MSB(Pv), DUP(Pv)), SN(32, 1)); + RzILOpEffect *branch_15 = BRANCH(NON_ZERO(op_AND_8), seq_then_13, seq_else_14); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_3, branch_15); + return instruction_sequence; +} + +// if (Pv) memw(Rs+Ru<insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Ru_op = ISA2REG(hi, 'u', false); + RzILOpPure *Ru = READ_REG(pkt, Ru_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Pv_op = ISA2REG(hi, 'v', false); + RzILOpPure *Pv = READ_REG(pkt, Pv_op, false); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // u = u; + RzILOpEffect *imm_assign_3 = SETL("u", u); + + // EA = ((ut32) Rs + (Ru << u)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(Ru, VARL("u")); + RzILOpPure 
*op_ADD_6 = ADD(Rs, op_LSHIFT_5); + RzILOpEffect *op_ASSIGN_8 = SETL("EA", CAST(32, IL_FALSE, op_ADD_6)); + + // mem_store_ut32(EA, ((ut32) Rt)); + RzILOpEffect *ms_cast_ut32_15_16 = STOREW(VARL("EA"), CAST(32, IL_FALSE, Rt)); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_17 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(mem_store_ut32(EA, ((ut32) Rt))); + RzILOpEffect *seq_then_18 = ms_cast_ut32_15_16; + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_19 = c_call_17; + + // if ((((st32) Pv) & 0x1)) {seq(mem_store_ut32(EA, ((ut32) Rt)))} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_13 = LOGAND(CAST(32, MSB(Pv), DUP(Pv)), SN(32, 1)); + RzILOpEffect *branch_20 = BRANCH(NON_ZERO(op_AND_13), seq_then_18, seq_else_19); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_3, op_ASSIGN_8, branch_20); + return instruction_sequence; +} + +// if (Pv.new) memw(Ii) = Rt +RzILOpEffect *hex_il_op_s4_pstoreritnew_abs(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Pv_new_op = ISA2REG(hi, 'v', true); + RzILOpPure *Pv_new = READ_REG(pkt, Pv_new_op, true); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = u; + RzILOpEffect *op_ASSIGN_3 = SETL("EA", VARL("u")); + + // mem_store_ut32(EA, ((ut32) Rt)); + RzILOpEffect *ms_cast_ut32_10_11 = STOREW(VARL("EA"), CAST(32, IL_FALSE, Rt)); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_12 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(mem_store_ut32(EA, ((ut32) Rt))); + RzILOpEffect *seq_then_13 = ms_cast_ut32_10_11; + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_14 = c_call_12; + + // if ((((st32) Pv_new) & 0x1)) {seq(mem_store_ut32(EA, ((ut32) Rt)))} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_8 = LOGAND(CAST(32, 
MSB(Pv_new), DUP(Pv_new)), SN(32, 1)); + RzILOpEffect *branch_15 = BRANCH(NON_ZERO(op_AND_8), seq_then_13, seq_else_14); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_3, branch_15); + return instruction_sequence; +} + +// if (Pv.new) memw(Rs+Ii) = Rt +RzILOpEffect *hex_il_op_s4_pstoreritnew_io(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Pv_new_op = ISA2REG(hi, 'v', true); + RzILOpPure *Pv_new = READ_REG(pkt, Pv_new_op, true); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // EA = ((ut32) Rs) + u; + RzILOpPure *op_ADD_5 = ADD(CAST(32, IL_FALSE, Rs), VARL("u")); + RzILOpEffect *op_ASSIGN_6 = SETL("EA", op_ADD_5); + + // mem_store_ut32(EA, ((ut32) Rt)); + RzILOpEffect *ms_cast_ut32_13_14 = STOREW(VARL("EA"), CAST(32, IL_FALSE, Rt)); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_15 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(mem_store_ut32(EA, ((ut32) Rt))); + RzILOpEffect *seq_then_16 = ms_cast_ut32_13_14; + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_17 = c_call_15; + + // if ((((st32) Pv_new) & 0x1)) {seq(mem_store_ut32(EA, ((ut32) Rt)))} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_11 = LOGAND(CAST(32, MSB(Pv_new), DUP(Pv_new)), SN(32, 1)); + RzILOpEffect *branch_18 = BRANCH(NON_ZERO(op_AND_11), seq_then_16, seq_else_17); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_0, op_ASSIGN_6, branch_18); + return instruction_sequence; +} + +// if (Pv.new) memw(Rs+Ru<insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Ru_op = 
ISA2REG(hi, 'u', false); + RzILOpPure *Ru = READ_REG(pkt, Ru_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Pv_new_op = ISA2REG(hi, 'v', true); + RzILOpPure *Pv_new = READ_REG(pkt, Pv_new_op, true); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // u = u; + RzILOpEffect *imm_assign_3 = SETL("u", u); + + // EA = ((ut32) Rs + (Ru << u)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(Ru, VARL("u")); + RzILOpPure *op_ADD_6 = ADD(Rs, op_LSHIFT_5); + RzILOpEffect *op_ASSIGN_8 = SETL("EA", CAST(32, IL_FALSE, op_ADD_6)); + + // mem_store_ut32(EA, ((ut32) Rt)); + RzILOpEffect *ms_cast_ut32_15_16 = STOREW(VARL("EA"), CAST(32, IL_FALSE, Rt)); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_17 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(mem_store_ut32(EA, ((ut32) Rt))); + RzILOpEffect *seq_then_18 = ms_cast_ut32_15_16; + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_19 = c_call_17; + + // if ((((st32) Pv_new) & 0x1)) {seq(mem_store_ut32(EA, ((ut32) Rt)))} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_13 = LOGAND(CAST(32, MSB(Pv_new), DUP(Pv_new)), SN(32, 1)); + RzILOpEffect *branch_20 = BRANCH(NON_ZERO(op_AND_13), seq_then_18, seq_else_19); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_3, op_ASSIGN_8, branch_20); + return instruction_sequence; +} + +// memd_locked(Rs,Pd) = Rtt +RzILOpEffect *hex_il_op_s4_stored_locked(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// memd_rl(Rs):at = Rtt +RzILOpEffect *hex_il_op_s4_stored_rl_at_vi(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // EA = ((ut32) Rs); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, Rs)); + + // 
mem_store_ut64(EA, ((ut64) Rtt)); + RzILOpEffect *ms_cast_ut64_6_7 = STOREW(VARL("EA"), CAST(64, IL_FALSE, Rtt)); + + RzILOpEffect *instruction_sequence = SEQN(2, op_ASSIGN_3, ms_cast_ut64_6_7); + return instruction_sequence; +} + +// memd_rl(Rs):st = Rtt +RzILOpEffect *hex_il_op_s4_stored_rl_st_vi(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // EA = ((ut32) Rs); + RzILOpEffect *op_ASSIGN_3 = SETL("EA", CAST(32, IL_FALSE, Rs)); + + // mem_store_ut64(EA, ((ut64) Rtt)); + RzILOpEffect *ms_cast_ut64_6_7 = STOREW(VARL("EA"), CAST(64, IL_FALSE, Rtt)); + + RzILOpEffect *instruction_sequence = SEQN(2, op_ASSIGN_3, ms_cast_ut64_6_7); + return instruction_sequence; +} + +// memb(Rs+Ii) = II +RzILOpEffect *hex_il_op_s4_storeirb_io(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + RzILOpPure *S = SN(32, (st32)ISA2IMM(hi, 'S')); + + // u = u; + RzILOpEffect *imm_assign_2 = SETL("u", u); + + // EA = ((ut32) Rs) + u; + RzILOpPure *op_ADD_5 = ADD(CAST(32, IL_FALSE, Rs), VARL("u")); + RzILOpEffect *op_ASSIGN_6 = SETL("EA", op_ADD_5); + + // S = S; + RzILOpEffect *imm_assign_8 = SETL("S", S); + + // mem_store_ut8(EA, ((ut8) S)); + RzILOpEffect *ms_cast_ut8_10_11 = STOREW(VARL("EA"), CAST(8, IL_FALSE, VARL("S"))); + + RzILOpEffect *instruction_sequence = SEQN(4, imm_assign_2, imm_assign_8, op_ASSIGN_6, ms_cast_ut8_10_11); + return instruction_sequence; +} + +// if (!Pv) memb(Rs+Ii) = II +RzILOpEffect *hex_il_op_s4_storeirbf_io(HexInsnPktBundle *bundle) { + const HexInsn *hi = 
bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Pv_op = ISA2REG(hi, 'v', false); + RzILOpPure *Pv = READ_REG(pkt, Pv_op, false); + RzILOpPure *S = SN(32, (st32)ISA2IMM(hi, 'S')); + + // u = u; + RzILOpEffect *imm_assign_2 = SETL("u", u); + + // EA = ((ut32) Rs) + u; + RzILOpPure *op_ADD_5 = ADD(CAST(32, IL_FALSE, Rs), VARL("u")); + RzILOpEffect *op_ASSIGN_6 = SETL("EA", op_ADD_5); + + // S = S; + RzILOpEffect *imm_assign_13 = SETL("S", S); + + // mem_store_ut8(EA, ((ut8) S)); + RzILOpEffect *ms_cast_ut8_15_16 = STOREW(VARL("EA"), CAST(8, IL_FALSE, VARL("S"))); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_17 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(S; mem_store_ut8(EA, ((ut8) S))); + RzILOpEffect *seq_then_18 = ms_cast_ut8_15_16; + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_19 = c_call_17; + + // if (! 
(((st32) Pv) & 0x1)) {seq(S; mem_store_ut8(EA, ((ut8) S)))} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_11 = LOGAND(CAST(32, MSB(Pv), DUP(Pv)), SN(32, 1)); + RzILOpPure *op_INV_12 = INV(NON_ZERO(op_AND_11)); + RzILOpEffect *branch_20 = BRANCH(op_INV_12, seq_then_18, seq_else_19); + + RzILOpEffect *instruction_sequence = SEQN(4, imm_assign_2, imm_assign_13, op_ASSIGN_6, branch_20); + return instruction_sequence; +} + +// if (!Pv.new) memb(Rs+Ii) = II +RzILOpEffect *hex_il_op_s4_storeirbfnew_io(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Pv_new_op = ISA2REG(hi, 'v', true); + RzILOpPure *Pv_new = READ_REG(pkt, Pv_new_op, true); + RzILOpPure *S = SN(32, (st32)ISA2IMM(hi, 'S')); + + // u = u; + RzILOpEffect *imm_assign_2 = SETL("u", u); + + // EA = ((ut32) Rs) + u; + RzILOpPure *op_ADD_5 = ADD(CAST(32, IL_FALSE, Rs), VARL("u")); + RzILOpEffect *op_ASSIGN_6 = SETL("EA", op_ADD_5); + + // S = S; + RzILOpEffect *imm_assign_13 = SETL("S", S); + + // mem_store_ut8(EA, ((ut8) S)); + RzILOpEffect *ms_cast_ut8_15_16 = STOREW(VARL("EA"), CAST(8, IL_FALSE, VARL("S"))); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_17 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(S; mem_store_ut8(EA, ((ut8) S))); + RzILOpEffect *seq_then_18 = ms_cast_ut8_15_16; + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_19 = c_call_17; + + // if (! 
(((st32) Pv_new) & 0x1)) {seq(S; mem_store_ut8(EA, ((ut8) S)))} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_11 = LOGAND(CAST(32, MSB(Pv_new), DUP(Pv_new)), SN(32, 1)); + RzILOpPure *op_INV_12 = INV(NON_ZERO(op_AND_11)); + RzILOpEffect *branch_20 = BRANCH(op_INV_12, seq_then_18, seq_else_19); + + RzILOpEffect *instruction_sequence = SEQN(4, imm_assign_2, imm_assign_13, op_ASSIGN_6, branch_20); + return instruction_sequence; +} + +// if (Pv) memb(Rs+Ii) = II +RzILOpEffect *hex_il_op_s4_storeirbt_io(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Pv_op = ISA2REG(hi, 'v', false); + RzILOpPure *Pv = READ_REG(pkt, Pv_op, false); + RzILOpPure *S = SN(32, (st32)ISA2IMM(hi, 'S')); + + // u = u; + RzILOpEffect *imm_assign_2 = SETL("u", u); + + // EA = ((ut32) Rs) + u; + RzILOpPure *op_ADD_5 = ADD(CAST(32, IL_FALSE, Rs), VARL("u")); + RzILOpEffect *op_ASSIGN_6 = SETL("EA", op_ADD_5); + + // S = S; + RzILOpEffect *imm_assign_12 = SETL("S", S); + + // mem_store_ut8(EA, ((ut8) S)); + RzILOpEffect *ms_cast_ut8_14_15 = STOREW(VARL("EA"), CAST(8, IL_FALSE, VARL("S"))); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_16 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(S; mem_store_ut8(EA, ((ut8) S))); + RzILOpEffect *seq_then_17 = ms_cast_ut8_14_15; + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_18 = c_call_16; + + // if ((((st32) Pv) & 0x1)) {seq(S; mem_store_ut8(EA, ((ut8) S)))} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_11 = LOGAND(CAST(32, MSB(Pv), DUP(Pv)), SN(32, 1)); + RzILOpEffect *branch_19 = BRANCH(NON_ZERO(op_AND_11), seq_then_17, seq_else_18); + + RzILOpEffect *instruction_sequence = SEQN(4, imm_assign_2, imm_assign_12, op_ASSIGN_6, branch_19); + return instruction_sequence; +} + +// if (Pv.new) 
memb(Rs+Ii) = II +RzILOpEffect *hex_il_op_s4_storeirbtnew_io(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Pv_new_op = ISA2REG(hi, 'v', true); + RzILOpPure *Pv_new = READ_REG(pkt, Pv_new_op, true); + RzILOpPure *S = SN(32, (st32)ISA2IMM(hi, 'S')); + + // u = u; + RzILOpEffect *imm_assign_2 = SETL("u", u); + + // EA = ((ut32) Rs) + u; + RzILOpPure *op_ADD_5 = ADD(CAST(32, IL_FALSE, Rs), VARL("u")); + RzILOpEffect *op_ASSIGN_6 = SETL("EA", op_ADD_5); + + // S = S; + RzILOpEffect *imm_assign_12 = SETL("S", S); + + // mem_store_ut8(EA, ((ut8) S)); + RzILOpEffect *ms_cast_ut8_14_15 = STOREW(VARL("EA"), CAST(8, IL_FALSE, VARL("S"))); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_16 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(S; mem_store_ut8(EA, ((ut8) S))); + RzILOpEffect *seq_then_17 = ms_cast_ut8_14_15; + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_18 = c_call_16; + + // if ((((st32) Pv_new) & 0x1)) {seq(S; mem_store_ut8(EA, ((ut8) S)))} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_11 = LOGAND(CAST(32, MSB(Pv_new), DUP(Pv_new)), SN(32, 1)); + RzILOpEffect *branch_19 = BRANCH(NON_ZERO(op_AND_11), seq_then_17, seq_else_18); + + RzILOpEffect *instruction_sequence = SEQN(4, imm_assign_2, imm_assign_12, op_ASSIGN_6, branch_19); + return instruction_sequence; +} + +// memh(Rs+Ii) = II +RzILOpEffect *hex_il_op_s4_storeirh_io(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + RzILOpPure *S = SN(32, (st32)ISA2IMM(hi, 'S')); + + // u = u; + RzILOpEffect *imm_assign_2 = SETL("u", u); + + 
// EA = ((ut32) Rs) + u; + RzILOpPure *op_ADD_5 = ADD(CAST(32, IL_FALSE, Rs), VARL("u")); + RzILOpEffect *op_ASSIGN_6 = SETL("EA", op_ADD_5); + + // S = S; + RzILOpEffect *imm_assign_8 = SETL("S", S); + + // mem_store_ut16(EA, ((ut16) S)); + RzILOpEffect *ms_cast_ut16_10_11 = STOREW(VARL("EA"), CAST(16, IL_FALSE, VARL("S"))); + + RzILOpEffect *instruction_sequence = SEQN(4, imm_assign_2, imm_assign_8, op_ASSIGN_6, ms_cast_ut16_10_11); + return instruction_sequence; +} + +// if (!Pv) memh(Rs+Ii) = II +RzILOpEffect *hex_il_op_s4_storeirhf_io(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Pv_op = ISA2REG(hi, 'v', false); + RzILOpPure *Pv = READ_REG(pkt, Pv_op, false); + RzILOpPure *S = SN(32, (st32)ISA2IMM(hi, 'S')); + + // u = u; + RzILOpEffect *imm_assign_2 = SETL("u", u); + + // EA = ((ut32) Rs) + u; + RzILOpPure *op_ADD_5 = ADD(CAST(32, IL_FALSE, Rs), VARL("u")); + RzILOpEffect *op_ASSIGN_6 = SETL("EA", op_ADD_5); + + // S = S; + RzILOpEffect *imm_assign_13 = SETL("S", S); + + // mem_store_ut16(EA, ((ut16) S)); + RzILOpEffect *ms_cast_ut16_15_16 = STOREW(VARL("EA"), CAST(16, IL_FALSE, VARL("S"))); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_17 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(S; mem_store_ut16(EA, ((ut16) S))); + RzILOpEffect *seq_then_18 = ms_cast_ut16_15_16; + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_19 = c_call_17; + + // if (! 
(((st32) Pv) & 0x1)) {seq(S; mem_store_ut16(EA, ((ut16) S)))} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_11 = LOGAND(CAST(32, MSB(Pv), DUP(Pv)), SN(32, 1)); + RzILOpPure *op_INV_12 = INV(NON_ZERO(op_AND_11)); + RzILOpEffect *branch_20 = BRANCH(op_INV_12, seq_then_18, seq_else_19); + + RzILOpEffect *instruction_sequence = SEQN(4, imm_assign_2, imm_assign_13, op_ASSIGN_6, branch_20); + return instruction_sequence; +} + +// if (!Pv.new) memh(Rs+Ii) = II +RzILOpEffect *hex_il_op_s4_storeirhfnew_io(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Pv_new_op = ISA2REG(hi, 'v', true); + RzILOpPure *Pv_new = READ_REG(pkt, Pv_new_op, true); + RzILOpPure *S = SN(32, (st32)ISA2IMM(hi, 'S')); + + // u = u; + RzILOpEffect *imm_assign_2 = SETL("u", u); + + // EA = ((ut32) Rs) + u; + RzILOpPure *op_ADD_5 = ADD(CAST(32, IL_FALSE, Rs), VARL("u")); + RzILOpEffect *op_ASSIGN_6 = SETL("EA", op_ADD_5); + + // S = S; + RzILOpEffect *imm_assign_13 = SETL("S", S); + + // mem_store_ut16(EA, ((ut16) S)); + RzILOpEffect *ms_cast_ut16_15_16 = STOREW(VARL("EA"), CAST(16, IL_FALSE, VARL("S"))); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_17 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(S; mem_store_ut16(EA, ((ut16) S))); + RzILOpEffect *seq_then_18 = ms_cast_ut16_15_16; + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_19 = c_call_17; + + // if (! 
(((st32) Pv_new) & 0x1)) {seq(S; mem_store_ut16(EA, ((ut16) S)))} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_11 = LOGAND(CAST(32, MSB(Pv_new), DUP(Pv_new)), SN(32, 1)); + RzILOpPure *op_INV_12 = INV(NON_ZERO(op_AND_11)); + RzILOpEffect *branch_20 = BRANCH(op_INV_12, seq_then_18, seq_else_19); + + RzILOpEffect *instruction_sequence = SEQN(4, imm_assign_2, imm_assign_13, op_ASSIGN_6, branch_20); + return instruction_sequence; +} + +// if (Pv) memh(Rs+Ii) = II +RzILOpEffect *hex_il_op_s4_storeirht_io(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Pv_op = ISA2REG(hi, 'v', false); + RzILOpPure *Pv = READ_REG(pkt, Pv_op, false); + RzILOpPure *S = SN(32, (st32)ISA2IMM(hi, 'S')); + + // u = u; + RzILOpEffect *imm_assign_2 = SETL("u", u); + + // EA = ((ut32) Rs) + u; + RzILOpPure *op_ADD_5 = ADD(CAST(32, IL_FALSE, Rs), VARL("u")); + RzILOpEffect *op_ASSIGN_6 = SETL("EA", op_ADD_5); + + // S = S; + RzILOpEffect *imm_assign_12 = SETL("S", S); + + // mem_store_ut16(EA, ((ut16) S)); + RzILOpEffect *ms_cast_ut16_14_15 = STOREW(VARL("EA"), CAST(16, IL_FALSE, VARL("S"))); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_16 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(S; mem_store_ut16(EA, ((ut16) S))); + RzILOpEffect *seq_then_17 = ms_cast_ut16_14_15; + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_18 = c_call_16; + + // if ((((st32) Pv) & 0x1)) {seq(S; mem_store_ut16(EA, ((ut16) S)))} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_11 = LOGAND(CAST(32, MSB(Pv), DUP(Pv)), SN(32, 1)); + RzILOpEffect *branch_19 = BRANCH(NON_ZERO(op_AND_11), seq_then_17, seq_else_18); + + RzILOpEffect *instruction_sequence = SEQN(4, imm_assign_2, imm_assign_12, op_ASSIGN_6, branch_19); + return instruction_sequence; +} + +// if 
(Pv.new) memh(Rs+Ii) = II +RzILOpEffect *hex_il_op_s4_storeirhtnew_io(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Pv_new_op = ISA2REG(hi, 'v', true); + RzILOpPure *Pv_new = READ_REG(pkt, Pv_new_op, true); + RzILOpPure *S = SN(32, (st32)ISA2IMM(hi, 'S')); + + // u = u; + RzILOpEffect *imm_assign_2 = SETL("u", u); + + // EA = ((ut32) Rs) + u; + RzILOpPure *op_ADD_5 = ADD(CAST(32, IL_FALSE, Rs), VARL("u")); + RzILOpEffect *op_ASSIGN_6 = SETL("EA", op_ADD_5); + + // S = S; + RzILOpEffect *imm_assign_12 = SETL("S", S); + + // mem_store_ut16(EA, ((ut16) S)); + RzILOpEffect *ms_cast_ut16_14_15 = STOREW(VARL("EA"), CAST(16, IL_FALSE, VARL("S"))); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_16 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(S; mem_store_ut16(EA, ((ut16) S))); + RzILOpEffect *seq_then_17 = ms_cast_ut16_14_15; + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_18 = c_call_16; + + // if ((((st32) Pv_new) & 0x1)) {seq(S; mem_store_ut16(EA, ((ut16) S)))} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_11 = LOGAND(CAST(32, MSB(Pv_new), DUP(Pv_new)), SN(32, 1)); + RzILOpEffect *branch_19 = BRANCH(NON_ZERO(op_AND_11), seq_then_17, seq_else_18); + + RzILOpEffect *instruction_sequence = SEQN(4, imm_assign_2, imm_assign_12, op_ASSIGN_6, branch_19); + return instruction_sequence; +} + +// memw(Rs+Ii) = II +RzILOpEffect *hex_il_op_s4_storeiri_io(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + RzILOpPure *S = SN(32, (st32)ISA2IMM(hi, 'S')); + + // u = u; + RzILOpEffect *imm_assign_2 = 
SETL("u", u); + + // EA = ((ut32) Rs) + u; + RzILOpPure *op_ADD_5 = ADD(CAST(32, IL_FALSE, Rs), VARL("u")); + RzILOpEffect *op_ASSIGN_6 = SETL("EA", op_ADD_5); + + // S = S; + RzILOpEffect *imm_assign_8 = SETL("S", S); + + // mem_store_ut32(EA, ((ut32) S)); + RzILOpEffect *ms_cast_ut32_10_11 = STOREW(VARL("EA"), CAST(32, IL_FALSE, VARL("S"))); + + RzILOpEffect *instruction_sequence = SEQN(4, imm_assign_2, imm_assign_8, op_ASSIGN_6, ms_cast_ut32_10_11); + return instruction_sequence; +} + +// if (!Pv) memw(Rs+Ii) = II +RzILOpEffect *hex_il_op_s4_storeirif_io(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Pv_op = ISA2REG(hi, 'v', false); + RzILOpPure *Pv = READ_REG(pkt, Pv_op, false); + RzILOpPure *S = SN(32, (st32)ISA2IMM(hi, 'S')); + + // u = u; + RzILOpEffect *imm_assign_2 = SETL("u", u); + + // EA = ((ut32) Rs) + u; + RzILOpPure *op_ADD_5 = ADD(CAST(32, IL_FALSE, Rs), VARL("u")); + RzILOpEffect *op_ASSIGN_6 = SETL("EA", op_ADD_5); + + // S = S; + RzILOpEffect *imm_assign_13 = SETL("S", S); + + // mem_store_ut32(EA, ((ut32) S)); + RzILOpEffect *ms_cast_ut32_15_16 = STOREW(VARL("EA"), CAST(32, IL_FALSE, VARL("S"))); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_17 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(S; mem_store_ut32(EA, ((ut32) S))); + RzILOpEffect *seq_then_18 = ms_cast_ut32_15_16; + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_19 = c_call_17; + + // if (! 
(((st32) Pv) & 0x1)) {seq(S; mem_store_ut32(EA, ((ut32) S)))} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_11 = LOGAND(CAST(32, MSB(Pv), DUP(Pv)), SN(32, 1)); + RzILOpPure *op_INV_12 = INV(NON_ZERO(op_AND_11)); + RzILOpEffect *branch_20 = BRANCH(op_INV_12, seq_then_18, seq_else_19); + + RzILOpEffect *instruction_sequence = SEQN(4, imm_assign_2, imm_assign_13, op_ASSIGN_6, branch_20); + return instruction_sequence; +} + +// if (!Pv.new) memw(Rs+Ii) = II +RzILOpEffect *hex_il_op_s4_storeirifnew_io(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Pv_new_op = ISA2REG(hi, 'v', true); + RzILOpPure *Pv_new = READ_REG(pkt, Pv_new_op, true); + RzILOpPure *S = SN(32, (st32)ISA2IMM(hi, 'S')); + + // u = u; + RzILOpEffect *imm_assign_2 = SETL("u", u); + + // EA = ((ut32) Rs) + u; + RzILOpPure *op_ADD_5 = ADD(CAST(32, IL_FALSE, Rs), VARL("u")); + RzILOpEffect *op_ASSIGN_6 = SETL("EA", op_ADD_5); + + // S = S; + RzILOpEffect *imm_assign_13 = SETL("S", S); + + // mem_store_ut32(EA, ((ut32) S)); + RzILOpEffect *ms_cast_ut32_15_16 = STOREW(VARL("EA"), CAST(32, IL_FALSE, VARL("S"))); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_17 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(S; mem_store_ut32(EA, ((ut32) S))); + RzILOpEffect *seq_then_18 = ms_cast_ut32_15_16; + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_19 = c_call_17; + + // if (! 
(((st32) Pv_new) & 0x1)) {seq(S; mem_store_ut32(EA, ((ut32) S)))} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_11 = LOGAND(CAST(32, MSB(Pv_new), DUP(Pv_new)), SN(32, 1)); + RzILOpPure *op_INV_12 = INV(NON_ZERO(op_AND_11)); + RzILOpEffect *branch_20 = BRANCH(op_INV_12, seq_then_18, seq_else_19); + + RzILOpEffect *instruction_sequence = SEQN(4, imm_assign_2, imm_assign_13, op_ASSIGN_6, branch_20); + return instruction_sequence; +} + +// if (Pv) memw(Rs+Ii) = II +RzILOpEffect *hex_il_op_s4_storeirit_io(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Pv_op = ISA2REG(hi, 'v', false); + RzILOpPure *Pv = READ_REG(pkt, Pv_op, false); + RzILOpPure *S = SN(32, (st32)ISA2IMM(hi, 'S')); + + // u = u; + RzILOpEffect *imm_assign_2 = SETL("u", u); + + // EA = ((ut32) Rs) + u; + RzILOpPure *op_ADD_5 = ADD(CAST(32, IL_FALSE, Rs), VARL("u")); + RzILOpEffect *op_ASSIGN_6 = SETL("EA", op_ADD_5); + + // S = S; + RzILOpEffect *imm_assign_12 = SETL("S", S); + + // mem_store_ut32(EA, ((ut32) S)); + RzILOpEffect *ms_cast_ut32_14_15 = STOREW(VARL("EA"), CAST(32, IL_FALSE, VARL("S"))); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_16 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(S; mem_store_ut32(EA, ((ut32) S))); + RzILOpEffect *seq_then_17 = ms_cast_ut32_14_15; + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_18 = c_call_16; + + // if ((((st32) Pv) & 0x1)) {seq(S; mem_store_ut32(EA, ((ut32) S)))} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_11 = LOGAND(CAST(32, MSB(Pv), DUP(Pv)), SN(32, 1)); + RzILOpEffect *branch_19 = BRANCH(NON_ZERO(op_AND_11), seq_then_17, seq_else_18); + + RzILOpEffect *instruction_sequence = SEQN(4, imm_assign_2, imm_assign_12, op_ASSIGN_6, branch_19); + return instruction_sequence; +} + +// if 
(Pv.new) memw(Rs+Ii) = II +RzILOpEffect *hex_il_op_s4_storeiritnew_io(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Pv_new_op = ISA2REG(hi, 'v', true); + RzILOpPure *Pv_new = READ_REG(pkt, Pv_new_op, true); + RzILOpPure *S = SN(32, (st32)ISA2IMM(hi, 'S')); + + // u = u; + RzILOpEffect *imm_assign_2 = SETL("u", u); + + // EA = ((ut32) Rs) + u; + RzILOpPure *op_ADD_5 = ADD(CAST(32, IL_FALSE, Rs), VARL("u")); + RzILOpEffect *op_ASSIGN_6 = SETL("EA", op_ADD_5); + + // S = S; + RzILOpEffect *imm_assign_12 = SETL("S", S); + + // mem_store_ut32(EA, ((ut32) S)); + RzILOpEffect *ms_cast_ut32_14_15 = STOREW(VARL("EA"), CAST(32, IL_FALSE, VARL("S"))); + + // HYB(call_pkt, slot); + RzILOpEffect *c_call_16 = HEX_STORE_SLOT_CANCELLED(pkt, hi->slot); + + // seq(S; mem_store_ut32(EA, ((ut32) S))); + RzILOpEffect *seq_then_17 = ms_cast_ut32_14_15; + + // seq(HYB(call_pkt, slot)); + RzILOpEffect *seq_else_18 = c_call_16; + + // if ((((st32) Pv_new) & 0x1)) {seq(S; mem_store_ut32(EA, ((ut32) S)))} else {seq(HYB(call_pkt, slot))}; + RzILOpPure *op_AND_11 = LOGAND(CAST(32, MSB(Pv_new), DUP(Pv_new)), SN(32, 1)); + RzILOpEffect *branch_19 = BRANCH(NON_ZERO(op_AND_11), seq_then_17, seq_else_18); + + RzILOpEffect *instruction_sequence = SEQN(4, imm_assign_2, imm_assign_12, op_ASSIGN_6, branch_19); + return instruction_sequence; +} + +// memb(Re=II) = Rt +RzILOpEffect *hex_il_op_s4_storerb_ap(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *U = UN(32, (ut32)ISA2IMM(hi, 'U')); + // Declare: ut32 EA; + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + const HexOp *Re_op = ISA2REG(hi, 'e', false); + + // U = U; + RzILOpEffect *imm_assign_0 = 
SETL("U", U); + + // EA = U; + RzILOpEffect *op_ASSIGN_3 = SETL("EA", VARL("U")); + + // mem_store_ut8(EA, ((ut8) ((st8) ((Rt >> 0x0) & 0xff)))); + RzILOpPure *op_RSHIFT_9 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_11 = LOGAND(op_RSHIFT_9, SN(32, 0xff)); + RzILOpEffect *ms_cast_ut8_13_14 = STOREW(VARL("EA"), CAST(8, IL_FALSE, CAST(8, MSB(op_AND_11), DUP(op_AND_11)))); + + // Re = ((st32) U); + RzILOpEffect *op_ASSIGN_17 = WRITE_REG(bundle, Re_op, CAST(32, IL_FALSE, VARL("U"))); + + RzILOpEffect *instruction_sequence = SEQN(4, imm_assign_0, op_ASSIGN_3, ms_cast_ut8_13_14, op_ASSIGN_17); + return instruction_sequence; +} + +// memb(Rs+Ru<insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Ru_op = ISA2REG(hi, 'u', false); + RzILOpPure *Ru = READ_REG(pkt, Ru_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // u = u; + RzILOpEffect *imm_assign_3 = SETL("u", u); + + // EA = ((ut32) Rs + (Ru << u)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(Ru, VARL("u")); + RzILOpPure *op_ADD_6 = ADD(Rs, op_LSHIFT_5); + RzILOpEffect *op_ASSIGN_8 = SETL("EA", CAST(32, IL_FALSE, op_ADD_6)); + + // mem_store_ut8(EA, ((ut8) ((st8) ((Rt >> 0x0) & 0xff)))); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0xff)); + RzILOpEffect *ms_cast_ut8_18_19 = STOREW(VARL("EA"), CAST(8, IL_FALSE, CAST(8, MSB(op_AND_16), DUP(op_AND_16)))); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_3, op_ASSIGN_8, ms_cast_ut8_18_19); + return instruction_sequence; +} + +// memb(Ru<insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *U = UN(32, (ut32)ISA2IMM(hi, 'U')); + // Declare: ut32 EA; + const HexOp *Ru_op = ISA2REG(hi, 'u', false); + RzILOpPure *Ru = READ_REG(pkt, Ru_op, false); + RzILOpPure *u = UN(32, 
(ut32)ISA2IMM(hi, 'u')); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // U = U; + RzILOpEffect *imm_assign_0 = SETL("U", U); + + // u = u; + RzILOpEffect *imm_assign_4 = SETL("u", u); + + // EA = U + ((ut32) (Ru << u)); + RzILOpPure *op_LSHIFT_6 = SHIFTL0(Ru, VARL("u")); + RzILOpPure *op_ADD_8 = ADD(VARL("U"), CAST(32, IL_FALSE, op_LSHIFT_6)); + RzILOpEffect *op_ASSIGN_9 = SETL("EA", op_ADD_8); + + // mem_store_ut8(EA, ((ut8) ((st8) ((Rt >> 0x0) & 0xff)))); + RzILOpPure *op_RSHIFT_15 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_17 = LOGAND(op_RSHIFT_15, SN(32, 0xff)); + RzILOpEffect *ms_cast_ut8_19_20 = STOREW(VARL("EA"), CAST(8, IL_FALSE, CAST(8, MSB(op_AND_17), DUP(op_AND_17)))); + + RzILOpEffect *instruction_sequence = SEQN(4, imm_assign_0, imm_assign_4, op_ASSIGN_9, ms_cast_ut8_19_20); + return instruction_sequence; +} + +// memb(Re=II) = Nt.new +RzILOpEffect *hex_il_op_s4_storerbnew_ap(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *U = UN(32, (ut32)ISA2IMM(hi, 'U')); + // Declare: ut32 EA; + const HexOp Nt_new_op = NREG2OP(bundle, 't'); + RzILOpPure *Nt_new = READ_REG(pkt, &Nt_new_op, true); + const HexOp *Re_op = ISA2REG(hi, 'e', false); + + // U = U; + RzILOpEffect *imm_assign_0 = SETL("U", U); + + // EA = U; + RzILOpEffect *op_ASSIGN_3 = SETL("EA", VARL("U")); + + // mem_store_ut8(EA, ((ut8) ((st8) ((Nt_new >> 0x0) & 0xff)))); + RzILOpPure *op_RSHIFT_9 = SHIFTRA(Nt_new, SN(32, 0)); + RzILOpPure *op_AND_11 = LOGAND(op_RSHIFT_9, SN(32, 0xff)); + RzILOpEffect *ms_cast_ut8_13_14 = STOREW(VARL("EA"), CAST(8, IL_FALSE, CAST(8, MSB(op_AND_11), DUP(op_AND_11)))); + + // Re = ((st32) U); + RzILOpEffect *op_ASSIGN_17 = WRITE_REG(bundle, Re_op, CAST(32, IL_FALSE, VARL("U"))); + + RzILOpEffect *instruction_sequence = SEQN(4, imm_assign_0, op_ASSIGN_3, ms_cast_ut8_13_14, op_ASSIGN_17); + return instruction_sequence; +} + +// memb(Rs+Ru<insn; + 
HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Ru_op = ISA2REG(hi, 'u', false); + RzILOpPure *Ru = READ_REG(pkt, Ru_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp Nt_new_op = NREG2OP(bundle, 't'); + RzILOpPure *Nt_new = READ_REG(pkt, &Nt_new_op, true); + + // u = u; + RzILOpEffect *imm_assign_3 = SETL("u", u); + + // EA = ((ut32) Rs + (Ru << u)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(Ru, VARL("u")); + RzILOpPure *op_ADD_6 = ADD(Rs, op_LSHIFT_5); + RzILOpEffect *op_ASSIGN_8 = SETL("EA", CAST(32, IL_FALSE, op_ADD_6)); + + // mem_store_ut8(EA, ((ut8) ((st8) ((Nt_new >> 0x0) & 0xff)))); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(Nt_new, SN(32, 0)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0xff)); + RzILOpEffect *ms_cast_ut8_18_19 = STOREW(VARL("EA"), CAST(8, IL_FALSE, CAST(8, MSB(op_AND_16), DUP(op_AND_16)))); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_3, op_ASSIGN_8, ms_cast_ut8_18_19); + return instruction_sequence; +} + +// memb(Ru<insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *U = UN(32, (ut32)ISA2IMM(hi, 'U')); + // Declare: ut32 EA; + const HexOp *Ru_op = ISA2REG(hi, 'u', false); + RzILOpPure *Ru = READ_REG(pkt, Ru_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp Nt_new_op = NREG2OP(bundle, 't'); + RzILOpPure *Nt_new = READ_REG(pkt, &Nt_new_op, true); + + // U = U; + RzILOpEffect *imm_assign_0 = SETL("U", U); + + // u = u; + RzILOpEffect *imm_assign_4 = SETL("u", u); + + // EA = U + ((ut32) (Ru << u)); + RzILOpPure *op_LSHIFT_6 = SHIFTL0(Ru, VARL("u")); + RzILOpPure *op_ADD_8 = ADD(VARL("U"), CAST(32, IL_FALSE, op_LSHIFT_6)); + RzILOpEffect *op_ASSIGN_9 = SETL("EA", op_ADD_8); + + // mem_store_ut8(EA, ((ut8) ((st8) ((Nt_new >> 0x0) & 0xff)))); + RzILOpPure *op_RSHIFT_15 = SHIFTRA(Nt_new, SN(32, 0)); + RzILOpPure *op_AND_17 = LOGAND(op_RSHIFT_15, SN(32, 
0xff)); + RzILOpEffect *ms_cast_ut8_19_20 = STOREW(VARL("EA"), CAST(8, IL_FALSE, CAST(8, MSB(op_AND_17), DUP(op_AND_17)))); + + RzILOpEffect *instruction_sequence = SEQN(4, imm_assign_0, imm_assign_4, op_ASSIGN_9, ms_cast_ut8_19_20); + return instruction_sequence; +} + +// memd(Re=II) = Rtt +RzILOpEffect *hex_il_op_s4_storerd_ap(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *U = UN(32, (ut32)ISA2IMM(hi, 'U')); + // Declare: ut32 EA; + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + const HexOp *Re_op = ISA2REG(hi, 'e', false); + + // U = U; + RzILOpEffect *imm_assign_0 = SETL("U", U); + + // EA = U; + RzILOpEffect *op_ASSIGN_3 = SETL("EA", VARL("U")); + + // mem_store_ut64(EA, ((ut64) Rtt)); + RzILOpEffect *ms_cast_ut64_6_7 = STOREW(VARL("EA"), CAST(64, IL_FALSE, Rtt)); + + // Re = ((st32) U); + RzILOpEffect *op_ASSIGN_10 = WRITE_REG(bundle, Re_op, CAST(32, IL_FALSE, VARL("U"))); + + RzILOpEffect *instruction_sequence = SEQN(4, imm_assign_0, op_ASSIGN_3, ms_cast_ut64_6_7, op_ASSIGN_10); + return instruction_sequence; +} + +// memd(Rs+Ru<insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Ru_op = ISA2REG(hi, 'u', false); + RzILOpPure *Ru = READ_REG(pkt, Ru_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // u = u; + RzILOpEffect *imm_assign_3 = SETL("u", u); + + // EA = ((ut32) Rs + (Ru << u)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(Ru, VARL("u")); + RzILOpPure *op_ADD_6 = ADD(Rs, op_LSHIFT_5); + RzILOpEffect *op_ASSIGN_8 = SETL("EA", CAST(32, IL_FALSE, op_ADD_6)); + + // mem_store_ut64(EA, ((ut64) Rtt)); + RzILOpEffect *ms_cast_ut64_11_12 = STOREW(VARL("EA"), CAST(64, IL_FALSE, Rtt)); + + RzILOpEffect 
*instruction_sequence = SEQN(3, imm_assign_3, op_ASSIGN_8, ms_cast_ut64_11_12); + return instruction_sequence; +} + +// memd(Ru<insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *U = UN(32, (ut32)ISA2IMM(hi, 'U')); + // Declare: ut32 EA; + const HexOp *Ru_op = ISA2REG(hi, 'u', false); + RzILOpPure *Ru = READ_REG(pkt, Ru_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // U = U; + RzILOpEffect *imm_assign_0 = SETL("U", U); + + // u = u; + RzILOpEffect *imm_assign_4 = SETL("u", u); + + // EA = U + ((ut32) (Ru << u)); + RzILOpPure *op_LSHIFT_6 = SHIFTL0(Ru, VARL("u")); + RzILOpPure *op_ADD_8 = ADD(VARL("U"), CAST(32, IL_FALSE, op_LSHIFT_6)); + RzILOpEffect *op_ASSIGN_9 = SETL("EA", op_ADD_8); + + // mem_store_ut64(EA, ((ut64) Rtt)); + RzILOpEffect *ms_cast_ut64_12_13 = STOREW(VARL("EA"), CAST(64, IL_FALSE, Rtt)); + + RzILOpEffect *instruction_sequence = SEQN(4, imm_assign_0, imm_assign_4, op_ASSIGN_9, ms_cast_ut64_12_13); + return instruction_sequence; +} + +// memh(Re=II) = Rt.h +RzILOpEffect *hex_il_op_s4_storerf_ap(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *U = UN(32, (ut32)ISA2IMM(hi, 'U')); + // Declare: ut32 EA; + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + const HexOp *Re_op = ISA2REG(hi, 'e', false); + + // U = U; + RzILOpEffect *imm_assign_0 = SETL("U", U); + + // EA = U; + RzILOpEffect *op_ASSIGN_3 = SETL("EA", VARL("U")); + + // mem_store_ut16(EA, ((ut16) ((st16) ((Rt >> 0x10) & 0xffff)))); + RzILOpPure *op_RSHIFT_9 = SHIFTRA(Rt, SN(32, 16)); + RzILOpPure *op_AND_11 = LOGAND(op_RSHIFT_9, SN(32, 0xffff)); + RzILOpEffect *ms_cast_ut16_13_14 = STOREW(VARL("EA"), CAST(16, IL_FALSE, CAST(16, MSB(op_AND_11), DUP(op_AND_11)))); + + // Re = ((st32) U); + RzILOpEffect *op_ASSIGN_17 = WRITE_REG(bundle, Re_op, CAST(32, 
IL_FALSE, VARL("U"))); + + RzILOpEffect *instruction_sequence = SEQN(4, imm_assign_0, op_ASSIGN_3, ms_cast_ut16_13_14, op_ASSIGN_17); + return instruction_sequence; +} + +// memh(Rs+Ru<insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Ru_op = ISA2REG(hi, 'u', false); + RzILOpPure *Ru = READ_REG(pkt, Ru_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // u = u; + RzILOpEffect *imm_assign_3 = SETL("u", u); + + // EA = ((ut32) Rs + (Ru << u)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(Ru, VARL("u")); + RzILOpPure *op_ADD_6 = ADD(Rs, op_LSHIFT_5); + RzILOpEffect *op_ASSIGN_8 = SETL("EA", CAST(32, IL_FALSE, op_ADD_6)); + + // mem_store_ut16(EA, ((ut16) ((st16) ((Rt >> 0x10) & 0xffff)))); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(Rt, SN(32, 16)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0xffff)); + RzILOpEffect *ms_cast_ut16_18_19 = STOREW(VARL("EA"), CAST(16, IL_FALSE, CAST(16, MSB(op_AND_16), DUP(op_AND_16)))); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_3, op_ASSIGN_8, ms_cast_ut16_18_19); + return instruction_sequence; +} + +// memh(Ru<insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *U = UN(32, (ut32)ISA2IMM(hi, 'U')); + // Declare: ut32 EA; + const HexOp *Ru_op = ISA2REG(hi, 'u', false); + RzILOpPure *Ru = READ_REG(pkt, Ru_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // U = U; + RzILOpEffect *imm_assign_0 = SETL("U", U); + + // u = u; + RzILOpEffect *imm_assign_4 = SETL("u", u); + + // EA = U + ((ut32) (Ru << u)); + RzILOpPure *op_LSHIFT_6 = SHIFTL0(Ru, VARL("u")); + RzILOpPure *op_ADD_8 = ADD(VARL("U"), CAST(32, IL_FALSE, op_LSHIFT_6)); + RzILOpEffect *op_ASSIGN_9 = SETL("EA", 
op_ADD_8); + + // mem_store_ut16(EA, ((ut16) ((st16) ((Rt >> 0x10) & 0xffff)))); + RzILOpPure *op_RSHIFT_15 = SHIFTRA(Rt, SN(32, 16)); + RzILOpPure *op_AND_17 = LOGAND(op_RSHIFT_15, SN(32, 0xffff)); + RzILOpEffect *ms_cast_ut16_19_20 = STOREW(VARL("EA"), CAST(16, IL_FALSE, CAST(16, MSB(op_AND_17), DUP(op_AND_17)))); + + RzILOpEffect *instruction_sequence = SEQN(4, imm_assign_0, imm_assign_4, op_ASSIGN_9, ms_cast_ut16_19_20); + return instruction_sequence; +} + +// memh(Re=II) = Rt +RzILOpEffect *hex_il_op_s4_storerh_ap(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *U = UN(32, (ut32)ISA2IMM(hi, 'U')); + // Declare: ut32 EA; + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + const HexOp *Re_op = ISA2REG(hi, 'e', false); + + // U = U; + RzILOpEffect *imm_assign_0 = SETL("U", U); + + // EA = U; + RzILOpEffect *op_ASSIGN_3 = SETL("EA", VARL("U")); + + // mem_store_ut16(EA, ((ut16) ((st16) ((Rt >> 0x0) & 0xffff)))); + RzILOpPure *op_RSHIFT_9 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_11 = LOGAND(op_RSHIFT_9, SN(32, 0xffff)); + RzILOpEffect *ms_cast_ut16_13_14 = STOREW(VARL("EA"), CAST(16, IL_FALSE, CAST(16, MSB(op_AND_11), DUP(op_AND_11)))); + + // Re = ((st32) U); + RzILOpEffect *op_ASSIGN_17 = WRITE_REG(bundle, Re_op, CAST(32, IL_FALSE, VARL("U"))); + + RzILOpEffect *instruction_sequence = SEQN(4, imm_assign_0, op_ASSIGN_3, ms_cast_ut16_13_14, op_ASSIGN_17); + return instruction_sequence; +} + +// memh(Rs+Ru<insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Ru_op = ISA2REG(hi, 'u', false); + RzILOpPure *Ru = READ_REG(pkt, Ru_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // u = u; + RzILOpEffect *imm_assign_3 = 
SETL("u", u); + + // EA = ((ut32) Rs + (Ru << u)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(Ru, VARL("u")); + RzILOpPure *op_ADD_6 = ADD(Rs, op_LSHIFT_5); + RzILOpEffect *op_ASSIGN_8 = SETL("EA", CAST(32, IL_FALSE, op_ADD_6)); + + // mem_store_ut16(EA, ((ut16) ((st16) ((Rt >> 0x0) & 0xffff)))); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0xffff)); + RzILOpEffect *ms_cast_ut16_18_19 = STOREW(VARL("EA"), CAST(16, IL_FALSE, CAST(16, MSB(op_AND_16), DUP(op_AND_16)))); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_3, op_ASSIGN_8, ms_cast_ut16_18_19); + return instruction_sequence; +} + +// memh(Ru<insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *U = UN(32, (ut32)ISA2IMM(hi, 'U')); + // Declare: ut32 EA; + const HexOp *Ru_op = ISA2REG(hi, 'u', false); + RzILOpPure *Ru = READ_REG(pkt, Ru_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // U = U; + RzILOpEffect *imm_assign_0 = SETL("U", U); + + // u = u; + RzILOpEffect *imm_assign_4 = SETL("u", u); + + // EA = U + ((ut32) (Ru << u)); + RzILOpPure *op_LSHIFT_6 = SHIFTL0(Ru, VARL("u")); + RzILOpPure *op_ADD_8 = ADD(VARL("U"), CAST(32, IL_FALSE, op_LSHIFT_6)); + RzILOpEffect *op_ASSIGN_9 = SETL("EA", op_ADD_8); + + // mem_store_ut16(EA, ((ut16) ((st16) ((Rt >> 0x0) & 0xffff)))); + RzILOpPure *op_RSHIFT_15 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_17 = LOGAND(op_RSHIFT_15, SN(32, 0xffff)); + RzILOpEffect *ms_cast_ut16_19_20 = STOREW(VARL("EA"), CAST(16, IL_FALSE, CAST(16, MSB(op_AND_17), DUP(op_AND_17)))); + + RzILOpEffect *instruction_sequence = SEQN(4, imm_assign_0, imm_assign_4, op_ASSIGN_9, ms_cast_ut16_19_20); + return instruction_sequence; +} + +// memh(Re=II) = Nt.new +RzILOpEffect *hex_il_op_s4_storerhnew_ap(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure 
*U = UN(32, (ut32)ISA2IMM(hi, 'U')); + // Declare: ut32 EA; + const HexOp Nt_new_op = NREG2OP(bundle, 't'); + RzILOpPure *Nt_new = READ_REG(pkt, &Nt_new_op, true); + const HexOp *Re_op = ISA2REG(hi, 'e', false); + + // U = U; + RzILOpEffect *imm_assign_0 = SETL("U", U); + + // EA = U; + RzILOpEffect *op_ASSIGN_3 = SETL("EA", VARL("U")); + + // mem_store_ut16(EA, ((ut16) ((st16) ((Nt_new >> 0x0) & 0xffff)))); + RzILOpPure *op_RSHIFT_9 = SHIFTRA(Nt_new, SN(32, 0)); + RzILOpPure *op_AND_11 = LOGAND(op_RSHIFT_9, SN(32, 0xffff)); + RzILOpEffect *ms_cast_ut16_13_14 = STOREW(VARL("EA"), CAST(16, IL_FALSE, CAST(16, MSB(op_AND_11), DUP(op_AND_11)))); + + // Re = ((st32) U); + RzILOpEffect *op_ASSIGN_17 = WRITE_REG(bundle, Re_op, CAST(32, IL_FALSE, VARL("U"))); + + RzILOpEffect *instruction_sequence = SEQN(4, imm_assign_0, op_ASSIGN_3, ms_cast_ut16_13_14, op_ASSIGN_17); + return instruction_sequence; +} + +// memh(Rs+Ru<insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Ru_op = ISA2REG(hi, 'u', false); + RzILOpPure *Ru = READ_REG(pkt, Ru_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp Nt_new_op = NREG2OP(bundle, 't'); + RzILOpPure *Nt_new = READ_REG(pkt, &Nt_new_op, true); + + // u = u; + RzILOpEffect *imm_assign_3 = SETL("u", u); + + // EA = ((ut32) Rs + (Ru << u)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(Ru, VARL("u")); + RzILOpPure *op_ADD_6 = ADD(Rs, op_LSHIFT_5); + RzILOpEffect *op_ASSIGN_8 = SETL("EA", CAST(32, IL_FALSE, op_ADD_6)); + + // mem_store_ut16(EA, ((ut16) ((st16) ((Nt_new >> 0x0) & 0xffff)))); + RzILOpPure *op_RSHIFT_14 = SHIFTRA(Nt_new, SN(32, 0)); + RzILOpPure *op_AND_16 = LOGAND(op_RSHIFT_14, SN(32, 0xffff)); + RzILOpEffect *ms_cast_ut16_18_19 = STOREW(VARL("EA"), CAST(16, IL_FALSE, CAST(16, MSB(op_AND_16), DUP(op_AND_16)))); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_3, op_ASSIGN_8, 
ms_cast_ut16_18_19); + return instruction_sequence; +} + +// memh(Ru<insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *U = UN(32, (ut32)ISA2IMM(hi, 'U')); + // Declare: ut32 EA; + const HexOp *Ru_op = ISA2REG(hi, 'u', false); + RzILOpPure *Ru = READ_REG(pkt, Ru_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp Nt_new_op = NREG2OP(bundle, 't'); + RzILOpPure *Nt_new = READ_REG(pkt, &Nt_new_op, true); + + // U = U; + RzILOpEffect *imm_assign_0 = SETL("U", U); + + // u = u; + RzILOpEffect *imm_assign_4 = SETL("u", u); + + // EA = U + ((ut32) (Ru << u)); + RzILOpPure *op_LSHIFT_6 = SHIFTL0(Ru, VARL("u")); + RzILOpPure *op_ADD_8 = ADD(VARL("U"), CAST(32, IL_FALSE, op_LSHIFT_6)); + RzILOpEffect *op_ASSIGN_9 = SETL("EA", op_ADD_8); + + // mem_store_ut16(EA, ((ut16) ((st16) ((Nt_new >> 0x0) & 0xffff)))); + RzILOpPure *op_RSHIFT_15 = SHIFTRA(Nt_new, SN(32, 0)); + RzILOpPure *op_AND_17 = LOGAND(op_RSHIFT_15, SN(32, 0xffff)); + RzILOpEffect *ms_cast_ut16_19_20 = STOREW(VARL("EA"), CAST(16, IL_FALSE, CAST(16, MSB(op_AND_17), DUP(op_AND_17)))); + + RzILOpEffect *instruction_sequence = SEQN(4, imm_assign_0, imm_assign_4, op_ASSIGN_9, ms_cast_ut16_19_20); + return instruction_sequence; +} + +// memw(Re=II) = Rt +RzILOpEffect *hex_il_op_s4_storeri_ap(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *U = UN(32, (ut32)ISA2IMM(hi, 'U')); + // Declare: ut32 EA; + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + const HexOp *Re_op = ISA2REG(hi, 'e', false); + + // U = U; + RzILOpEffect *imm_assign_0 = SETL("U", U); + + // EA = U; + RzILOpEffect *op_ASSIGN_3 = SETL("EA", VARL("U")); + + // mem_store_ut32(EA, ((ut32) Rt)); + RzILOpEffect *ms_cast_ut32_6_7 = STOREW(VARL("EA"), CAST(32, IL_FALSE, Rt)); + + // Re = ((st32) U); + RzILOpEffect *op_ASSIGN_10 = WRITE_REG(bundle, Re_op, CAST(32, IL_FALSE, VARL("U"))); + + RzILOpEffect 
*instruction_sequence = SEQN(4, imm_assign_0, op_ASSIGN_3, ms_cast_ut32_6_7, op_ASSIGN_10); + return instruction_sequence; +} + +// memw(Rs+Ru<insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Ru_op = ISA2REG(hi, 'u', false); + RzILOpPure *Ru = READ_REG(pkt, Ru_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // u = u; + RzILOpEffect *imm_assign_3 = SETL("u", u); + + // EA = ((ut32) Rs + (Ru << u)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(Ru, VARL("u")); + RzILOpPure *op_ADD_6 = ADD(Rs, op_LSHIFT_5); + RzILOpEffect *op_ASSIGN_8 = SETL("EA", CAST(32, IL_FALSE, op_ADD_6)); + + // mem_store_ut32(EA, ((ut32) Rt)); + RzILOpEffect *ms_cast_ut32_11_12 = STOREW(VARL("EA"), CAST(32, IL_FALSE, Rt)); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_3, op_ASSIGN_8, ms_cast_ut32_11_12); + return instruction_sequence; +} + +// memw(Ru<insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *U = UN(32, (ut32)ISA2IMM(hi, 'U')); + // Declare: ut32 EA; + const HexOp *Ru_op = ISA2REG(hi, 'u', false); + RzILOpPure *Ru = READ_REG(pkt, Ru_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // U = U; + RzILOpEffect *imm_assign_0 = SETL("U", U); + + // u = u; + RzILOpEffect *imm_assign_4 = SETL("u", u); + + // EA = U + ((ut32) (Ru << u)); + RzILOpPure *op_LSHIFT_6 = SHIFTL0(Ru, VARL("u")); + RzILOpPure *op_ADD_8 = ADD(VARL("U"), CAST(32, IL_FALSE, op_LSHIFT_6)); + RzILOpEffect *op_ASSIGN_9 = SETL("EA", op_ADD_8); + + // mem_store_ut32(EA, ((ut32) Rt)); + RzILOpEffect *ms_cast_ut32_12_13 = STOREW(VARL("EA"), CAST(32, IL_FALSE, Rt)); + + RzILOpEffect *instruction_sequence = SEQN(4, imm_assign_0, imm_assign_4, op_ASSIGN_9, 
ms_cast_ut32_12_13); + return instruction_sequence; +} + +// memw(Re=II) = Nt.new +RzILOpEffect *hex_il_op_s4_storerinew_ap(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *U = UN(32, (ut32)ISA2IMM(hi, 'U')); + // Declare: ut32 EA; + const HexOp Nt_new_op = NREG2OP(bundle, 't'); + RzILOpPure *Nt_new = READ_REG(pkt, &Nt_new_op, true); + const HexOp *Re_op = ISA2REG(hi, 'e', false); + + // U = U; + RzILOpEffect *imm_assign_0 = SETL("U", U); + + // EA = U; + RzILOpEffect *op_ASSIGN_3 = SETL("EA", VARL("U")); + + // mem_store_ut32(EA, ((ut32) Nt_new)); + RzILOpEffect *ms_cast_ut32_6_7 = STOREW(VARL("EA"), CAST(32, IL_FALSE, Nt_new)); + + // Re = ((st32) U); + RzILOpEffect *op_ASSIGN_10 = WRITE_REG(bundle, Re_op, CAST(32, IL_FALSE, VARL("U"))); + + RzILOpEffect *instruction_sequence = SEQN(4, imm_assign_0, op_ASSIGN_3, ms_cast_ut32_6_7, op_ASSIGN_10); + return instruction_sequence; +} + +// memw(Rs+Ru<insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Ru_op = ISA2REG(hi, 'u', false); + RzILOpPure *Ru = READ_REG(pkt, Ru_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp Nt_new_op = NREG2OP(bundle, 't'); + RzILOpPure *Nt_new = READ_REG(pkt, &Nt_new_op, true); + + // u = u; + RzILOpEffect *imm_assign_3 = SETL("u", u); + + // EA = ((ut32) Rs + (Ru << u)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(Ru, VARL("u")); + RzILOpPure *op_ADD_6 = ADD(Rs, op_LSHIFT_5); + RzILOpEffect *op_ASSIGN_8 = SETL("EA", CAST(32, IL_FALSE, op_ADD_6)); + + // mem_store_ut32(EA, ((ut32) Nt_new)); + RzILOpEffect *ms_cast_ut32_11_12 = STOREW(VARL("EA"), CAST(32, IL_FALSE, Nt_new)); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_3, op_ASSIGN_8, ms_cast_ut32_11_12); + return instruction_sequence; +} + +// memw(Ru<insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *U = 
UN(32, (ut32)ISA2IMM(hi, 'U')); + // Declare: ut32 EA; + const HexOp *Ru_op = ISA2REG(hi, 'u', false); + RzILOpPure *Ru = READ_REG(pkt, Ru_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp Nt_new_op = NREG2OP(bundle, 't'); + RzILOpPure *Nt_new = READ_REG(pkt, &Nt_new_op, true); + + // U = U; + RzILOpEffect *imm_assign_0 = SETL("U", U); + + // u = u; + RzILOpEffect *imm_assign_4 = SETL("u", u); + + // EA = U + ((ut32) (Ru << u)); + RzILOpPure *op_LSHIFT_6 = SHIFTL0(Ru, VARL("u")); + RzILOpPure *op_ADD_8 = ADD(VARL("U"), CAST(32, IL_FALSE, op_LSHIFT_6)); + RzILOpEffect *op_ASSIGN_9 = SETL("EA", op_ADD_8); + + // mem_store_ut32(EA, ((ut32) Nt_new)); + RzILOpEffect *ms_cast_ut32_12_13 = STOREW(VARL("EA"), CAST(32, IL_FALSE, Nt_new)); + + RzILOpEffect *instruction_sequence = SEQN(4, imm_assign_0, imm_assign_4, op_ASSIGN_9, ms_cast_ut32_12_13); + return instruction_sequence; +} + +// Rd = add(Rs,sub(Ii,Ru)) +RzILOpEffect *hex_il_op_s4_subaddi(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + const HexOp *Ru_op = ISA2REG(hi, 'u', false); + RzILOpPure *Ru = READ_REG(pkt, Ru_op, false); + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + + // s = s; + RzILOpEffect *imm_assign_4 = SETL("s", s); + + // Rd = Rs - Ru + s; + RzILOpPure *op_SUB_3 = SUB(Rs, Ru); + RzILOpPure *op_ADD_6 = ADD(op_SUB_3, VARL("s")); + RzILOpEffect *op_ASSIGN_7 = WRITE_REG(bundle, Rd_op, op_ADD_6); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_4, op_ASSIGN_7); + return instruction_sequence; +} + +// Rx = sub(Ii,asl(Rxin,II)) +RzILOpEffect *hex_il_op_s4_subi_asl_ri(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + 
RzILOpPure *U = UN(32, (ut32)ISA2IMM(hi, 'U')); + + // u = u; + RzILOpEffect *imm_assign_1 = SETL("u", u); + + // U = U; + RzILOpEffect *imm_assign_3 = SETL("U", U); + + // Rx = ((st32) u - ((ut32) (Rx << U))); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(READ_REG(pkt, Rx_op, false), VARL("U")); + RzILOpPure *op_SUB_7 = SUB(VARL("u"), CAST(32, IL_FALSE, op_LSHIFT_5)); + RzILOpEffect *op_ASSIGN_9 = WRITE_REG(bundle, Rx_op, CAST(32, IL_FALSE, op_SUB_7)); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_1, imm_assign_3, op_ASSIGN_9); + return instruction_sequence; +} + +// Rx = sub(Ii,lsr(Rxin,II)) +RzILOpEffect *hex_il_op_s4_subi_lsr_ri(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + RzILOpPure *U = UN(32, (ut32)ISA2IMM(hi, 'U')); + + // u = u; + RzILOpEffect *imm_assign_1 = SETL("u", u); + + // U = U; + RzILOpEffect *imm_assign_4 = SETL("U", U); + + // Rx = ((st32) u - (((ut32) Rx) >> U)); + RzILOpPure *op_RSHIFT_6 = SHIFTR0(CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false)), VARL("U")); + RzILOpPure *op_SUB_7 = SUB(VARL("u"), op_RSHIFT_6); + RzILOpEffect *op_ASSIGN_9 = WRITE_REG(bundle, Rx_op, CAST(32, IL_FALSE, op_SUB_7)); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_1, imm_assign_4, op_ASSIGN_9); + return instruction_sequence; +} + +// Rdd = vrcrotate(Rss,Rt,Ii) +RzILOpEffect *hex_il_op_s4_vrcrotate(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Rxx += vrcrotate(Rss,Rt,Ii) +RzILOpEffect *hex_il_op_s4_vrcrotate_acc(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Rdd = vxaddsubh(Rss,Rtt):sat +RzILOpEffect *hex_il_op_s4_vxaddsubh(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + 
const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_79 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) + ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff))))), 0x0, 0x10) == ((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) + ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff))))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) + ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff)))) < 0x0) ? (-(0x1 << 0xf)) : (0x1 << 0xf) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_15 = SHIFTRA(Rss, SN(32, 0)); + RzILOpPure *op_AND_18 = LOGAND(op_RSHIFT_15, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_24 = SHIFTRA(Rtt, SN(32, 16)); + RzILOpPure *op_AND_27 = LOGAND(op_RSHIFT_24, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_ADD_31 = ADD(CAST(32, MSB(CAST(16, MSB(op_AND_18), DUP(op_AND_18))), CAST(16, MSB(DUP(op_AND_18)), DUP(op_AND_18))), CAST(32, MSB(CAST(16, MSB(op_AND_27), DUP(op_AND_27))), CAST(16, MSB(DUP(op_AND_27)), DUP(op_AND_27)))); + RzILOpPure *op_RSHIFT_40 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_43 = LOGAND(op_RSHIFT_40, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_48 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_51 = LOGAND(op_RSHIFT_48, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_ADD_55 = ADD(CAST(32, MSB(CAST(16, MSB(op_AND_43), DUP(op_AND_43))), CAST(16, MSB(DUP(op_AND_43)), DUP(op_AND_43))), CAST(32, MSB(CAST(16, MSB(op_AND_51), DUP(op_AND_51))), CAST(16, MSB(DUP(op_AND_51)), DUP(op_AND_51)))); + RzILOpPure *op_EQ_57 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_ADD_31), SN(32, 0), SN(32, 16)), CAST(64, 
MSB(op_ADD_55), DUP(op_ADD_55))); + RzILOpPure *op_RSHIFT_83 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_86 = LOGAND(op_RSHIFT_83, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_91 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_94 = LOGAND(op_RSHIFT_91, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_ADD_98 = ADD(CAST(32, MSB(CAST(16, MSB(op_AND_86), DUP(op_AND_86))), CAST(16, MSB(DUP(op_AND_86)), DUP(op_AND_86))), CAST(32, MSB(CAST(16, MSB(op_AND_94), DUP(op_AND_94))), CAST(16, MSB(DUP(op_AND_94)), DUP(op_AND_94)))); + RzILOpPure *op_LT_100 = SLT(op_ADD_98, SN(32, 0)); + RzILOpPure *op_LSHIFT_105 = SHIFTL0(SN(64, 1), SN(32, 15)); + RzILOpPure *op_NEG_106 = NEG(op_LSHIFT_105); + RzILOpPure *op_LSHIFT_111 = SHIFTL0(SN(64, 1), SN(32, 15)); + RzILOpPure *op_SUB_114 = SUB(op_LSHIFT_111, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_115 = ITE(op_LT_100, op_NEG_106, op_SUB_114); + RzILOpEffect *gcc_expr_116 = BRANCH(op_EQ_57, EMPTY(), set_usr_field_call_79); + + // h_tmp584 = HYB(gcc_expr_if ((sextract64(((ut64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) + ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff))))), 0x0, 0x10) == ((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) + ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff))))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) + ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff)))) < 0x0) ? 
(-(0x1 << 0xf)) : (0x1 << 0xf) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_118 = SETL("h_tmp584", cond_115); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st32) ((st16) ((Rss > ...; + RzILOpEffect *seq_119 = SEQN(2, gcc_expr_116, op_ASSIGN_hybrid_tmp_118); + + // Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << 0x0)))) | (((ut64) (((sextract64(((ut64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) + ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff))))), 0x0, 0x10) == ((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) + ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff)))))) ? ((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) + ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff))))) : h_tmp584) & ((st64) 0xffff))) << 0x0))); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(SN(64, 0xffff), SN(32, 0)); + RzILOpPure *op_NOT_6 = LOGNOT(op_LSHIFT_5); + RzILOpPure *op_AND_7 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_6); + RzILOpPure *op_RSHIFT_61 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_64 = LOGAND(op_RSHIFT_61, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_69 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_72 = LOGAND(op_RSHIFT_69, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_ADD_76 = ADD(CAST(32, MSB(CAST(16, MSB(op_AND_64), DUP(op_AND_64))), CAST(16, MSB(DUP(op_AND_64)), DUP(op_AND_64))), CAST(32, MSB(CAST(16, MSB(op_AND_72), DUP(op_AND_72))), CAST(16, MSB(DUP(op_AND_72)), DUP(op_AND_72)))); + RzILOpPure *cond_121 = ITE(DUP(op_EQ_57), CAST(64, MSB(op_ADD_76), DUP(op_ADD_76)), VARL("h_tmp584")); + RzILOpPure *op_AND_124 = LOGAND(cond_121, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_LSHIFT_129 = SHIFTL0(CAST(64, IL_FALSE, op_AND_124), SN(32, 0)); + RzILOpPure *op_OR_131 = LOGOR(CAST(64, IL_FALSE, op_AND_7), op_LSHIFT_129); + RzILOpEffect *op_ASSIGN_133 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_131)); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st32) 
((st16) ((R ...; + RzILOpEffect *seq_134 = SEQN(2, seq_119, op_ASSIGN_133); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_212 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) - ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))), 0x0, 0x10) == ((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) - ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) - ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff)))) < 0x0) ? (-(0x1 << 0xf)) : (0x1 << 0xf) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_149 = SHIFTRA(DUP(Rss), SN(32, 16)); + RzILOpPure *op_AND_152 = LOGAND(op_RSHIFT_149, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_157 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_160 = LOGAND(op_RSHIFT_157, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_SUB_164 = SUB(CAST(32, MSB(CAST(16, MSB(op_AND_152), DUP(op_AND_152))), CAST(16, MSB(DUP(op_AND_152)), DUP(op_AND_152))), CAST(32, MSB(CAST(16, MSB(op_AND_160), DUP(op_AND_160))), CAST(16, MSB(DUP(op_AND_160)), DUP(op_AND_160)))); + RzILOpPure *op_RSHIFT_173 = SHIFTRA(DUP(Rss), SN(32, 16)); + RzILOpPure *op_AND_176 = LOGAND(op_RSHIFT_173, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_181 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_184 = LOGAND(op_RSHIFT_181, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_SUB_188 = SUB(CAST(32, MSB(CAST(16, MSB(op_AND_176), DUP(op_AND_176))), CAST(16, MSB(DUP(op_AND_176)), DUP(op_AND_176))), CAST(32, MSB(CAST(16, MSB(op_AND_184), DUP(op_AND_184))), CAST(16, MSB(DUP(op_AND_184)), DUP(op_AND_184)))); + RzILOpPure *op_EQ_190 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_SUB_164), SN(32, 0), SN(32, 
16)), CAST(64, MSB(op_SUB_188), DUP(op_SUB_188))); + RzILOpPure *op_RSHIFT_216 = SHIFTRA(DUP(Rss), SN(32, 16)); + RzILOpPure *op_AND_219 = LOGAND(op_RSHIFT_216, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_224 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_227 = LOGAND(op_RSHIFT_224, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_SUB_231 = SUB(CAST(32, MSB(CAST(16, MSB(op_AND_219), DUP(op_AND_219))), CAST(16, MSB(DUP(op_AND_219)), DUP(op_AND_219))), CAST(32, MSB(CAST(16, MSB(op_AND_227), DUP(op_AND_227))), CAST(16, MSB(DUP(op_AND_227)), DUP(op_AND_227)))); + RzILOpPure *op_LT_233 = SLT(op_SUB_231, SN(32, 0)); + RzILOpPure *op_LSHIFT_238 = SHIFTL0(SN(64, 1), SN(32, 15)); + RzILOpPure *op_NEG_239 = NEG(op_LSHIFT_238); + RzILOpPure *op_LSHIFT_244 = SHIFTL0(SN(64, 1), SN(32, 15)); + RzILOpPure *op_SUB_247 = SUB(op_LSHIFT_244, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_248 = ITE(op_LT_233, op_NEG_239, op_SUB_247); + RzILOpEffect *gcc_expr_249 = BRANCH(op_EQ_190, EMPTY(), set_usr_field_call_212); + + // h_tmp585 = HYB(gcc_expr_if ((sextract64(((ut64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) - ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))), 0x0, 0x10) == ((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) - ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) - ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff)))) < 0x0) ? 
(-(0x1 << 0xf)) : (0x1 << 0xf) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_251 = SETL("h_tmp585", cond_248); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st32) ((st16) ((Rss > ...; + RzILOpEffect *seq_252 = SEQN(2, gcc_expr_249, op_ASSIGN_hybrid_tmp_251); + + // Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << 0x10)))) | (((ut64) (((sextract64(((ut64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) - ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))), 0x0, 0x10) == ((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) - ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff)))))) ? ((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) - ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) : h_tmp585) & ((st64) 0xffff))) << 0x10))); + RzILOpPure *op_LSHIFT_140 = SHIFTL0(SN(64, 0xffff), SN(32, 16)); + RzILOpPure *op_NOT_141 = LOGNOT(op_LSHIFT_140); + RzILOpPure *op_AND_142 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_141); + RzILOpPure *op_RSHIFT_194 = SHIFTRA(DUP(Rss), SN(32, 16)); + RzILOpPure *op_AND_197 = LOGAND(op_RSHIFT_194, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_202 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_205 = LOGAND(op_RSHIFT_202, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_SUB_209 = SUB(CAST(32, MSB(CAST(16, MSB(op_AND_197), DUP(op_AND_197))), CAST(16, MSB(DUP(op_AND_197)), DUP(op_AND_197))), CAST(32, MSB(CAST(16, MSB(op_AND_205), DUP(op_AND_205))), CAST(16, MSB(DUP(op_AND_205)), DUP(op_AND_205)))); + RzILOpPure *cond_254 = ITE(DUP(op_EQ_190), CAST(64, MSB(op_SUB_209), DUP(op_SUB_209)), VARL("h_tmp585")); + RzILOpPure *op_AND_257 = LOGAND(cond_254, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_LSHIFT_262 = SHIFTL0(CAST(64, IL_FALSE, op_AND_257), SN(32, 16)); + RzILOpPure *op_OR_264 = LOGOR(CAST(64, IL_FALSE, op_AND_142), op_LSHIFT_262); + RzILOpEffect *op_ASSIGN_266 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_264)); + + // seq(seq(HYB(gcc_expr_if 
((sextract64(((ut64) ((st32) ((st16) ((R ...; + RzILOpEffect *seq_267 = SEQN(2, seq_252, op_ASSIGN_266); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_345 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) + ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff))))), 0x0, 0x10) == ((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) + ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff))))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) + ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff)))) < 0x0) ? (-(0x1 << 0xf)) : (0x1 << 0xf) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_282 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_285 = LOGAND(op_RSHIFT_282, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_290 = SHIFTRA(DUP(Rtt), SN(32, 0x30)); + RzILOpPure *op_AND_293 = LOGAND(op_RSHIFT_290, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_ADD_297 = ADD(CAST(32, MSB(CAST(16, MSB(op_AND_285), DUP(op_AND_285))), CAST(16, MSB(DUP(op_AND_285)), DUP(op_AND_285))), CAST(32, MSB(CAST(16, MSB(op_AND_293), DUP(op_AND_293))), CAST(16, MSB(DUP(op_AND_293)), DUP(op_AND_293)))); + RzILOpPure *op_RSHIFT_306 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_309 = LOGAND(op_RSHIFT_306, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_314 = SHIFTRA(DUP(Rtt), SN(32, 0x30)); + RzILOpPure *op_AND_317 = LOGAND(op_RSHIFT_314, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_ADD_321 = ADD(CAST(32, MSB(CAST(16, MSB(op_AND_309), DUP(op_AND_309))), CAST(16, MSB(DUP(op_AND_309)), DUP(op_AND_309))), CAST(32, MSB(CAST(16, MSB(op_AND_317), DUP(op_AND_317))), CAST(16, MSB(DUP(op_AND_317)), DUP(op_AND_317)))); + RzILOpPure *op_EQ_323 = EQ(SEXTRACT64(CAST(64, 
IL_FALSE, op_ADD_297), SN(32, 0), SN(32, 16)), CAST(64, MSB(op_ADD_321), DUP(op_ADD_321))); + RzILOpPure *op_RSHIFT_349 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_352 = LOGAND(op_RSHIFT_349, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_357 = SHIFTRA(DUP(Rtt), SN(32, 0x30)); + RzILOpPure *op_AND_360 = LOGAND(op_RSHIFT_357, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_ADD_364 = ADD(CAST(32, MSB(CAST(16, MSB(op_AND_352), DUP(op_AND_352))), CAST(16, MSB(DUP(op_AND_352)), DUP(op_AND_352))), CAST(32, MSB(CAST(16, MSB(op_AND_360), DUP(op_AND_360))), CAST(16, MSB(DUP(op_AND_360)), DUP(op_AND_360)))); + RzILOpPure *op_LT_366 = SLT(op_ADD_364, SN(32, 0)); + RzILOpPure *op_LSHIFT_371 = SHIFTL0(SN(64, 1), SN(32, 15)); + RzILOpPure *op_NEG_372 = NEG(op_LSHIFT_371); + RzILOpPure *op_LSHIFT_377 = SHIFTL0(SN(64, 1), SN(32, 15)); + RzILOpPure *op_SUB_380 = SUB(op_LSHIFT_377, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_381 = ITE(op_LT_366, op_NEG_372, op_SUB_380); + RzILOpEffect *gcc_expr_382 = BRANCH(op_EQ_323, EMPTY(), set_usr_field_call_345); + + // h_tmp586 = HYB(gcc_expr_if ((sextract64(((ut64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) + ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff))))), 0x0, 0x10) == ((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) + ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff))))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) + ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff)))) < 0x0) ? 
(-(0x1 << 0xf)) : (0x1 << 0xf) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_384 = SETL("h_tmp586", cond_381); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st32) ((st16) ((Rss > ...; + RzILOpEffect *seq_385 = SEQN(2, gcc_expr_382, op_ASSIGN_hybrid_tmp_384); + + // Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << 0x20)))) | (((ut64) (((sextract64(((ut64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) + ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff))))), 0x0, 0x10) == ((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) + ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff)))))) ? ((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) + ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff))))) : h_tmp586) & ((st64) 0xffff))) << 0x20))); + RzILOpPure *op_LSHIFT_273 = SHIFTL0(SN(64, 0xffff), SN(32, 0x20)); + RzILOpPure *op_NOT_274 = LOGNOT(op_LSHIFT_273); + RzILOpPure *op_AND_275 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_274); + RzILOpPure *op_RSHIFT_327 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_330 = LOGAND(op_RSHIFT_327, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_335 = SHIFTRA(DUP(Rtt), SN(32, 0x30)); + RzILOpPure *op_AND_338 = LOGAND(op_RSHIFT_335, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_ADD_342 = ADD(CAST(32, MSB(CAST(16, MSB(op_AND_330), DUP(op_AND_330))), CAST(16, MSB(DUP(op_AND_330)), DUP(op_AND_330))), CAST(32, MSB(CAST(16, MSB(op_AND_338), DUP(op_AND_338))), CAST(16, MSB(DUP(op_AND_338)), DUP(op_AND_338)))); + RzILOpPure *cond_387 = ITE(DUP(op_EQ_323), CAST(64, MSB(op_ADD_342), DUP(op_ADD_342)), VARL("h_tmp586")); + RzILOpPure *op_AND_390 = LOGAND(cond_387, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_LSHIFT_395 = SHIFTL0(CAST(64, IL_FALSE, op_AND_390), SN(32, 0x20)); + RzILOpPure *op_OR_397 = LOGOR(CAST(64, IL_FALSE, op_AND_275), op_LSHIFT_395); + RzILOpEffect *op_ASSIGN_399 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_397)); + + // 
seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st32) ((st16) ((R ...; + RzILOpEffect *seq_400 = SEQN(2, seq_385, op_ASSIGN_399); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_478 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) - ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))), 0x0, 0x10) == ((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) - ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) - ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff)))) < 0x0) ? (-(0x1 << 0xf)) : (0x1 << 0xf) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_415 = SHIFTRA(DUP(Rss), SN(32, 0x30)); + RzILOpPure *op_AND_418 = LOGAND(op_RSHIFT_415, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_423 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_426 = LOGAND(op_RSHIFT_423, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_SUB_430 = SUB(CAST(32, MSB(CAST(16, MSB(op_AND_418), DUP(op_AND_418))), CAST(16, MSB(DUP(op_AND_418)), DUP(op_AND_418))), CAST(32, MSB(CAST(16, MSB(op_AND_426), DUP(op_AND_426))), CAST(16, MSB(DUP(op_AND_426)), DUP(op_AND_426)))); + RzILOpPure *op_RSHIFT_439 = SHIFTRA(DUP(Rss), SN(32, 0x30)); + RzILOpPure *op_AND_442 = LOGAND(op_RSHIFT_439, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_447 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_450 = LOGAND(op_RSHIFT_447, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_SUB_454 = SUB(CAST(32, MSB(CAST(16, MSB(op_AND_442), DUP(op_AND_442))), CAST(16, MSB(DUP(op_AND_442)), DUP(op_AND_442))), CAST(32, MSB(CAST(16, MSB(op_AND_450), DUP(op_AND_450))), CAST(16, MSB(DUP(op_AND_450)), DUP(op_AND_450)))); + RzILOpPure *op_EQ_456 
= EQ(SEXTRACT64(CAST(64, IL_FALSE, op_SUB_430), SN(32, 0), SN(32, 16)), CAST(64, MSB(op_SUB_454), DUP(op_SUB_454))); + RzILOpPure *op_RSHIFT_482 = SHIFTRA(DUP(Rss), SN(32, 0x30)); + RzILOpPure *op_AND_485 = LOGAND(op_RSHIFT_482, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_490 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_493 = LOGAND(op_RSHIFT_490, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_SUB_497 = SUB(CAST(32, MSB(CAST(16, MSB(op_AND_485), DUP(op_AND_485))), CAST(16, MSB(DUP(op_AND_485)), DUP(op_AND_485))), CAST(32, MSB(CAST(16, MSB(op_AND_493), DUP(op_AND_493))), CAST(16, MSB(DUP(op_AND_493)), DUP(op_AND_493)))); + RzILOpPure *op_LT_499 = SLT(op_SUB_497, SN(32, 0)); + RzILOpPure *op_LSHIFT_504 = SHIFTL0(SN(64, 1), SN(32, 15)); + RzILOpPure *op_NEG_505 = NEG(op_LSHIFT_504); + RzILOpPure *op_LSHIFT_510 = SHIFTL0(SN(64, 1), SN(32, 15)); + RzILOpPure *op_SUB_513 = SUB(op_LSHIFT_510, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_514 = ITE(op_LT_499, op_NEG_505, op_SUB_513); + RzILOpEffect *gcc_expr_515 = BRANCH(op_EQ_456, EMPTY(), set_usr_field_call_478); + + // h_tmp587 = HYB(gcc_expr_if ((sextract64(((ut64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) - ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))), 0x0, 0x10) == ((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) - ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) - ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff)))) < 0x0) ? 
(-(0x1 << 0xf)) : (0x1 << 0xf) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_517 = SETL("h_tmp587", cond_514); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st32) ((st16) ((Rss > ...; + RzILOpEffect *seq_518 = SEQN(2, gcc_expr_515, op_ASSIGN_hybrid_tmp_517); + + // Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << 0x30)))) | (((ut64) (((sextract64(((ut64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) - ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))), 0x0, 0x10) == ((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) - ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff)))))) ? ((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) - ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) : h_tmp587) & ((st64) 0xffff))) << 0x30))); + RzILOpPure *op_LSHIFT_406 = SHIFTL0(SN(64, 0xffff), SN(32, 0x30)); + RzILOpPure *op_NOT_407 = LOGNOT(op_LSHIFT_406); + RzILOpPure *op_AND_408 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_407); + RzILOpPure *op_RSHIFT_460 = SHIFTRA(DUP(Rss), SN(32, 0x30)); + RzILOpPure *op_AND_463 = LOGAND(op_RSHIFT_460, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_468 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_471 = LOGAND(op_RSHIFT_468, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_SUB_475 = SUB(CAST(32, MSB(CAST(16, MSB(op_AND_463), DUP(op_AND_463))), CAST(16, MSB(DUP(op_AND_463)), DUP(op_AND_463))), CAST(32, MSB(CAST(16, MSB(op_AND_471), DUP(op_AND_471))), CAST(16, MSB(DUP(op_AND_471)), DUP(op_AND_471)))); + RzILOpPure *cond_520 = ITE(DUP(op_EQ_456), CAST(64, MSB(op_SUB_475), DUP(op_SUB_475)), VARL("h_tmp587")); + RzILOpPure *op_AND_523 = LOGAND(cond_520, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_LSHIFT_528 = SHIFTL0(CAST(64, IL_FALSE, op_AND_523), SN(32, 0x30)); + RzILOpPure *op_OR_530 = LOGOR(CAST(64, IL_FALSE, op_AND_408), op_LSHIFT_528); + RzILOpEffect *op_ASSIGN_532 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_530)); + + // 
seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st32) ((st16) ((R ...; + RzILOpEffect *seq_533 = SEQN(2, seq_518, op_ASSIGN_532); + + RzILOpEffect *instruction_sequence = SEQN(4, seq_134, seq_267, seq_400, seq_533); + return instruction_sequence; +} + +// Rdd = vxaddsubh(Rss,Rtt):rnd:>>1:sat +RzILOpEffect *hex_il_op_s4_vxaddsubhr(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_91 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) (((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) + ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff)))) + 0x1 >> 0x1)), 0x0, 0x10) == ((st64) (((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) + ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff)))) + 0x1 >> 0x1)))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) + ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff)))) + 0x1 >> 0x1) < 0x0) ? 
(-(0x1 << 0xf)) : (0x1 << 0xf) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_15 = SHIFTRA(Rss, SN(32, 0)); + RzILOpPure *op_AND_18 = LOGAND(op_RSHIFT_15, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_24 = SHIFTRA(Rtt, SN(32, 16)); + RzILOpPure *op_AND_27 = LOGAND(op_RSHIFT_24, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_ADD_31 = ADD(CAST(32, MSB(CAST(16, MSB(op_AND_18), DUP(op_AND_18))), CAST(16, MSB(DUP(op_AND_18)), DUP(op_AND_18))), CAST(32, MSB(CAST(16, MSB(op_AND_27), DUP(op_AND_27))), CAST(16, MSB(DUP(op_AND_27)), DUP(op_AND_27)))); + RzILOpPure *op_ADD_33 = ADD(op_ADD_31, SN(32, 1)); + RzILOpPure *op_RSHIFT_35 = SHIFTRA(op_ADD_33, SN(32, 1)); + RzILOpPure *op_RSHIFT_44 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_47 = LOGAND(op_RSHIFT_44, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_52 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_55 = LOGAND(op_RSHIFT_52, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_ADD_59 = ADD(CAST(32, MSB(CAST(16, MSB(op_AND_47), DUP(op_AND_47))), CAST(16, MSB(DUP(op_AND_47)), DUP(op_AND_47))), CAST(32, MSB(CAST(16, MSB(op_AND_55), DUP(op_AND_55))), CAST(16, MSB(DUP(op_AND_55)), DUP(op_AND_55)))); + RzILOpPure *op_ADD_61 = ADD(op_ADD_59, SN(32, 1)); + RzILOpPure *op_RSHIFT_63 = SHIFTRA(op_ADD_61, SN(32, 1)); + RzILOpPure *op_EQ_65 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_RSHIFT_35), SN(32, 0), SN(32, 16)), CAST(64, MSB(op_RSHIFT_63), DUP(op_RSHIFT_63))); + RzILOpPure *op_RSHIFT_95 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_98 = LOGAND(op_RSHIFT_95, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_103 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_106 = LOGAND(op_RSHIFT_103, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_ADD_110 = ADD(CAST(32, MSB(CAST(16, MSB(op_AND_98), DUP(op_AND_98))), CAST(16, MSB(DUP(op_AND_98)), DUP(op_AND_98))), CAST(32, MSB(CAST(16, MSB(op_AND_106), 
DUP(op_AND_106))), CAST(16, MSB(DUP(op_AND_106)), DUP(op_AND_106)))); + RzILOpPure *op_ADD_112 = ADD(op_ADD_110, SN(32, 1)); + RzILOpPure *op_RSHIFT_114 = SHIFTRA(op_ADD_112, SN(32, 1)); + RzILOpPure *op_LT_116 = SLT(op_RSHIFT_114, SN(32, 0)); + RzILOpPure *op_LSHIFT_121 = SHIFTL0(SN(64, 1), SN(32, 15)); + RzILOpPure *op_NEG_122 = NEG(op_LSHIFT_121); + RzILOpPure *op_LSHIFT_127 = SHIFTL0(SN(64, 1), SN(32, 15)); + RzILOpPure *op_SUB_130 = SUB(op_LSHIFT_127, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_131 = ITE(op_LT_116, op_NEG_122, op_SUB_130); + RzILOpEffect *gcc_expr_132 = BRANCH(op_EQ_65, EMPTY(), set_usr_field_call_91); + + // h_tmp588 = HYB(gcc_expr_if ((sextract64(((ut64) (((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) + ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff)))) + 0x1 >> 0x1)), 0x0, 0x10) == ((st64) (((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) + ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff)))) + 0x1 >> 0x1)))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) + ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff)))) + 0x1 >> 0x1) < 0x0) ? (-(0x1 << 0xf)) : (0x1 << 0xf) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_134 = SETL("h_tmp588", cond_131); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st32) ((st16) ((Rss ...; + RzILOpEffect *seq_135 = SEQN(2, gcc_expr_132, op_ASSIGN_hybrid_tmp_134); + + // Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << 0x0)))) | (((ut64) (((sextract64(((ut64) (((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) + ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff)))) + 0x1 >> 0x1)), 0x0, 0x10) == ((st64) (((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) + ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff)))) + 0x1 >> 0x1))) ? 
((st64) (((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) + ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff)))) + 0x1 >> 0x1)) : h_tmp588) & ((st64) 0xffff))) << 0x0))); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(SN(64, 0xffff), SN(32, 0)); + RzILOpPure *op_NOT_6 = LOGNOT(op_LSHIFT_5); + RzILOpPure *op_AND_7 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_6); + RzILOpPure *op_RSHIFT_69 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_72 = LOGAND(op_RSHIFT_69, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_77 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_80 = LOGAND(op_RSHIFT_77, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_ADD_84 = ADD(CAST(32, MSB(CAST(16, MSB(op_AND_72), DUP(op_AND_72))), CAST(16, MSB(DUP(op_AND_72)), DUP(op_AND_72))), CAST(32, MSB(CAST(16, MSB(op_AND_80), DUP(op_AND_80))), CAST(16, MSB(DUP(op_AND_80)), DUP(op_AND_80)))); + RzILOpPure *op_ADD_86 = ADD(op_ADD_84, SN(32, 1)); + RzILOpPure *op_RSHIFT_88 = SHIFTRA(op_ADD_86, SN(32, 1)); + RzILOpPure *cond_137 = ITE(DUP(op_EQ_65), CAST(64, MSB(op_RSHIFT_88), DUP(op_RSHIFT_88)), VARL("h_tmp588")); + RzILOpPure *op_AND_140 = LOGAND(cond_137, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_LSHIFT_145 = SHIFTL0(CAST(64, IL_FALSE, op_AND_140), SN(32, 0)); + RzILOpPure *op_OR_147 = LOGOR(CAST(64, IL_FALSE, op_AND_7), op_LSHIFT_145); + RzILOpEffect *op_ASSIGN_149 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_147)); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st32) ((st16) (( ...; + RzILOpEffect *seq_150 = SEQN(2, seq_135, op_ASSIGN_149); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_240 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) (((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) - ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff)))) + 0x1 >> 0x1)), 0x0, 0x10) == ((st64) (((st32) ((st16) 
((Rss >> 0x10) & ((st64) 0xffff)))) - ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff)))) + 0x1 >> 0x1)))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) - ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff)))) + 0x1 >> 0x1) < 0x0) ? (-(0x1 << 0xf)) : (0x1 << 0xf) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_165 = SHIFTRA(DUP(Rss), SN(32, 16)); + RzILOpPure *op_AND_168 = LOGAND(op_RSHIFT_165, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_173 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_176 = LOGAND(op_RSHIFT_173, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_SUB_180 = SUB(CAST(32, MSB(CAST(16, MSB(op_AND_168), DUP(op_AND_168))), CAST(16, MSB(DUP(op_AND_168)), DUP(op_AND_168))), CAST(32, MSB(CAST(16, MSB(op_AND_176), DUP(op_AND_176))), CAST(16, MSB(DUP(op_AND_176)), DUP(op_AND_176)))); + RzILOpPure *op_ADD_182 = ADD(op_SUB_180, SN(32, 1)); + RzILOpPure *op_RSHIFT_184 = SHIFTRA(op_ADD_182, SN(32, 1)); + RzILOpPure *op_RSHIFT_193 = SHIFTRA(DUP(Rss), SN(32, 16)); + RzILOpPure *op_AND_196 = LOGAND(op_RSHIFT_193, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_201 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_204 = LOGAND(op_RSHIFT_201, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_SUB_208 = SUB(CAST(32, MSB(CAST(16, MSB(op_AND_196), DUP(op_AND_196))), CAST(16, MSB(DUP(op_AND_196)), DUP(op_AND_196))), CAST(32, MSB(CAST(16, MSB(op_AND_204), DUP(op_AND_204))), CAST(16, MSB(DUP(op_AND_204)), DUP(op_AND_204)))); + RzILOpPure *op_ADD_210 = ADD(op_SUB_208, SN(32, 1)); + RzILOpPure *op_RSHIFT_212 = SHIFTRA(op_ADD_210, SN(32, 1)); + RzILOpPure *op_EQ_214 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_RSHIFT_184), SN(32, 0), SN(32, 16)), CAST(64, MSB(op_RSHIFT_212), DUP(op_RSHIFT_212))); + RzILOpPure *op_RSHIFT_244 = SHIFTRA(DUP(Rss), SN(32, 16)); + RzILOpPure *op_AND_247 = LOGAND(op_RSHIFT_244, CAST(64, MSB(SN(32, 
0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_252 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_255 = LOGAND(op_RSHIFT_252, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_SUB_259 = SUB(CAST(32, MSB(CAST(16, MSB(op_AND_247), DUP(op_AND_247))), CAST(16, MSB(DUP(op_AND_247)), DUP(op_AND_247))), CAST(32, MSB(CAST(16, MSB(op_AND_255), DUP(op_AND_255))), CAST(16, MSB(DUP(op_AND_255)), DUP(op_AND_255)))); + RzILOpPure *op_ADD_261 = ADD(op_SUB_259, SN(32, 1)); + RzILOpPure *op_RSHIFT_263 = SHIFTRA(op_ADD_261, SN(32, 1)); + RzILOpPure *op_LT_265 = SLT(op_RSHIFT_263, SN(32, 0)); + RzILOpPure *op_LSHIFT_270 = SHIFTL0(SN(64, 1), SN(32, 15)); + RzILOpPure *op_NEG_271 = NEG(op_LSHIFT_270); + RzILOpPure *op_LSHIFT_276 = SHIFTL0(SN(64, 1), SN(32, 15)); + RzILOpPure *op_SUB_279 = SUB(op_LSHIFT_276, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_280 = ITE(op_LT_265, op_NEG_271, op_SUB_279); + RzILOpEffect *gcc_expr_281 = BRANCH(op_EQ_214, EMPTY(), set_usr_field_call_240); + + // h_tmp589 = HYB(gcc_expr_if ((sextract64(((ut64) (((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) - ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff)))) + 0x1 >> 0x1)), 0x0, 0x10) == ((st64) (((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) - ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff)))) + 0x1 >> 0x1)))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) - ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff)))) + 0x1 >> 0x1) < 0x0) ? 
(-(0x1 << 0xf)) : (0x1 << 0xf) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_283 = SETL("h_tmp589", cond_280); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st32) ((st16) ((Rss ...; + RzILOpEffect *seq_284 = SEQN(2, gcc_expr_281, op_ASSIGN_hybrid_tmp_283); + + // Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << 0x10)))) | (((ut64) (((sextract64(((ut64) (((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) - ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff)))) + 0x1 >> 0x1)), 0x0, 0x10) == ((st64) (((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) - ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff)))) + 0x1 >> 0x1))) ? ((st64) (((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) - ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff)))) + 0x1 >> 0x1)) : h_tmp589) & ((st64) 0xffff))) << 0x10))); + RzILOpPure *op_LSHIFT_156 = SHIFTL0(SN(64, 0xffff), SN(32, 16)); + RzILOpPure *op_NOT_157 = LOGNOT(op_LSHIFT_156); + RzILOpPure *op_AND_158 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_157); + RzILOpPure *op_RSHIFT_218 = SHIFTRA(DUP(Rss), SN(32, 16)); + RzILOpPure *op_AND_221 = LOGAND(op_RSHIFT_218, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_226 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_229 = LOGAND(op_RSHIFT_226, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_SUB_233 = SUB(CAST(32, MSB(CAST(16, MSB(op_AND_221), DUP(op_AND_221))), CAST(16, MSB(DUP(op_AND_221)), DUP(op_AND_221))), CAST(32, MSB(CAST(16, MSB(op_AND_229), DUP(op_AND_229))), CAST(16, MSB(DUP(op_AND_229)), DUP(op_AND_229)))); + RzILOpPure *op_ADD_235 = ADD(op_SUB_233, SN(32, 1)); + RzILOpPure *op_RSHIFT_237 = SHIFTRA(op_ADD_235, SN(32, 1)); + RzILOpPure *cond_286 = ITE(DUP(op_EQ_214), CAST(64, MSB(op_RSHIFT_237), DUP(op_RSHIFT_237)), VARL("h_tmp589")); + RzILOpPure *op_AND_289 = LOGAND(cond_286, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_LSHIFT_294 = SHIFTL0(CAST(64, IL_FALSE, op_AND_289), SN(32, 16)); + RzILOpPure *op_OR_296 = 
LOGOR(CAST(64, IL_FALSE, op_AND_158), op_LSHIFT_294); + RzILOpEffect *op_ASSIGN_298 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_296)); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st32) ((st16) (( ...; + RzILOpEffect *seq_299 = SEQN(2, seq_284, op_ASSIGN_298); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_389 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) (((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) + ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff)))) + 0x1 >> 0x1)), 0x0, 0x10) == ((st64) (((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) + ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff)))) + 0x1 >> 0x1)))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) + ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff)))) + 0x1 >> 0x1) < 0x0) ? (-(0x1 << 0xf)) : (0x1 << 0xf) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_314 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_317 = LOGAND(op_RSHIFT_314, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_322 = SHIFTRA(DUP(Rtt), SN(32, 0x30)); + RzILOpPure *op_AND_325 = LOGAND(op_RSHIFT_322, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_ADD_329 = ADD(CAST(32, MSB(CAST(16, MSB(op_AND_317), DUP(op_AND_317))), CAST(16, MSB(DUP(op_AND_317)), DUP(op_AND_317))), CAST(32, MSB(CAST(16, MSB(op_AND_325), DUP(op_AND_325))), CAST(16, MSB(DUP(op_AND_325)), DUP(op_AND_325)))); + RzILOpPure *op_ADD_331 = ADD(op_ADD_329, SN(32, 1)); + RzILOpPure *op_RSHIFT_333 = SHIFTRA(op_ADD_331, SN(32, 1)); + RzILOpPure *op_RSHIFT_342 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_345 = LOGAND(op_RSHIFT_342, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_350 = SHIFTRA(DUP(Rtt), SN(32, 0x30)); + RzILOpPure *op_AND_353 = LOGAND(op_RSHIFT_350, 
CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_ADD_357 = ADD(CAST(32, MSB(CAST(16, MSB(op_AND_345), DUP(op_AND_345))), CAST(16, MSB(DUP(op_AND_345)), DUP(op_AND_345))), CAST(32, MSB(CAST(16, MSB(op_AND_353), DUP(op_AND_353))), CAST(16, MSB(DUP(op_AND_353)), DUP(op_AND_353)))); + RzILOpPure *op_ADD_359 = ADD(op_ADD_357, SN(32, 1)); + RzILOpPure *op_RSHIFT_361 = SHIFTRA(op_ADD_359, SN(32, 1)); + RzILOpPure *op_EQ_363 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_RSHIFT_333), SN(32, 0), SN(32, 16)), CAST(64, MSB(op_RSHIFT_361), DUP(op_RSHIFT_361))); + RzILOpPure *op_RSHIFT_393 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_396 = LOGAND(op_RSHIFT_393, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_401 = SHIFTRA(DUP(Rtt), SN(32, 0x30)); + RzILOpPure *op_AND_404 = LOGAND(op_RSHIFT_401, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_ADD_408 = ADD(CAST(32, MSB(CAST(16, MSB(op_AND_396), DUP(op_AND_396))), CAST(16, MSB(DUP(op_AND_396)), DUP(op_AND_396))), CAST(32, MSB(CAST(16, MSB(op_AND_404), DUP(op_AND_404))), CAST(16, MSB(DUP(op_AND_404)), DUP(op_AND_404)))); + RzILOpPure *op_ADD_410 = ADD(op_ADD_408, SN(32, 1)); + RzILOpPure *op_RSHIFT_412 = SHIFTRA(op_ADD_410, SN(32, 1)); + RzILOpPure *op_LT_414 = SLT(op_RSHIFT_412, SN(32, 0)); + RzILOpPure *op_LSHIFT_419 = SHIFTL0(SN(64, 1), SN(32, 15)); + RzILOpPure *op_NEG_420 = NEG(op_LSHIFT_419); + RzILOpPure *op_LSHIFT_425 = SHIFTL0(SN(64, 1), SN(32, 15)); + RzILOpPure *op_SUB_428 = SUB(op_LSHIFT_425, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_429 = ITE(op_LT_414, op_NEG_420, op_SUB_428); + RzILOpEffect *gcc_expr_430 = BRANCH(op_EQ_363, EMPTY(), set_usr_field_call_389); + + // h_tmp590 = HYB(gcc_expr_if ((sextract64(((ut64) (((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) + ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff)))) + 0x1 >> 0x1)), 0x0, 0x10) == ((st64) (((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) + ((st32) ((st16) ((Rtt >> 0x30) & 
((st64) 0xffff)))) + 0x1 >> 0x1)))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) + ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff)))) + 0x1 >> 0x1) < 0x0) ? (-(0x1 << 0xf)) : (0x1 << 0xf) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_432 = SETL("h_tmp590", cond_429); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st32) ((st16) ((Rss ...; + RzILOpEffect *seq_433 = SEQN(2, gcc_expr_430, op_ASSIGN_hybrid_tmp_432); + + // Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << 0x20)))) | (((ut64) (((sextract64(((ut64) (((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) + ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff)))) + 0x1 >> 0x1)), 0x0, 0x10) == ((st64) (((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) + ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff)))) + 0x1 >> 0x1))) ? ((st64) (((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) + ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff)))) + 0x1 >> 0x1)) : h_tmp590) & ((st64) 0xffff))) << 0x20))); + RzILOpPure *op_LSHIFT_305 = SHIFTL0(SN(64, 0xffff), SN(32, 0x20)); + RzILOpPure *op_NOT_306 = LOGNOT(op_LSHIFT_305); + RzILOpPure *op_AND_307 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_306); + RzILOpPure *op_RSHIFT_367 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_370 = LOGAND(op_RSHIFT_367, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_375 = SHIFTRA(DUP(Rtt), SN(32, 0x30)); + RzILOpPure *op_AND_378 = LOGAND(op_RSHIFT_375, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_ADD_382 = ADD(CAST(32, MSB(CAST(16, MSB(op_AND_370), DUP(op_AND_370))), CAST(16, MSB(DUP(op_AND_370)), DUP(op_AND_370))), CAST(32, MSB(CAST(16, MSB(op_AND_378), DUP(op_AND_378))), CAST(16, MSB(DUP(op_AND_378)), DUP(op_AND_378)))); + RzILOpPure *op_ADD_384 = ADD(op_ADD_382, SN(32, 1)); + RzILOpPure *op_RSHIFT_386 = SHIFTRA(op_ADD_384, SN(32, 1)); + RzILOpPure *cond_435 = ITE(DUP(op_EQ_363), CAST(64, 
MSB(op_RSHIFT_386), DUP(op_RSHIFT_386)), VARL("h_tmp590")); + RzILOpPure *op_AND_438 = LOGAND(cond_435, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_LSHIFT_443 = SHIFTL0(CAST(64, IL_FALSE, op_AND_438), SN(32, 0x20)); + RzILOpPure *op_OR_445 = LOGOR(CAST(64, IL_FALSE, op_AND_307), op_LSHIFT_443); + RzILOpEffect *op_ASSIGN_447 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_445)); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st32) ((st16) (( ...; + RzILOpEffect *seq_448 = SEQN(2, seq_433, op_ASSIGN_447); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_538 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) (((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) - ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff)))) + 0x1 >> 0x1)), 0x0, 0x10) == ((st64) (((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) - ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff)))) + 0x1 >> 0x1)))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) - ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff)))) + 0x1 >> 0x1) < 0x0) ? 
(-(0x1 << 0xf)) : (0x1 << 0xf) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_463 = SHIFTRA(DUP(Rss), SN(32, 0x30)); + RzILOpPure *op_AND_466 = LOGAND(op_RSHIFT_463, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_471 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_474 = LOGAND(op_RSHIFT_471, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_SUB_478 = SUB(CAST(32, MSB(CAST(16, MSB(op_AND_466), DUP(op_AND_466))), CAST(16, MSB(DUP(op_AND_466)), DUP(op_AND_466))), CAST(32, MSB(CAST(16, MSB(op_AND_474), DUP(op_AND_474))), CAST(16, MSB(DUP(op_AND_474)), DUP(op_AND_474)))); + RzILOpPure *op_ADD_480 = ADD(op_SUB_478, SN(32, 1)); + RzILOpPure *op_RSHIFT_482 = SHIFTRA(op_ADD_480, SN(32, 1)); + RzILOpPure *op_RSHIFT_491 = SHIFTRA(DUP(Rss), SN(32, 0x30)); + RzILOpPure *op_AND_494 = LOGAND(op_RSHIFT_491, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_499 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_502 = LOGAND(op_RSHIFT_499, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_SUB_506 = SUB(CAST(32, MSB(CAST(16, MSB(op_AND_494), DUP(op_AND_494))), CAST(16, MSB(DUP(op_AND_494)), DUP(op_AND_494))), CAST(32, MSB(CAST(16, MSB(op_AND_502), DUP(op_AND_502))), CAST(16, MSB(DUP(op_AND_502)), DUP(op_AND_502)))); + RzILOpPure *op_ADD_508 = ADD(op_SUB_506, SN(32, 1)); + RzILOpPure *op_RSHIFT_510 = SHIFTRA(op_ADD_508, SN(32, 1)); + RzILOpPure *op_EQ_512 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_RSHIFT_482), SN(32, 0), SN(32, 16)), CAST(64, MSB(op_RSHIFT_510), DUP(op_RSHIFT_510))); + RzILOpPure *op_RSHIFT_542 = SHIFTRA(DUP(Rss), SN(32, 0x30)); + RzILOpPure *op_AND_545 = LOGAND(op_RSHIFT_542, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_550 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_553 = LOGAND(op_RSHIFT_550, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_SUB_557 = SUB(CAST(32, MSB(CAST(16, MSB(op_AND_545), DUP(op_AND_545))), CAST(16, 
MSB(DUP(op_AND_545)), DUP(op_AND_545))), CAST(32, MSB(CAST(16, MSB(op_AND_553), DUP(op_AND_553))), CAST(16, MSB(DUP(op_AND_553)), DUP(op_AND_553)))); + RzILOpPure *op_ADD_559 = ADD(op_SUB_557, SN(32, 1)); + RzILOpPure *op_RSHIFT_561 = SHIFTRA(op_ADD_559, SN(32, 1)); + RzILOpPure *op_LT_563 = SLT(op_RSHIFT_561, SN(32, 0)); + RzILOpPure *op_LSHIFT_568 = SHIFTL0(SN(64, 1), SN(32, 15)); + RzILOpPure *op_NEG_569 = NEG(op_LSHIFT_568); + RzILOpPure *op_LSHIFT_574 = SHIFTL0(SN(64, 1), SN(32, 15)); + RzILOpPure *op_SUB_577 = SUB(op_LSHIFT_574, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_578 = ITE(op_LT_563, op_NEG_569, op_SUB_577); + RzILOpEffect *gcc_expr_579 = BRANCH(op_EQ_512, EMPTY(), set_usr_field_call_538); + + // h_tmp591 = HYB(gcc_expr_if ((sextract64(((ut64) (((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) - ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff)))) + 0x1 >> 0x1)), 0x0, 0x10) == ((st64) (((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) - ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff)))) + 0x1 >> 0x1)))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) - ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff)))) + 0x1 >> 0x1) < 0x0) ? (-(0x1 << 0xf)) : (0x1 << 0xf) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_581 = SETL("h_tmp591", cond_578); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st32) ((st16) ((Rss ...; + RzILOpEffect *seq_582 = SEQN(2, gcc_expr_579, op_ASSIGN_hybrid_tmp_581); + + // Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << 0x30)))) | (((ut64) (((sextract64(((ut64) (((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) - ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff)))) + 0x1 >> 0x1)), 0x0, 0x10) == ((st64) (((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) - ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff)))) + 0x1 >> 0x1))) ? 
((st64) (((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) - ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff)))) + 0x1 >> 0x1)) : h_tmp591) & ((st64) 0xffff))) << 0x30))); + RzILOpPure *op_LSHIFT_454 = SHIFTL0(SN(64, 0xffff), SN(32, 0x30)); + RzILOpPure *op_NOT_455 = LOGNOT(op_LSHIFT_454); + RzILOpPure *op_AND_456 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_455); + RzILOpPure *op_RSHIFT_516 = SHIFTRA(DUP(Rss), SN(32, 0x30)); + RzILOpPure *op_AND_519 = LOGAND(op_RSHIFT_516, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_524 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_527 = LOGAND(op_RSHIFT_524, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_SUB_531 = SUB(CAST(32, MSB(CAST(16, MSB(op_AND_519), DUP(op_AND_519))), CAST(16, MSB(DUP(op_AND_519)), DUP(op_AND_519))), CAST(32, MSB(CAST(16, MSB(op_AND_527), DUP(op_AND_527))), CAST(16, MSB(DUP(op_AND_527)), DUP(op_AND_527)))); + RzILOpPure *op_ADD_533 = ADD(op_SUB_531, SN(32, 1)); + RzILOpPure *op_RSHIFT_535 = SHIFTRA(op_ADD_533, SN(32, 1)); + RzILOpPure *cond_584 = ITE(DUP(op_EQ_512), CAST(64, MSB(op_RSHIFT_535), DUP(op_RSHIFT_535)), VARL("h_tmp591")); + RzILOpPure *op_AND_587 = LOGAND(cond_584, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_LSHIFT_592 = SHIFTL0(CAST(64, IL_FALSE, op_AND_587), SN(32, 0x30)); + RzILOpPure *op_OR_594 = LOGOR(CAST(64, IL_FALSE, op_AND_456), op_LSHIFT_592); + RzILOpEffect *op_ASSIGN_596 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_594)); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st32) ((st16) (( ...; + RzILOpEffect *seq_597 = SEQN(2, seq_582, op_ASSIGN_596); + + RzILOpEffect *instruction_sequence = SEQN(4, seq_150, seq_299, seq_448, seq_597); + return instruction_sequence; +} + +// Rdd = vxaddsubw(Rss,Rtt):sat +RzILOpEffect *hex_il_op_s4_vxaddsubw(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + 
const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_72 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))) + ((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff)))), 0x0, 0x20) == ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))) + ((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))) + ((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff))) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_15 = SHIFTRA(Rss, SN(32, 0)); + RzILOpPure *op_AND_17 = LOGAND(op_RSHIFT_15, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_24 = SHIFTRA(Rtt, SN(32, 0x20)); + RzILOpPure *op_AND_26 = LOGAND(op_RSHIFT_24, SN(64, 0xffffffff)); + RzILOpPure *op_ADD_29 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_17), DUP(op_AND_17))), CAST(32, MSB(DUP(op_AND_17)), DUP(op_AND_17))), CAST(64, MSB(CAST(32, MSB(op_AND_26), DUP(op_AND_26))), CAST(32, MSB(DUP(op_AND_26)), DUP(op_AND_26)))); + RzILOpPure *op_RSHIFT_38 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_40 = LOGAND(op_RSHIFT_38, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_46 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_48 = LOGAND(op_RSHIFT_46, SN(64, 0xffffffff)); + RzILOpPure *op_ADD_51 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_40), DUP(op_AND_40))), CAST(32, MSB(DUP(op_AND_40)), DUP(op_AND_40))), CAST(64, MSB(CAST(32, MSB(op_AND_48), DUP(op_AND_48))), CAST(32, MSB(DUP(op_AND_48)), DUP(op_AND_48)))); + RzILOpPure *op_EQ_52 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_ADD_29), SN(32, 0), SN(32, 0x20)), op_ADD_51); + RzILOpPure *op_RSHIFT_76 = 
SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_78 = LOGAND(op_RSHIFT_76, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_84 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_86 = LOGAND(op_RSHIFT_84, SN(64, 0xffffffff)); + RzILOpPure *op_ADD_89 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_78), DUP(op_AND_78))), CAST(32, MSB(DUP(op_AND_78)), DUP(op_AND_78))), CAST(64, MSB(CAST(32, MSB(op_AND_86), DUP(op_AND_86))), CAST(32, MSB(DUP(op_AND_86)), DUP(op_AND_86)))); + RzILOpPure *op_LT_92 = SLT(op_ADD_89, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_97 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_98 = NEG(op_LSHIFT_97); + RzILOpPure *op_LSHIFT_103 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_106 = SUB(op_LSHIFT_103, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_107 = ITE(op_LT_92, op_NEG_98, op_SUB_106); + RzILOpEffect *gcc_expr_108 = BRANCH(op_EQ_52, EMPTY(), set_usr_field_call_72); + + // h_tmp592 = HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))) + ((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff)))), 0x0, 0x20) == ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))) + ((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))) + ((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff))) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_110 = SETL("h_tmp592", cond_107); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rss > ...; + RzILOpEffect *seq_111 = SEQN(2, gcc_expr_108, op_ASSIGN_hybrid_tmp_110); + + // Rdd = ((Rdd & (~(0xffffffff << 0x0))) | ((((sextract64(((ut64) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))) + ((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff)))), 0x0, 0x20) == ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))) + ((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff)))) ? 
((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))) + ((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff))) : h_tmp592) & 0xffffffff) << 0x0)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0)); + RzILOpPure *op_NOT_6 = LOGNOT(op_LSHIFT_5); + RzILOpPure *op_AND_7 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_6); + RzILOpPure *op_RSHIFT_56 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_58 = LOGAND(op_RSHIFT_56, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_64 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_66 = LOGAND(op_RSHIFT_64, SN(64, 0xffffffff)); + RzILOpPure *op_ADD_69 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_58), DUP(op_AND_58))), CAST(32, MSB(DUP(op_AND_58)), DUP(op_AND_58))), CAST(64, MSB(CAST(32, MSB(op_AND_66), DUP(op_AND_66))), CAST(32, MSB(DUP(op_AND_66)), DUP(op_AND_66)))); + RzILOpPure *cond_112 = ITE(DUP(op_EQ_52), op_ADD_69, VARL("h_tmp592")); + RzILOpPure *op_AND_114 = LOGAND(cond_112, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_118 = SHIFTL0(op_AND_114, SN(32, 0)); + RzILOpPure *op_OR_119 = LOGOR(op_AND_7, op_LSHIFT_118); + RzILOpEffect *op_ASSIGN_120 = WRITE_REG(bundle, Rdd_op, op_OR_119); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((R ...; + RzILOpEffect *seq_121 = SEQN(2, seq_111, op_ASSIGN_120); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_192 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))) - ((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff)))), 0x0, 0x20) == ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))) - ((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))) - ((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff))) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_136 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_138 = LOGAND(op_RSHIFT_136, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_144 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_146 = LOGAND(op_RSHIFT_144, SN(64, 0xffffffff)); + RzILOpPure *op_SUB_149 = SUB(CAST(64, MSB(CAST(32, MSB(op_AND_138), DUP(op_AND_138))), CAST(32, MSB(DUP(op_AND_138)), DUP(op_AND_138))), CAST(64, MSB(CAST(32, MSB(op_AND_146), DUP(op_AND_146))), CAST(32, MSB(DUP(op_AND_146)), DUP(op_AND_146)))); + RzILOpPure *op_RSHIFT_158 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_160 = LOGAND(op_RSHIFT_158, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_166 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_168 = LOGAND(op_RSHIFT_166, SN(64, 0xffffffff)); + RzILOpPure *op_SUB_171 = SUB(CAST(64, MSB(CAST(32, MSB(op_AND_160), DUP(op_AND_160))), CAST(32, MSB(DUP(op_AND_160)), DUP(op_AND_160))), CAST(64, MSB(CAST(32, MSB(op_AND_168), DUP(op_AND_168))), CAST(32, MSB(DUP(op_AND_168)), DUP(op_AND_168)))); + RzILOpPure *op_EQ_172 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_SUB_149), SN(32, 0), SN(32, 0x20)), op_SUB_171); + RzILOpPure *op_RSHIFT_196 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_198 = LOGAND(op_RSHIFT_196, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_204 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_206 = LOGAND(op_RSHIFT_204, SN(64, 0xffffffff)); + RzILOpPure *op_SUB_209 = SUB(CAST(64, MSB(CAST(32, MSB(op_AND_198), DUP(op_AND_198))), CAST(32, MSB(DUP(op_AND_198)), DUP(op_AND_198))), CAST(64, MSB(CAST(32, MSB(op_AND_206), DUP(op_AND_206))), CAST(32, MSB(DUP(op_AND_206)), DUP(op_AND_206)))); + RzILOpPure *op_LT_212 = SLT(op_SUB_209, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_217 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_218 = NEG(op_LSHIFT_217); + RzILOpPure *op_LSHIFT_223 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_226 = SUB(op_LSHIFT_223, 
CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_227 = ITE(op_LT_212, op_NEG_218, op_SUB_226); + RzILOpEffect *gcc_expr_228 = BRANCH(op_EQ_172, EMPTY(), set_usr_field_call_192); + + // h_tmp593 = HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))) - ((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff)))), 0x0, 0x20) == ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))) - ((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))) - ((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff))) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_230 = SETL("h_tmp593", cond_227); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rss > ...; + RzILOpEffect *seq_231 = SEQN(2, gcc_expr_228, op_ASSIGN_hybrid_tmp_230); + + // Rdd = ((Rdd & (~(0xffffffff << 0x20))) | ((((sextract64(((ut64) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))) - ((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff)))), 0x0, 0x20) == ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))) - ((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff)))) ? 
((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))) - ((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff))) : h_tmp593) & 0xffffffff) << 0x20)); + RzILOpPure *op_LSHIFT_127 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0x20)); + RzILOpPure *op_NOT_128 = LOGNOT(op_LSHIFT_127); + RzILOpPure *op_AND_129 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_128); + RzILOpPure *op_RSHIFT_176 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_178 = LOGAND(op_RSHIFT_176, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_184 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_186 = LOGAND(op_RSHIFT_184, SN(64, 0xffffffff)); + RzILOpPure *op_SUB_189 = SUB(CAST(64, MSB(CAST(32, MSB(op_AND_178), DUP(op_AND_178))), CAST(32, MSB(DUP(op_AND_178)), DUP(op_AND_178))), CAST(64, MSB(CAST(32, MSB(op_AND_186), DUP(op_AND_186))), CAST(32, MSB(DUP(op_AND_186)), DUP(op_AND_186)))); + RzILOpPure *cond_232 = ITE(DUP(op_EQ_172), op_SUB_189, VARL("h_tmp593")); + RzILOpPure *op_AND_234 = LOGAND(cond_232, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_238 = SHIFTL0(op_AND_234, SN(32, 0x20)); + RzILOpPure *op_OR_239 = LOGOR(op_AND_129, op_LSHIFT_238); + RzILOpEffect *op_ASSIGN_240 = WRITE_REG(bundle, Rdd_op, op_OR_239); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((R ...; + RzILOpEffect *seq_241 = SEQN(2, seq_231, op_ASSIGN_240); + + RzILOpEffect *instruction_sequence = SEQN(2, seq_121, seq_241); + return instruction_sequence; +} + +// Rdd = vxsubaddh(Rss,Rtt):sat +RzILOpEffect *hex_il_op_s4_vxsubaddh(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_79 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, 
CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) - ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff))))), 0x0, 0x10) == ((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) - ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff))))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) - ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff)))) < 0x0) ? (-(0x1 << 0xf)) : (0x1 << 0xf) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_15 = SHIFTRA(Rss, SN(32, 0)); + RzILOpPure *op_AND_18 = LOGAND(op_RSHIFT_15, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_24 = SHIFTRA(Rtt, SN(32, 16)); + RzILOpPure *op_AND_27 = LOGAND(op_RSHIFT_24, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_SUB_31 = SUB(CAST(32, MSB(CAST(16, MSB(op_AND_18), DUP(op_AND_18))), CAST(16, MSB(DUP(op_AND_18)), DUP(op_AND_18))), CAST(32, MSB(CAST(16, MSB(op_AND_27), DUP(op_AND_27))), CAST(16, MSB(DUP(op_AND_27)), DUP(op_AND_27)))); + RzILOpPure *op_RSHIFT_40 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_43 = LOGAND(op_RSHIFT_40, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_48 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_51 = LOGAND(op_RSHIFT_48, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_SUB_55 = SUB(CAST(32, MSB(CAST(16, MSB(op_AND_43), DUP(op_AND_43))), CAST(16, MSB(DUP(op_AND_43)), DUP(op_AND_43))), CAST(32, MSB(CAST(16, MSB(op_AND_51), DUP(op_AND_51))), CAST(16, MSB(DUP(op_AND_51)), DUP(op_AND_51)))); + RzILOpPure *op_EQ_57 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_SUB_31), SN(32, 0), SN(32, 16)), CAST(64, MSB(op_SUB_55), DUP(op_SUB_55))); + RzILOpPure *op_RSHIFT_83 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_86 = LOGAND(op_RSHIFT_83, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_91 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure 
*op_AND_94 = LOGAND(op_RSHIFT_91, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_SUB_98 = SUB(CAST(32, MSB(CAST(16, MSB(op_AND_86), DUP(op_AND_86))), CAST(16, MSB(DUP(op_AND_86)), DUP(op_AND_86))), CAST(32, MSB(CAST(16, MSB(op_AND_94), DUP(op_AND_94))), CAST(16, MSB(DUP(op_AND_94)), DUP(op_AND_94)))); + RzILOpPure *op_LT_100 = SLT(op_SUB_98, SN(32, 0)); + RzILOpPure *op_LSHIFT_105 = SHIFTL0(SN(64, 1), SN(32, 15)); + RzILOpPure *op_NEG_106 = NEG(op_LSHIFT_105); + RzILOpPure *op_LSHIFT_111 = SHIFTL0(SN(64, 1), SN(32, 15)); + RzILOpPure *op_SUB_114 = SUB(op_LSHIFT_111, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_115 = ITE(op_LT_100, op_NEG_106, op_SUB_114); + RzILOpEffect *gcc_expr_116 = BRANCH(op_EQ_57, EMPTY(), set_usr_field_call_79); + + // h_tmp594 = HYB(gcc_expr_if ((sextract64(((ut64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) - ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff))))), 0x0, 0x10) == ((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) - ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff))))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) - ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff)))) < 0x0) ? (-(0x1 << 0xf)) : (0x1 << 0xf) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_118 = SETL("h_tmp594", cond_115); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st32) ((st16) ((Rss > ...; + RzILOpEffect *seq_119 = SEQN(2, gcc_expr_116, op_ASSIGN_hybrid_tmp_118); + + // Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << 0x0)))) | (((ut64) (((sextract64(((ut64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) - ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff))))), 0x0, 0x10) == ((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) - ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff)))))) ? 
((st64) ((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) - ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff))))) : h_tmp594) & ((st64) 0xffff))) << 0x0))); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(SN(64, 0xffff), SN(32, 0)); + RzILOpPure *op_NOT_6 = LOGNOT(op_LSHIFT_5); + RzILOpPure *op_AND_7 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_6); + RzILOpPure *op_RSHIFT_61 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_64 = LOGAND(op_RSHIFT_61, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_69 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_72 = LOGAND(op_RSHIFT_69, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_SUB_76 = SUB(CAST(32, MSB(CAST(16, MSB(op_AND_64), DUP(op_AND_64))), CAST(16, MSB(DUP(op_AND_64)), DUP(op_AND_64))), CAST(32, MSB(CAST(16, MSB(op_AND_72), DUP(op_AND_72))), CAST(16, MSB(DUP(op_AND_72)), DUP(op_AND_72)))); + RzILOpPure *cond_121 = ITE(DUP(op_EQ_57), CAST(64, MSB(op_SUB_76), DUP(op_SUB_76)), VARL("h_tmp594")); + RzILOpPure *op_AND_124 = LOGAND(cond_121, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_LSHIFT_129 = SHIFTL0(CAST(64, IL_FALSE, op_AND_124), SN(32, 0)); + RzILOpPure *op_OR_131 = LOGOR(CAST(64, IL_FALSE, op_AND_7), op_LSHIFT_129); + RzILOpEffect *op_ASSIGN_133 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_131)); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st32) ((st16) ((R ...; + RzILOpEffect *seq_134 = SEQN(2, seq_119, op_ASSIGN_133); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_212 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) + ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))), 0x0, 0x10) == ((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) + ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, 
((ut32) 0x1))}, ((((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) + ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff)))) < 0x0) ? (-(0x1 << 0xf)) : (0x1 << 0xf) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_149 = SHIFTRA(DUP(Rss), SN(32, 16)); + RzILOpPure *op_AND_152 = LOGAND(op_RSHIFT_149, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_157 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_160 = LOGAND(op_RSHIFT_157, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_ADD_164 = ADD(CAST(32, MSB(CAST(16, MSB(op_AND_152), DUP(op_AND_152))), CAST(16, MSB(DUP(op_AND_152)), DUP(op_AND_152))), CAST(32, MSB(CAST(16, MSB(op_AND_160), DUP(op_AND_160))), CAST(16, MSB(DUP(op_AND_160)), DUP(op_AND_160)))); + RzILOpPure *op_RSHIFT_173 = SHIFTRA(DUP(Rss), SN(32, 16)); + RzILOpPure *op_AND_176 = LOGAND(op_RSHIFT_173, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_181 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_184 = LOGAND(op_RSHIFT_181, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_ADD_188 = ADD(CAST(32, MSB(CAST(16, MSB(op_AND_176), DUP(op_AND_176))), CAST(16, MSB(DUP(op_AND_176)), DUP(op_AND_176))), CAST(32, MSB(CAST(16, MSB(op_AND_184), DUP(op_AND_184))), CAST(16, MSB(DUP(op_AND_184)), DUP(op_AND_184)))); + RzILOpPure *op_EQ_190 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_ADD_164), SN(32, 0), SN(32, 16)), CAST(64, MSB(op_ADD_188), DUP(op_ADD_188))); + RzILOpPure *op_RSHIFT_216 = SHIFTRA(DUP(Rss), SN(32, 16)); + RzILOpPure *op_AND_219 = LOGAND(op_RSHIFT_216, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_224 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_227 = LOGAND(op_RSHIFT_224, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_ADD_231 = ADD(CAST(32, MSB(CAST(16, MSB(op_AND_219), DUP(op_AND_219))), CAST(16, MSB(DUP(op_AND_219)), DUP(op_AND_219))), CAST(32, MSB(CAST(16, MSB(op_AND_227), DUP(op_AND_227))), CAST(16, MSB(DUP(op_AND_227)), 
DUP(op_AND_227)))); + RzILOpPure *op_LT_233 = SLT(op_ADD_231, SN(32, 0)); + RzILOpPure *op_LSHIFT_238 = SHIFTL0(SN(64, 1), SN(32, 15)); + RzILOpPure *op_NEG_239 = NEG(op_LSHIFT_238); + RzILOpPure *op_LSHIFT_244 = SHIFTL0(SN(64, 1), SN(32, 15)); + RzILOpPure *op_SUB_247 = SUB(op_LSHIFT_244, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_248 = ITE(op_LT_233, op_NEG_239, op_SUB_247); + RzILOpEffect *gcc_expr_249 = BRANCH(op_EQ_190, EMPTY(), set_usr_field_call_212); + + // h_tmp595 = HYB(gcc_expr_if ((sextract64(((ut64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) + ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))), 0x0, 0x10) == ((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) + ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) + ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff)))) < 0x0) ? (-(0x1 << 0xf)) : (0x1 << 0xf) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_251 = SETL("h_tmp595", cond_248); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st32) ((st16) ((Rss > ...; + RzILOpEffect *seq_252 = SEQN(2, gcc_expr_249, op_ASSIGN_hybrid_tmp_251); + + // Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << 0x10)))) | (((ut64) (((sextract64(((ut64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) + ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))), 0x0, 0x10) == ((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) + ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff)))))) ? 
((st64) ((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) + ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff))))) : h_tmp595) & ((st64) 0xffff))) << 0x10))); + RzILOpPure *op_LSHIFT_140 = SHIFTL0(SN(64, 0xffff), SN(32, 16)); + RzILOpPure *op_NOT_141 = LOGNOT(op_LSHIFT_140); + RzILOpPure *op_AND_142 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_141); + RzILOpPure *op_RSHIFT_194 = SHIFTRA(DUP(Rss), SN(32, 16)); + RzILOpPure *op_AND_197 = LOGAND(op_RSHIFT_194, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_202 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_205 = LOGAND(op_RSHIFT_202, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_ADD_209 = ADD(CAST(32, MSB(CAST(16, MSB(op_AND_197), DUP(op_AND_197))), CAST(16, MSB(DUP(op_AND_197)), DUP(op_AND_197))), CAST(32, MSB(CAST(16, MSB(op_AND_205), DUP(op_AND_205))), CAST(16, MSB(DUP(op_AND_205)), DUP(op_AND_205)))); + RzILOpPure *cond_254 = ITE(DUP(op_EQ_190), CAST(64, MSB(op_ADD_209), DUP(op_ADD_209)), VARL("h_tmp595")); + RzILOpPure *op_AND_257 = LOGAND(cond_254, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_LSHIFT_262 = SHIFTL0(CAST(64, IL_FALSE, op_AND_257), SN(32, 16)); + RzILOpPure *op_OR_264 = LOGOR(CAST(64, IL_FALSE, op_AND_142), op_LSHIFT_262); + RzILOpEffect *op_ASSIGN_266 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_264)); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st32) ((st16) ((R ...; + RzILOpEffect *seq_267 = SEQN(2, seq_252, op_ASSIGN_266); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_345 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) - ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff))))), 0x0, 0x10) == ((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) - ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff))))))) {{}} else 
{set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) - ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff)))) < 0x0) ? (-(0x1 << 0xf)) : (0x1 << 0xf) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_282 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_285 = LOGAND(op_RSHIFT_282, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_290 = SHIFTRA(DUP(Rtt), SN(32, 0x30)); + RzILOpPure *op_AND_293 = LOGAND(op_RSHIFT_290, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_SUB_297 = SUB(CAST(32, MSB(CAST(16, MSB(op_AND_285), DUP(op_AND_285))), CAST(16, MSB(DUP(op_AND_285)), DUP(op_AND_285))), CAST(32, MSB(CAST(16, MSB(op_AND_293), DUP(op_AND_293))), CAST(16, MSB(DUP(op_AND_293)), DUP(op_AND_293)))); + RzILOpPure *op_RSHIFT_306 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_309 = LOGAND(op_RSHIFT_306, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_314 = SHIFTRA(DUP(Rtt), SN(32, 0x30)); + RzILOpPure *op_AND_317 = LOGAND(op_RSHIFT_314, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_SUB_321 = SUB(CAST(32, MSB(CAST(16, MSB(op_AND_309), DUP(op_AND_309))), CAST(16, MSB(DUP(op_AND_309)), DUP(op_AND_309))), CAST(32, MSB(CAST(16, MSB(op_AND_317), DUP(op_AND_317))), CAST(16, MSB(DUP(op_AND_317)), DUP(op_AND_317)))); + RzILOpPure *op_EQ_323 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_SUB_297), SN(32, 0), SN(32, 16)), CAST(64, MSB(op_SUB_321), DUP(op_SUB_321))); + RzILOpPure *op_RSHIFT_349 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_352 = LOGAND(op_RSHIFT_349, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_357 = SHIFTRA(DUP(Rtt), SN(32, 0x30)); + RzILOpPure *op_AND_360 = LOGAND(op_RSHIFT_357, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_SUB_364 = SUB(CAST(32, MSB(CAST(16, MSB(op_AND_352), DUP(op_AND_352))), CAST(16, MSB(DUP(op_AND_352)), DUP(op_AND_352))), CAST(32, MSB(CAST(16, 
MSB(op_AND_360), DUP(op_AND_360))), CAST(16, MSB(DUP(op_AND_360)), DUP(op_AND_360)))); + RzILOpPure *op_LT_366 = SLT(op_SUB_364, SN(32, 0)); + RzILOpPure *op_LSHIFT_371 = SHIFTL0(SN(64, 1), SN(32, 15)); + RzILOpPure *op_NEG_372 = NEG(op_LSHIFT_371); + RzILOpPure *op_LSHIFT_377 = SHIFTL0(SN(64, 1), SN(32, 15)); + RzILOpPure *op_SUB_380 = SUB(op_LSHIFT_377, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_381 = ITE(op_LT_366, op_NEG_372, op_SUB_380); + RzILOpEffect *gcc_expr_382 = BRANCH(op_EQ_323, EMPTY(), set_usr_field_call_345); + + // h_tmp596 = HYB(gcc_expr_if ((sextract64(((ut64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) - ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff))))), 0x0, 0x10) == ((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) - ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff))))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) - ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff)))) < 0x0) ? (-(0x1 << 0xf)) : (0x1 << 0xf) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_384 = SETL("h_tmp596", cond_381); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st32) ((st16) ((Rss > ...; + RzILOpEffect *seq_385 = SEQN(2, gcc_expr_382, op_ASSIGN_hybrid_tmp_384); + + // Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << 0x20)))) | (((ut64) (((sextract64(((ut64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) - ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff))))), 0x0, 0x10) == ((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) - ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff)))))) ? 
((st64) ((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) - ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff))))) : h_tmp596) & ((st64) 0xffff))) << 0x20))); + RzILOpPure *op_LSHIFT_273 = SHIFTL0(SN(64, 0xffff), SN(32, 0x20)); + RzILOpPure *op_NOT_274 = LOGNOT(op_LSHIFT_273); + RzILOpPure *op_AND_275 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_274); + RzILOpPure *op_RSHIFT_327 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_330 = LOGAND(op_RSHIFT_327, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_335 = SHIFTRA(DUP(Rtt), SN(32, 0x30)); + RzILOpPure *op_AND_338 = LOGAND(op_RSHIFT_335, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_SUB_342 = SUB(CAST(32, MSB(CAST(16, MSB(op_AND_330), DUP(op_AND_330))), CAST(16, MSB(DUP(op_AND_330)), DUP(op_AND_330))), CAST(32, MSB(CAST(16, MSB(op_AND_338), DUP(op_AND_338))), CAST(16, MSB(DUP(op_AND_338)), DUP(op_AND_338)))); + RzILOpPure *cond_387 = ITE(DUP(op_EQ_323), CAST(64, MSB(op_SUB_342), DUP(op_SUB_342)), VARL("h_tmp596")); + RzILOpPure *op_AND_390 = LOGAND(cond_387, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_LSHIFT_395 = SHIFTL0(CAST(64, IL_FALSE, op_AND_390), SN(32, 0x20)); + RzILOpPure *op_OR_397 = LOGOR(CAST(64, IL_FALSE, op_AND_275), op_LSHIFT_395); + RzILOpEffect *op_ASSIGN_399 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_397)); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st32) ((st16) ((R ...; + RzILOpEffect *seq_400 = SEQN(2, seq_385, op_ASSIGN_399); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_478 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) + ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))), 0x0, 0x10) == ((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) + ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))))) {{}} else 
{set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) + ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff)))) < 0x0) ? (-(0x1 << 0xf)) : (0x1 << 0xf) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_415 = SHIFTRA(DUP(Rss), SN(32, 0x30)); + RzILOpPure *op_AND_418 = LOGAND(op_RSHIFT_415, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_423 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_426 = LOGAND(op_RSHIFT_423, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_ADD_430 = ADD(CAST(32, MSB(CAST(16, MSB(op_AND_418), DUP(op_AND_418))), CAST(16, MSB(DUP(op_AND_418)), DUP(op_AND_418))), CAST(32, MSB(CAST(16, MSB(op_AND_426), DUP(op_AND_426))), CAST(16, MSB(DUP(op_AND_426)), DUP(op_AND_426)))); + RzILOpPure *op_RSHIFT_439 = SHIFTRA(DUP(Rss), SN(32, 0x30)); + RzILOpPure *op_AND_442 = LOGAND(op_RSHIFT_439, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_447 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_450 = LOGAND(op_RSHIFT_447, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_ADD_454 = ADD(CAST(32, MSB(CAST(16, MSB(op_AND_442), DUP(op_AND_442))), CAST(16, MSB(DUP(op_AND_442)), DUP(op_AND_442))), CAST(32, MSB(CAST(16, MSB(op_AND_450), DUP(op_AND_450))), CAST(16, MSB(DUP(op_AND_450)), DUP(op_AND_450)))); + RzILOpPure *op_EQ_456 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_ADD_430), SN(32, 0), SN(32, 16)), CAST(64, MSB(op_ADD_454), DUP(op_ADD_454))); + RzILOpPure *op_RSHIFT_482 = SHIFTRA(DUP(Rss), SN(32, 0x30)); + RzILOpPure *op_AND_485 = LOGAND(op_RSHIFT_482, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_490 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_493 = LOGAND(op_RSHIFT_490, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_ADD_497 = ADD(CAST(32, MSB(CAST(16, MSB(op_AND_485), DUP(op_AND_485))), CAST(16, MSB(DUP(op_AND_485)), DUP(op_AND_485))), CAST(32, MSB(CAST(16, 
MSB(op_AND_493), DUP(op_AND_493))), CAST(16, MSB(DUP(op_AND_493)), DUP(op_AND_493)))); + RzILOpPure *op_LT_499 = SLT(op_ADD_497, SN(32, 0)); + RzILOpPure *op_LSHIFT_504 = SHIFTL0(SN(64, 1), SN(32, 15)); + RzILOpPure *op_NEG_505 = NEG(op_LSHIFT_504); + RzILOpPure *op_LSHIFT_510 = SHIFTL0(SN(64, 1), SN(32, 15)); + RzILOpPure *op_SUB_513 = SUB(op_LSHIFT_510, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_514 = ITE(op_LT_499, op_NEG_505, op_SUB_513); + RzILOpEffect *gcc_expr_515 = BRANCH(op_EQ_456, EMPTY(), set_usr_field_call_478); + + // h_tmp597 = HYB(gcc_expr_if ((sextract64(((ut64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) + ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))), 0x0, 0x10) == ((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) + ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) + ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff)))) < 0x0) ? (-(0x1 << 0xf)) : (0x1 << 0xf) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_517 = SETL("h_tmp597", cond_514); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st32) ((st16) ((Rss > ...; + RzILOpEffect *seq_518 = SEQN(2, gcc_expr_515, op_ASSIGN_hybrid_tmp_517); + + // Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << 0x30)))) | (((ut64) (((sextract64(((ut64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) + ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))), 0x0, 0x10) == ((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) + ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff)))))) ? 
((st64) ((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) + ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff))))) : h_tmp597) & ((st64) 0xffff))) << 0x30))); + RzILOpPure *op_LSHIFT_406 = SHIFTL0(SN(64, 0xffff), SN(32, 0x30)); + RzILOpPure *op_NOT_407 = LOGNOT(op_LSHIFT_406); + RzILOpPure *op_AND_408 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_407); + RzILOpPure *op_RSHIFT_460 = SHIFTRA(DUP(Rss), SN(32, 0x30)); + RzILOpPure *op_AND_463 = LOGAND(op_RSHIFT_460, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_468 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_471 = LOGAND(op_RSHIFT_468, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_ADD_475 = ADD(CAST(32, MSB(CAST(16, MSB(op_AND_463), DUP(op_AND_463))), CAST(16, MSB(DUP(op_AND_463)), DUP(op_AND_463))), CAST(32, MSB(CAST(16, MSB(op_AND_471), DUP(op_AND_471))), CAST(16, MSB(DUP(op_AND_471)), DUP(op_AND_471)))); + RzILOpPure *cond_520 = ITE(DUP(op_EQ_456), CAST(64, MSB(op_ADD_475), DUP(op_ADD_475)), VARL("h_tmp597")); + RzILOpPure *op_AND_523 = LOGAND(cond_520, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_LSHIFT_528 = SHIFTL0(CAST(64, IL_FALSE, op_AND_523), SN(32, 0x30)); + RzILOpPure *op_OR_530 = LOGOR(CAST(64, IL_FALSE, op_AND_408), op_LSHIFT_528); + RzILOpEffect *op_ASSIGN_532 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_530)); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st32) ((st16) ((R ...; + RzILOpEffect *seq_533 = SEQN(2, seq_518, op_ASSIGN_532); + + RzILOpEffect *instruction_sequence = SEQN(4, seq_134, seq_267, seq_400, seq_533); + return instruction_sequence; +} + +// Rdd = vxsubaddh(Rss,Rtt):rnd:>>1:sat +RzILOpEffect *hex_il_op_s4_vxsubaddhr(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = 
ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_91 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) (((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) - ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff)))) + 0x1 >> 0x1)), 0x0, 0x10) == ((st64) (((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) - ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff)))) + 0x1 >> 0x1)))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) - ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff)))) + 0x1 >> 0x1) < 0x0) ? (-(0x1 << 0xf)) : (0x1 << 0xf) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_15 = SHIFTRA(Rss, SN(32, 0)); + RzILOpPure *op_AND_18 = LOGAND(op_RSHIFT_15, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_24 = SHIFTRA(Rtt, SN(32, 16)); + RzILOpPure *op_AND_27 = LOGAND(op_RSHIFT_24, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_SUB_31 = SUB(CAST(32, MSB(CAST(16, MSB(op_AND_18), DUP(op_AND_18))), CAST(16, MSB(DUP(op_AND_18)), DUP(op_AND_18))), CAST(32, MSB(CAST(16, MSB(op_AND_27), DUP(op_AND_27))), CAST(16, MSB(DUP(op_AND_27)), DUP(op_AND_27)))); + RzILOpPure *op_ADD_33 = ADD(op_SUB_31, SN(32, 1)); + RzILOpPure *op_RSHIFT_35 = SHIFTRA(op_ADD_33, SN(32, 1)); + RzILOpPure *op_RSHIFT_44 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_47 = LOGAND(op_RSHIFT_44, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_52 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_55 = LOGAND(op_RSHIFT_52, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_SUB_59 = SUB(CAST(32, MSB(CAST(16, MSB(op_AND_47), DUP(op_AND_47))), CAST(16, MSB(DUP(op_AND_47)), DUP(op_AND_47))), CAST(32, MSB(CAST(16, MSB(op_AND_55), DUP(op_AND_55))), CAST(16, 
MSB(DUP(op_AND_55)), DUP(op_AND_55)))); + RzILOpPure *op_ADD_61 = ADD(op_SUB_59, SN(32, 1)); + RzILOpPure *op_RSHIFT_63 = SHIFTRA(op_ADD_61, SN(32, 1)); + RzILOpPure *op_EQ_65 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_RSHIFT_35), SN(32, 0), SN(32, 16)), CAST(64, MSB(op_RSHIFT_63), DUP(op_RSHIFT_63))); + RzILOpPure *op_RSHIFT_95 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_98 = LOGAND(op_RSHIFT_95, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_103 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_106 = LOGAND(op_RSHIFT_103, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_SUB_110 = SUB(CAST(32, MSB(CAST(16, MSB(op_AND_98), DUP(op_AND_98))), CAST(16, MSB(DUP(op_AND_98)), DUP(op_AND_98))), CAST(32, MSB(CAST(16, MSB(op_AND_106), DUP(op_AND_106))), CAST(16, MSB(DUP(op_AND_106)), DUP(op_AND_106)))); + RzILOpPure *op_ADD_112 = ADD(op_SUB_110, SN(32, 1)); + RzILOpPure *op_RSHIFT_114 = SHIFTRA(op_ADD_112, SN(32, 1)); + RzILOpPure *op_LT_116 = SLT(op_RSHIFT_114, SN(32, 0)); + RzILOpPure *op_LSHIFT_121 = SHIFTL0(SN(64, 1), SN(32, 15)); + RzILOpPure *op_NEG_122 = NEG(op_LSHIFT_121); + RzILOpPure *op_LSHIFT_127 = SHIFTL0(SN(64, 1), SN(32, 15)); + RzILOpPure *op_SUB_130 = SUB(op_LSHIFT_127, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_131 = ITE(op_LT_116, op_NEG_122, op_SUB_130); + RzILOpEffect *gcc_expr_132 = BRANCH(op_EQ_65, EMPTY(), set_usr_field_call_91); + + // h_tmp598 = HYB(gcc_expr_if ((sextract64(((ut64) (((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) - ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff)))) + 0x1 >> 0x1)), 0x0, 0x10) == ((st64) (((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) - ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff)))) + 0x1 >> 0x1)))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) - ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff)))) + 0x1 >> 0x1) < 0x0) ? 
(-(0x1 << 0xf)) : (0x1 << 0xf) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_134 = SETL("h_tmp598", cond_131); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st32) ((st16) ((Rss ...; + RzILOpEffect *seq_135 = SEQN(2, gcc_expr_132, op_ASSIGN_hybrid_tmp_134); + + // Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << 0x0)))) | (((ut64) (((sextract64(((ut64) (((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) - ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff)))) + 0x1 >> 0x1)), 0x0, 0x10) == ((st64) (((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) - ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff)))) + 0x1 >> 0x1))) ? ((st64) (((st32) ((st16) ((Rss >> 0x0) & ((st64) 0xffff)))) - ((st32) ((st16) ((Rtt >> 0x10) & ((st64) 0xffff)))) + 0x1 >> 0x1)) : h_tmp598) & ((st64) 0xffff))) << 0x0))); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(SN(64, 0xffff), SN(32, 0)); + RzILOpPure *op_NOT_6 = LOGNOT(op_LSHIFT_5); + RzILOpPure *op_AND_7 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_6); + RzILOpPure *op_RSHIFT_69 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_72 = LOGAND(op_RSHIFT_69, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_77 = SHIFTRA(DUP(Rtt), SN(32, 16)); + RzILOpPure *op_AND_80 = LOGAND(op_RSHIFT_77, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_SUB_84 = SUB(CAST(32, MSB(CAST(16, MSB(op_AND_72), DUP(op_AND_72))), CAST(16, MSB(DUP(op_AND_72)), DUP(op_AND_72))), CAST(32, MSB(CAST(16, MSB(op_AND_80), DUP(op_AND_80))), CAST(16, MSB(DUP(op_AND_80)), DUP(op_AND_80)))); + RzILOpPure *op_ADD_86 = ADD(op_SUB_84, SN(32, 1)); + RzILOpPure *op_RSHIFT_88 = SHIFTRA(op_ADD_86, SN(32, 1)); + RzILOpPure *cond_137 = ITE(DUP(op_EQ_65), CAST(64, MSB(op_RSHIFT_88), DUP(op_RSHIFT_88)), VARL("h_tmp598")); + RzILOpPure *op_AND_140 = LOGAND(cond_137, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_LSHIFT_145 = SHIFTL0(CAST(64, IL_FALSE, op_AND_140), SN(32, 0)); + RzILOpPure *op_OR_147 = LOGOR(CAST(64, IL_FALSE, op_AND_7), 
op_LSHIFT_145); + RzILOpEffect *op_ASSIGN_149 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_147)); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st32) ((st16) (( ...; + RzILOpEffect *seq_150 = SEQN(2, seq_135, op_ASSIGN_149); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_240 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) (((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) + ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff)))) + 0x1 >> 0x1)), 0x0, 0x10) == ((st64) (((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) + ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff)))) + 0x1 >> 0x1)))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) + ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff)))) + 0x1 >> 0x1) < 0x0) ? (-(0x1 << 0xf)) : (0x1 << 0xf) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_165 = SHIFTRA(DUP(Rss), SN(32, 16)); + RzILOpPure *op_AND_168 = LOGAND(op_RSHIFT_165, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_173 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_176 = LOGAND(op_RSHIFT_173, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_ADD_180 = ADD(CAST(32, MSB(CAST(16, MSB(op_AND_168), DUP(op_AND_168))), CAST(16, MSB(DUP(op_AND_168)), DUP(op_AND_168))), CAST(32, MSB(CAST(16, MSB(op_AND_176), DUP(op_AND_176))), CAST(16, MSB(DUP(op_AND_176)), DUP(op_AND_176)))); + RzILOpPure *op_ADD_182 = ADD(op_ADD_180, SN(32, 1)); + RzILOpPure *op_RSHIFT_184 = SHIFTRA(op_ADD_182, SN(32, 1)); + RzILOpPure *op_RSHIFT_193 = SHIFTRA(DUP(Rss), SN(32, 16)); + RzILOpPure *op_AND_196 = LOGAND(op_RSHIFT_193, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_201 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_204 = LOGAND(op_RSHIFT_201, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + 
RzILOpPure *op_ADD_208 = ADD(CAST(32, MSB(CAST(16, MSB(op_AND_196), DUP(op_AND_196))), CAST(16, MSB(DUP(op_AND_196)), DUP(op_AND_196))), CAST(32, MSB(CAST(16, MSB(op_AND_204), DUP(op_AND_204))), CAST(16, MSB(DUP(op_AND_204)), DUP(op_AND_204)))); + RzILOpPure *op_ADD_210 = ADD(op_ADD_208, SN(32, 1)); + RzILOpPure *op_RSHIFT_212 = SHIFTRA(op_ADD_210, SN(32, 1)); + RzILOpPure *op_EQ_214 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_RSHIFT_184), SN(32, 0), SN(32, 16)), CAST(64, MSB(op_RSHIFT_212), DUP(op_RSHIFT_212))); + RzILOpPure *op_RSHIFT_244 = SHIFTRA(DUP(Rss), SN(32, 16)); + RzILOpPure *op_AND_247 = LOGAND(op_RSHIFT_244, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_252 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_255 = LOGAND(op_RSHIFT_252, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_ADD_259 = ADD(CAST(32, MSB(CAST(16, MSB(op_AND_247), DUP(op_AND_247))), CAST(16, MSB(DUP(op_AND_247)), DUP(op_AND_247))), CAST(32, MSB(CAST(16, MSB(op_AND_255), DUP(op_AND_255))), CAST(16, MSB(DUP(op_AND_255)), DUP(op_AND_255)))); + RzILOpPure *op_ADD_261 = ADD(op_ADD_259, SN(32, 1)); + RzILOpPure *op_RSHIFT_263 = SHIFTRA(op_ADD_261, SN(32, 1)); + RzILOpPure *op_LT_265 = SLT(op_RSHIFT_263, SN(32, 0)); + RzILOpPure *op_LSHIFT_270 = SHIFTL0(SN(64, 1), SN(32, 15)); + RzILOpPure *op_NEG_271 = NEG(op_LSHIFT_270); + RzILOpPure *op_LSHIFT_276 = SHIFTL0(SN(64, 1), SN(32, 15)); + RzILOpPure *op_SUB_279 = SUB(op_LSHIFT_276, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_280 = ITE(op_LT_265, op_NEG_271, op_SUB_279); + RzILOpEffect *gcc_expr_281 = BRANCH(op_EQ_214, EMPTY(), set_usr_field_call_240); + + // h_tmp599 = HYB(gcc_expr_if ((sextract64(((ut64) (((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) + ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff)))) + 0x1 >> 0x1)), 0x0, 0x10) == ((st64) (((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) + ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff)))) + 0x1 >> 0x1)))) {{}} else 
{set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) + ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff)))) + 0x1 >> 0x1) < 0x0) ? (-(0x1 << 0xf)) : (0x1 << 0xf) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_283 = SETL("h_tmp599", cond_280); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st32) ((st16) ((Rss ...; + RzILOpEffect *seq_284 = SEQN(2, gcc_expr_281, op_ASSIGN_hybrid_tmp_283); + + // Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << 0x10)))) | (((ut64) (((sextract64(((ut64) (((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) + ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff)))) + 0x1 >> 0x1)), 0x0, 0x10) == ((st64) (((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) + ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff)))) + 0x1 >> 0x1))) ? ((st64) (((st32) ((st16) ((Rss >> 0x10) & ((st64) 0xffff)))) + ((st32) ((st16) ((Rtt >> 0x0) & ((st64) 0xffff)))) + 0x1 >> 0x1)) : h_tmp599) & ((st64) 0xffff))) << 0x10))); + RzILOpPure *op_LSHIFT_156 = SHIFTL0(SN(64, 0xffff), SN(32, 16)); + RzILOpPure *op_NOT_157 = LOGNOT(op_LSHIFT_156); + RzILOpPure *op_AND_158 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_157); + RzILOpPure *op_RSHIFT_218 = SHIFTRA(DUP(Rss), SN(32, 16)); + RzILOpPure *op_AND_221 = LOGAND(op_RSHIFT_218, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_226 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_229 = LOGAND(op_RSHIFT_226, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_ADD_233 = ADD(CAST(32, MSB(CAST(16, MSB(op_AND_221), DUP(op_AND_221))), CAST(16, MSB(DUP(op_AND_221)), DUP(op_AND_221))), CAST(32, MSB(CAST(16, MSB(op_AND_229), DUP(op_AND_229))), CAST(16, MSB(DUP(op_AND_229)), DUP(op_AND_229)))); + RzILOpPure *op_ADD_235 = ADD(op_ADD_233, SN(32, 1)); + RzILOpPure *op_RSHIFT_237 = SHIFTRA(op_ADD_235, SN(32, 1)); + RzILOpPure *cond_286 = ITE(DUP(op_EQ_214), CAST(64, MSB(op_RSHIFT_237), DUP(op_RSHIFT_237)), VARL("h_tmp599")); + RzILOpPure 
*op_AND_289 = LOGAND(cond_286, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_LSHIFT_294 = SHIFTL0(CAST(64, IL_FALSE, op_AND_289), SN(32, 16)); + RzILOpPure *op_OR_296 = LOGOR(CAST(64, IL_FALSE, op_AND_158), op_LSHIFT_294); + RzILOpEffect *op_ASSIGN_298 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_296)); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st32) ((st16) (( ...; + RzILOpEffect *seq_299 = SEQN(2, seq_284, op_ASSIGN_298); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_389 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) (((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) - ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff)))) + 0x1 >> 0x1)), 0x0, 0x10) == ((st64) (((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) - ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff)))) + 0x1 >> 0x1)))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) - ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff)))) + 0x1 >> 0x1) < 0x0) ? 
(-(0x1 << 0xf)) : (0x1 << 0xf) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_314 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_317 = LOGAND(op_RSHIFT_314, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_322 = SHIFTRA(DUP(Rtt), SN(32, 0x30)); + RzILOpPure *op_AND_325 = LOGAND(op_RSHIFT_322, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_SUB_329 = SUB(CAST(32, MSB(CAST(16, MSB(op_AND_317), DUP(op_AND_317))), CAST(16, MSB(DUP(op_AND_317)), DUP(op_AND_317))), CAST(32, MSB(CAST(16, MSB(op_AND_325), DUP(op_AND_325))), CAST(16, MSB(DUP(op_AND_325)), DUP(op_AND_325)))); + RzILOpPure *op_ADD_331 = ADD(op_SUB_329, SN(32, 1)); + RzILOpPure *op_RSHIFT_333 = SHIFTRA(op_ADD_331, SN(32, 1)); + RzILOpPure *op_RSHIFT_342 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_345 = LOGAND(op_RSHIFT_342, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_350 = SHIFTRA(DUP(Rtt), SN(32, 0x30)); + RzILOpPure *op_AND_353 = LOGAND(op_RSHIFT_350, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_SUB_357 = SUB(CAST(32, MSB(CAST(16, MSB(op_AND_345), DUP(op_AND_345))), CAST(16, MSB(DUP(op_AND_345)), DUP(op_AND_345))), CAST(32, MSB(CAST(16, MSB(op_AND_353), DUP(op_AND_353))), CAST(16, MSB(DUP(op_AND_353)), DUP(op_AND_353)))); + RzILOpPure *op_ADD_359 = ADD(op_SUB_357, SN(32, 1)); + RzILOpPure *op_RSHIFT_361 = SHIFTRA(op_ADD_359, SN(32, 1)); + RzILOpPure *op_EQ_363 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_RSHIFT_333), SN(32, 0), SN(32, 16)), CAST(64, MSB(op_RSHIFT_361), DUP(op_RSHIFT_361))); + RzILOpPure *op_RSHIFT_393 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_396 = LOGAND(op_RSHIFT_393, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_401 = SHIFTRA(DUP(Rtt), SN(32, 0x30)); + RzILOpPure *op_AND_404 = LOGAND(op_RSHIFT_401, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_SUB_408 = SUB(CAST(32, MSB(CAST(16, MSB(op_AND_396), DUP(op_AND_396))), CAST(16, 
MSB(DUP(op_AND_396)), DUP(op_AND_396))), CAST(32, MSB(CAST(16, MSB(op_AND_404), DUP(op_AND_404))), CAST(16, MSB(DUP(op_AND_404)), DUP(op_AND_404)))); + RzILOpPure *op_ADD_410 = ADD(op_SUB_408, SN(32, 1)); + RzILOpPure *op_RSHIFT_412 = SHIFTRA(op_ADD_410, SN(32, 1)); + RzILOpPure *op_LT_414 = SLT(op_RSHIFT_412, SN(32, 0)); + RzILOpPure *op_LSHIFT_419 = SHIFTL0(SN(64, 1), SN(32, 15)); + RzILOpPure *op_NEG_420 = NEG(op_LSHIFT_419); + RzILOpPure *op_LSHIFT_425 = SHIFTL0(SN(64, 1), SN(32, 15)); + RzILOpPure *op_SUB_428 = SUB(op_LSHIFT_425, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_429 = ITE(op_LT_414, op_NEG_420, op_SUB_428); + RzILOpEffect *gcc_expr_430 = BRANCH(op_EQ_363, EMPTY(), set_usr_field_call_389); + + // h_tmp600 = HYB(gcc_expr_if ((sextract64(((ut64) (((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) - ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff)))) + 0x1 >> 0x1)), 0x0, 0x10) == ((st64) (((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) - ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff)))) + 0x1 >> 0x1)))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) - ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff)))) + 0x1 >> 0x1) < 0x0) ? (-(0x1 << 0xf)) : (0x1 << 0xf) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_432 = SETL("h_tmp600", cond_429); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st32) ((st16) ((Rss ...; + RzILOpEffect *seq_433 = SEQN(2, gcc_expr_430, op_ASSIGN_hybrid_tmp_432); + + // Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << 0x20)))) | (((ut64) (((sextract64(((ut64) (((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) - ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff)))) + 0x1 >> 0x1)), 0x0, 0x10) == ((st64) (((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) - ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff)))) + 0x1 >> 0x1))) ? 
((st64) (((st32) ((st16) ((Rss >> 0x20) & ((st64) 0xffff)))) - ((st32) ((st16) ((Rtt >> 0x30) & ((st64) 0xffff)))) + 0x1 >> 0x1)) : h_tmp600) & ((st64) 0xffff))) << 0x20))); + RzILOpPure *op_LSHIFT_305 = SHIFTL0(SN(64, 0xffff), SN(32, 0x20)); + RzILOpPure *op_NOT_306 = LOGNOT(op_LSHIFT_305); + RzILOpPure *op_AND_307 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_306); + RzILOpPure *op_RSHIFT_367 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_370 = LOGAND(op_RSHIFT_367, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_375 = SHIFTRA(DUP(Rtt), SN(32, 0x30)); + RzILOpPure *op_AND_378 = LOGAND(op_RSHIFT_375, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_SUB_382 = SUB(CAST(32, MSB(CAST(16, MSB(op_AND_370), DUP(op_AND_370))), CAST(16, MSB(DUP(op_AND_370)), DUP(op_AND_370))), CAST(32, MSB(CAST(16, MSB(op_AND_378), DUP(op_AND_378))), CAST(16, MSB(DUP(op_AND_378)), DUP(op_AND_378)))); + RzILOpPure *op_ADD_384 = ADD(op_SUB_382, SN(32, 1)); + RzILOpPure *op_RSHIFT_386 = SHIFTRA(op_ADD_384, SN(32, 1)); + RzILOpPure *cond_435 = ITE(DUP(op_EQ_363), CAST(64, MSB(op_RSHIFT_386), DUP(op_RSHIFT_386)), VARL("h_tmp600")); + RzILOpPure *op_AND_438 = LOGAND(cond_435, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_LSHIFT_443 = SHIFTL0(CAST(64, IL_FALSE, op_AND_438), SN(32, 0x20)); + RzILOpPure *op_OR_445 = LOGOR(CAST(64, IL_FALSE, op_AND_307), op_LSHIFT_443); + RzILOpEffect *op_ASSIGN_447 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_445)); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st32) ((st16) (( ...; + RzILOpEffect *seq_448 = SEQN(2, seq_433, op_ASSIGN_447); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_538 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) (((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) + ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff)))) + 0x1 >> 
0x1)), 0x0, 0x10) == ((st64) (((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) + ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff)))) + 0x1 >> 0x1)))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) + ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff)))) + 0x1 >> 0x1) < 0x0) ? (-(0x1 << 0xf)) : (0x1 << 0xf) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_463 = SHIFTRA(DUP(Rss), SN(32, 0x30)); + RzILOpPure *op_AND_466 = LOGAND(op_RSHIFT_463, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_471 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_474 = LOGAND(op_RSHIFT_471, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_ADD_478 = ADD(CAST(32, MSB(CAST(16, MSB(op_AND_466), DUP(op_AND_466))), CAST(16, MSB(DUP(op_AND_466)), DUP(op_AND_466))), CAST(32, MSB(CAST(16, MSB(op_AND_474), DUP(op_AND_474))), CAST(16, MSB(DUP(op_AND_474)), DUP(op_AND_474)))); + RzILOpPure *op_ADD_480 = ADD(op_ADD_478, SN(32, 1)); + RzILOpPure *op_RSHIFT_482 = SHIFTRA(op_ADD_480, SN(32, 1)); + RzILOpPure *op_RSHIFT_491 = SHIFTRA(DUP(Rss), SN(32, 0x30)); + RzILOpPure *op_AND_494 = LOGAND(op_RSHIFT_491, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_499 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_502 = LOGAND(op_RSHIFT_499, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_ADD_506 = ADD(CAST(32, MSB(CAST(16, MSB(op_AND_494), DUP(op_AND_494))), CAST(16, MSB(DUP(op_AND_494)), DUP(op_AND_494))), CAST(32, MSB(CAST(16, MSB(op_AND_502), DUP(op_AND_502))), CAST(16, MSB(DUP(op_AND_502)), DUP(op_AND_502)))); + RzILOpPure *op_ADD_508 = ADD(op_ADD_506, SN(32, 1)); + RzILOpPure *op_RSHIFT_510 = SHIFTRA(op_ADD_508, SN(32, 1)); + RzILOpPure *op_EQ_512 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_RSHIFT_482), SN(32, 0), SN(32, 16)), CAST(64, MSB(op_RSHIFT_510), DUP(op_RSHIFT_510))); + RzILOpPure *op_RSHIFT_542 = SHIFTRA(DUP(Rss), SN(32, 0x30)); + 
RzILOpPure *op_AND_545 = LOGAND(op_RSHIFT_542, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_550 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_553 = LOGAND(op_RSHIFT_550, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_ADD_557 = ADD(CAST(32, MSB(CAST(16, MSB(op_AND_545), DUP(op_AND_545))), CAST(16, MSB(DUP(op_AND_545)), DUP(op_AND_545))), CAST(32, MSB(CAST(16, MSB(op_AND_553), DUP(op_AND_553))), CAST(16, MSB(DUP(op_AND_553)), DUP(op_AND_553)))); + RzILOpPure *op_ADD_559 = ADD(op_ADD_557, SN(32, 1)); + RzILOpPure *op_RSHIFT_561 = SHIFTRA(op_ADD_559, SN(32, 1)); + RzILOpPure *op_LT_563 = SLT(op_RSHIFT_561, SN(32, 0)); + RzILOpPure *op_LSHIFT_568 = SHIFTL0(SN(64, 1), SN(32, 15)); + RzILOpPure *op_NEG_569 = NEG(op_LSHIFT_568); + RzILOpPure *op_LSHIFT_574 = SHIFTL0(SN(64, 1), SN(32, 15)); + RzILOpPure *op_SUB_577 = SUB(op_LSHIFT_574, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_578 = ITE(op_LT_563, op_NEG_569, op_SUB_577); + RzILOpEffect *gcc_expr_579 = BRANCH(op_EQ_512, EMPTY(), set_usr_field_call_538); + + // h_tmp601 = HYB(gcc_expr_if ((sextract64(((ut64) (((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) + ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff)))) + 0x1 >> 0x1)), 0x0, 0x10) == ((st64) (((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) + ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff)))) + 0x1 >> 0x1)))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) + ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff)))) + 0x1 >> 0x1) < 0x0) ? 
(-(0x1 << 0xf)) : (0x1 << 0xf) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_581 = SETL("h_tmp601", cond_578); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st32) ((st16) ((Rss ...; + RzILOpEffect *seq_582 = SEQN(2, gcc_expr_579, op_ASSIGN_hybrid_tmp_581); + + // Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << 0x30)))) | (((ut64) (((sextract64(((ut64) (((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) + ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff)))) + 0x1 >> 0x1)), 0x0, 0x10) == ((st64) (((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) + ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff)))) + 0x1 >> 0x1))) ? ((st64) (((st32) ((st16) ((Rss >> 0x30) & ((st64) 0xffff)))) + ((st32) ((st16) ((Rtt >> 0x20) & ((st64) 0xffff)))) + 0x1 >> 0x1)) : h_tmp601) & ((st64) 0xffff))) << 0x30))); + RzILOpPure *op_LSHIFT_454 = SHIFTL0(SN(64, 0xffff), SN(32, 0x30)); + RzILOpPure *op_NOT_455 = LOGNOT(op_LSHIFT_454); + RzILOpPure *op_AND_456 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_455); + RzILOpPure *op_RSHIFT_516 = SHIFTRA(DUP(Rss), SN(32, 0x30)); + RzILOpPure *op_AND_519 = LOGAND(op_RSHIFT_516, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_524 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_527 = LOGAND(op_RSHIFT_524, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_ADD_531 = ADD(CAST(32, MSB(CAST(16, MSB(op_AND_519), DUP(op_AND_519))), CAST(16, MSB(DUP(op_AND_519)), DUP(op_AND_519))), CAST(32, MSB(CAST(16, MSB(op_AND_527), DUP(op_AND_527))), CAST(16, MSB(DUP(op_AND_527)), DUP(op_AND_527)))); + RzILOpPure *op_ADD_533 = ADD(op_ADD_531, SN(32, 1)); + RzILOpPure *op_RSHIFT_535 = SHIFTRA(op_ADD_533, SN(32, 1)); + RzILOpPure *cond_584 = ITE(DUP(op_EQ_512), CAST(64, MSB(op_RSHIFT_535), DUP(op_RSHIFT_535)), VARL("h_tmp601")); + RzILOpPure *op_AND_587 = LOGAND(cond_584, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_LSHIFT_592 = SHIFTL0(CAST(64, IL_FALSE, op_AND_587), SN(32, 0x30)); + RzILOpPure 
*op_OR_594 = LOGOR(CAST(64, IL_FALSE, op_AND_456), op_LSHIFT_592); + RzILOpEffect *op_ASSIGN_596 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_594)); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) (((st32) ((st16) (( ...; + RzILOpEffect *seq_597 = SEQN(2, seq_582, op_ASSIGN_596); + + RzILOpEffect *instruction_sequence = SEQN(4, seq_150, seq_299, seq_448, seq_597); + return instruction_sequence; +} + +// Rdd = vxsubaddw(Rss,Rtt):sat +RzILOpEffect *hex_il_op_s4_vxsubaddw(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_72 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))) - ((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff)))), 0x0, 0x20) == ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))) - ((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))) - ((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff))) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_15 = SHIFTRA(Rss, SN(32, 0)); + RzILOpPure *op_AND_17 = LOGAND(op_RSHIFT_15, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_24 = SHIFTRA(Rtt, SN(32, 0x20)); + RzILOpPure *op_AND_26 = LOGAND(op_RSHIFT_24, SN(64, 0xffffffff)); + RzILOpPure *op_SUB_29 = SUB(CAST(64, MSB(CAST(32, MSB(op_AND_17), DUP(op_AND_17))), CAST(32, MSB(DUP(op_AND_17)), DUP(op_AND_17))), CAST(64, MSB(CAST(32, MSB(op_AND_26), DUP(op_AND_26))), CAST(32, MSB(DUP(op_AND_26)), DUP(op_AND_26)))); + RzILOpPure *op_RSHIFT_38 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_40 = LOGAND(op_RSHIFT_38, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_46 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_48 = LOGAND(op_RSHIFT_46, SN(64, 0xffffffff)); + RzILOpPure *op_SUB_51 = SUB(CAST(64, MSB(CAST(32, MSB(op_AND_40), DUP(op_AND_40))), CAST(32, MSB(DUP(op_AND_40)), DUP(op_AND_40))), CAST(64, MSB(CAST(32, MSB(op_AND_48), DUP(op_AND_48))), CAST(32, MSB(DUP(op_AND_48)), DUP(op_AND_48)))); + RzILOpPure *op_EQ_52 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_SUB_29), SN(32, 0), SN(32, 0x20)), op_SUB_51); + RzILOpPure *op_RSHIFT_76 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_78 = LOGAND(op_RSHIFT_76, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_84 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_86 = LOGAND(op_RSHIFT_84, SN(64, 0xffffffff)); + RzILOpPure *op_SUB_89 = SUB(CAST(64, MSB(CAST(32, MSB(op_AND_78), DUP(op_AND_78))), CAST(32, MSB(DUP(op_AND_78)), DUP(op_AND_78))), CAST(64, MSB(CAST(32, MSB(op_AND_86), DUP(op_AND_86))), CAST(32, MSB(DUP(op_AND_86)), DUP(op_AND_86)))); + RzILOpPure *op_LT_92 = SLT(op_SUB_89, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_97 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_98 = NEG(op_LSHIFT_97); + RzILOpPure *op_LSHIFT_103 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_106 = SUB(op_LSHIFT_103, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_107 = 
ITE(op_LT_92, op_NEG_98, op_SUB_106); + RzILOpEffect *gcc_expr_108 = BRANCH(op_EQ_52, EMPTY(), set_usr_field_call_72); + + // h_tmp602 = HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))) - ((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff)))), 0x0, 0x20) == ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))) - ((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))) - ((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff))) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_110 = SETL("h_tmp602", cond_107); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rss > ...; + RzILOpEffect *seq_111 = SEQN(2, gcc_expr_108, op_ASSIGN_hybrid_tmp_110); + + // Rdd = ((Rdd & (~(0xffffffff << 0x0))) | ((((sextract64(((ut64) ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))) - ((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff)))), 0x0, 0x20) == ((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))) - ((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff)))) ? 
((st64) ((st32) ((Rss >> 0x0) & 0xffffffff))) - ((st64) ((st32) ((Rtt >> 0x20) & 0xffffffff))) : h_tmp602) & 0xffffffff) << 0x0)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0)); + RzILOpPure *op_NOT_6 = LOGNOT(op_LSHIFT_5); + RzILOpPure *op_AND_7 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_6); + RzILOpPure *op_RSHIFT_56 = SHIFTRA(DUP(Rss), SN(32, 0)); + RzILOpPure *op_AND_58 = LOGAND(op_RSHIFT_56, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_64 = SHIFTRA(DUP(Rtt), SN(32, 0x20)); + RzILOpPure *op_AND_66 = LOGAND(op_RSHIFT_64, SN(64, 0xffffffff)); + RzILOpPure *op_SUB_69 = SUB(CAST(64, MSB(CAST(32, MSB(op_AND_58), DUP(op_AND_58))), CAST(32, MSB(DUP(op_AND_58)), DUP(op_AND_58))), CAST(64, MSB(CAST(32, MSB(op_AND_66), DUP(op_AND_66))), CAST(32, MSB(DUP(op_AND_66)), DUP(op_AND_66)))); + RzILOpPure *cond_112 = ITE(DUP(op_EQ_52), op_SUB_69, VARL("h_tmp602")); + RzILOpPure *op_AND_114 = LOGAND(cond_112, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_118 = SHIFTL0(op_AND_114, SN(32, 0)); + RzILOpPure *op_OR_119 = LOGOR(op_AND_7, op_LSHIFT_118); + RzILOpEffect *op_ASSIGN_120 = WRITE_REG(bundle, Rdd_op, op_OR_119); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((R ...; + RzILOpEffect *seq_121 = SEQN(2, seq_111, op_ASSIGN_120); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_192 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))) + ((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff)))), 0x0, 0x20) == ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))) + ((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))) + ((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff))) < ((st64) 0x0)) ? 
(-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpPure *op_RSHIFT_136 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_138 = LOGAND(op_RSHIFT_136, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_144 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_146 = LOGAND(op_RSHIFT_144, SN(64, 0xffffffff)); + RzILOpPure *op_ADD_149 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_138), DUP(op_AND_138))), CAST(32, MSB(DUP(op_AND_138)), DUP(op_AND_138))), CAST(64, MSB(CAST(32, MSB(op_AND_146), DUP(op_AND_146))), CAST(32, MSB(DUP(op_AND_146)), DUP(op_AND_146)))); + RzILOpPure *op_RSHIFT_158 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_160 = LOGAND(op_RSHIFT_158, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_166 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_168 = LOGAND(op_RSHIFT_166, SN(64, 0xffffffff)); + RzILOpPure *op_ADD_171 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_160), DUP(op_AND_160))), CAST(32, MSB(DUP(op_AND_160)), DUP(op_AND_160))), CAST(64, MSB(CAST(32, MSB(op_AND_168), DUP(op_AND_168))), CAST(32, MSB(DUP(op_AND_168)), DUP(op_AND_168)))); + RzILOpPure *op_EQ_172 = EQ(SEXTRACT64(CAST(64, IL_FALSE, op_ADD_149), SN(32, 0), SN(32, 0x20)), op_ADD_171); + RzILOpPure *op_RSHIFT_196 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_198 = LOGAND(op_RSHIFT_196, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_204 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_206 = LOGAND(op_RSHIFT_204, SN(64, 0xffffffff)); + RzILOpPure *op_ADD_209 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_198), DUP(op_AND_198))), CAST(32, MSB(DUP(op_AND_198)), DUP(op_AND_198))), CAST(64, MSB(CAST(32, MSB(op_AND_206), DUP(op_AND_206))), CAST(32, MSB(DUP(op_AND_206)), DUP(op_AND_206)))); + RzILOpPure *op_LT_212 = SLT(op_ADD_209, CAST(64, MSB(SN(32, 0)), SN(32, 0))); + RzILOpPure *op_LSHIFT_217 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_NEG_218 = NEG(op_LSHIFT_217); + RzILOpPure *op_LSHIFT_223 = SHIFTL0(SN(64, 1), SN(32, 31)); + RzILOpPure *op_SUB_226 = SUB(op_LSHIFT_223, 
CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_227 = ITE(op_LT_212, op_NEG_218, op_SUB_226); + RzILOpEffect *gcc_expr_228 = BRANCH(op_EQ_172, EMPTY(), set_usr_field_call_192); + + // h_tmp603 = HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))) + ((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff)))), 0x0, 0x20) == ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))) + ((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff))))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))) + ((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff))) < ((st64) 0x0)) ? (-(0x1 << 0x1f)) : (0x1 << 0x1f) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_230 = SETL("h_tmp603", cond_227); + + // seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((Rss > ...; + RzILOpEffect *seq_231 = SEQN(2, gcc_expr_228, op_ASSIGN_hybrid_tmp_230); + + // Rdd = ((Rdd & (~(0xffffffff << 0x20))) | ((((sextract64(((ut64) ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))) + ((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff)))), 0x0, 0x20) == ((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))) + ((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff)))) ? 
((st64) ((st32) ((Rss >> 0x20) & 0xffffffff))) + ((st64) ((st32) ((Rtt >> 0x0) & 0xffffffff))) : h_tmp603) & 0xffffffff) << 0x20)); + RzILOpPure *op_LSHIFT_127 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0x20)); + RzILOpPure *op_NOT_128 = LOGNOT(op_LSHIFT_127); + RzILOpPure *op_AND_129 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_128); + RzILOpPure *op_RSHIFT_176 = SHIFTRA(DUP(Rss), SN(32, 0x20)); + RzILOpPure *op_AND_178 = LOGAND(op_RSHIFT_176, SN(64, 0xffffffff)); + RzILOpPure *op_RSHIFT_184 = SHIFTRA(DUP(Rtt), SN(32, 0)); + RzILOpPure *op_AND_186 = LOGAND(op_RSHIFT_184, SN(64, 0xffffffff)); + RzILOpPure *op_ADD_189 = ADD(CAST(64, MSB(CAST(32, MSB(op_AND_178), DUP(op_AND_178))), CAST(32, MSB(DUP(op_AND_178)), DUP(op_AND_178))), CAST(64, MSB(CAST(32, MSB(op_AND_186), DUP(op_AND_186))), CAST(32, MSB(DUP(op_AND_186)), DUP(op_AND_186)))); + RzILOpPure *cond_232 = ITE(DUP(op_EQ_172), op_ADD_189, VARL("h_tmp603")); + RzILOpPure *op_AND_234 = LOGAND(cond_232, SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_238 = SHIFTL0(op_AND_234, SN(32, 0x20)); + RzILOpPure *op_OR_239 = LOGOR(op_AND_129, op_LSHIFT_238); + RzILOpEffect *op_ASSIGN_240 = WRITE_REG(bundle, Rdd_op, op_OR_239); + + // seq(seq(HYB(gcc_expr_if ((sextract64(((ut64) ((st64) ((st32) ((R ...; + RzILOpEffect *seq_241 = SEQN(2, seq_231, op_ASSIGN_240); + + RzILOpEffect *instruction_sequence = SEQN(2, seq_121, seq_241); + return instruction_sequence; +} + +#include \ No newline at end of file diff --git a/librz/arch/isa/hexagon/il_ops/hexagon_il_S5_ops.c b/librz/arch/isa/hexagon/il_ops/hexagon_il_S5_ops.c new file mode 100644 index 00000000000..2cb22e94d2b --- /dev/null +++ b/librz/arch/isa/hexagon/il_ops/hexagon_il_S5_ops.c @@ -0,0 +1,272 @@ +// SPDX-FileCopyrightText: 2021 Rot127 +// SPDX-License-Identifier: LGPL-3.0-only + +// LLVM commit: b6f51787f6c8e77143f0aef6b58ddc7c55741d5c +// LLVM commit date: 2023-11-15 07:10:59 -0800 (ISO 8601 format) +// Date of code generation: 2024-03-16 06:22:39-05:00 
+//======================================== +// The following code is generated. +// Do not edit. Repository of code generator: +// https://github.com/rizinorg/rz-hexagon + +#include +#include "../hexagon_il.h" +#include +#include + +// Rd = vasrhub(Rss,Ii):raw +RzILOpEffect *hex_il_op_s5_asrhub_rnd_sat(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: st32 i; + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_2 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_5 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp604 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_7 = SETL("h_tmp604", VARL("i")); + + // seq(h_tmp604 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_8 = SEQN(2, op_ASSIGN_hybrid_tmp_7, op_INC_5); + + // u = u; + RzILOpEffect *imm_assign_28 = SETL("u", u); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_71 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if ((extract64(((ut64) (((st32) (((st16) ((Rss >> i * 0x10) & ((st64) 0xffff))) >> u)) + 0x1 >> 0x1)), 0x0, 0x8) == ((ut64) (((st32) (((st16) ((Rss >> i * 0x10) & ((st64) 0xffff))) >> u)) + 0x1 >> 0x1)))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st32) (((st16) ((Rss >> i * 0x10) & ((st64) 0xffff))) >> u)) + 0x1 >> 0x1) < 0x0) ? 
((st64) 0x0) : (0x1 << 0x8) - ((st64) 0x1))); + RzILOpPure *op_MUL_22 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_23 = SHIFTRA(Rss, op_MUL_22); + RzILOpPure *op_AND_26 = LOGAND(op_RSHIFT_23, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_30 = SHIFTRA(CAST(16, MSB(op_AND_26), DUP(op_AND_26)), VARL("u")); + RzILOpPure *op_ADD_33 = ADD(CAST(32, MSB(op_RSHIFT_30), DUP(op_RSHIFT_30)), SN(32, 1)); + RzILOpPure *op_RSHIFT_35 = SHIFTRA(op_ADD_33, SN(32, 1)); + RzILOpPure *op_MUL_42 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_43 = SHIFTRA(DUP(Rss), op_MUL_42); + RzILOpPure *op_AND_46 = LOGAND(op_RSHIFT_43, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_48 = SHIFTRA(CAST(16, MSB(op_AND_46), DUP(op_AND_46)), VARL("u")); + RzILOpPure *op_ADD_51 = ADD(CAST(32, MSB(op_RSHIFT_48), DUP(op_RSHIFT_48)), SN(32, 1)); + RzILOpPure *op_RSHIFT_53 = SHIFTRA(op_ADD_51, SN(32, 1)); + RzILOpPure *op_EQ_55 = EQ(EXTRACT64(CAST(64, IL_FALSE, op_RSHIFT_35), SN(32, 0), SN(32, 8)), CAST(64, IL_FALSE, op_RSHIFT_53)); + RzILOpPure *op_MUL_73 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_74 = SHIFTRA(DUP(Rss), op_MUL_73); + RzILOpPure *op_AND_77 = LOGAND(op_RSHIFT_74, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_79 = SHIFTRA(CAST(16, MSB(op_AND_77), DUP(op_AND_77)), VARL("u")); + RzILOpPure *op_ADD_82 = ADD(CAST(32, MSB(op_RSHIFT_79), DUP(op_RSHIFT_79)), SN(32, 1)); + RzILOpPure *op_RSHIFT_84 = SHIFTRA(op_ADD_82, SN(32, 1)); + RzILOpPure *op_LT_86 = SLT(op_RSHIFT_84, SN(32, 0)); + RzILOpPure *op_LSHIFT_90 = SHIFTL0(SN(64, 1), SN(32, 8)); + RzILOpPure *op_SUB_93 = SUB(op_LSHIFT_90, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_95 = ITE(op_LT_86, CAST(64, MSB(SN(32, 0)), SN(32, 0)), op_SUB_93); + RzILOpEffect *gcc_expr_96 = BRANCH(op_EQ_55, EMPTY(), set_usr_field_call_71); + + // h_tmp605 = HYB(gcc_expr_if ((extract64(((ut64) (((st32) (((st16) ((Rss >> i * 0x10) & ((st64) 0xffff))) >> u)) 
+ 0x1 >> 0x1)), 0x0, 0x8) == ((ut64) (((st32) (((st16) ((Rss >> i * 0x10) & ((st64) 0xffff))) >> u)) + 0x1 >> 0x1)))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, (((((st32) (((st16) ((Rss >> i * 0x10) & ((st64) 0xffff))) >> u)) + 0x1 >> 0x1) < 0x0) ? ((st64) 0x0) : (0x1 << 0x8) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_98 = SETL("h_tmp605", cond_95); + + // seq(HYB(gcc_expr_if ((extract64(((ut64) (((st32) (((st16) ((Rss ...; + RzILOpEffect *seq_99 = SEQN(2, gcc_expr_96, op_ASSIGN_hybrid_tmp_98); + + // Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xff << i * 0x8)))) | (((ut64) (((extract64(((ut64) (((st32) (((st16) ((Rss >> i * 0x10) & ((st64) 0xffff))) >> u)) + 0x1 >> 0x1)), 0x0, 0x8) == ((ut64) (((st32) (((st16) ((Rss >> i * 0x10) & ((st64) 0xffff))) >> u)) + 0x1 >> 0x1))) ? ((st64) (((st32) (((st16) ((Rss >> i * 0x10) & ((st64) 0xffff))) >> u)) + 0x1 >> 0x1)) : h_tmp605) & 0xff)) << i * 0x8))); + RzILOpPure *op_MUL_12 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_LSHIFT_13 = SHIFTL0(SN(64, 0xff), op_MUL_12); + RzILOpPure *op_NOT_14 = LOGNOT(op_LSHIFT_13); + RzILOpPure *op_AND_16 = LOGAND(CAST(64, MSB(READ_REG(pkt, Rd_op, true)), READ_REG(pkt, Rd_op, true)), op_NOT_14); + RzILOpPure *op_MUL_57 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_58 = SHIFTRA(DUP(Rss), op_MUL_57); + RzILOpPure *op_AND_61 = LOGAND(op_RSHIFT_58, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_63 = SHIFTRA(CAST(16, MSB(op_AND_61), DUP(op_AND_61)), VARL("u")); + RzILOpPure *op_ADD_66 = ADD(CAST(32, MSB(op_RSHIFT_63), DUP(op_RSHIFT_63)), SN(32, 1)); + RzILOpPure *op_RSHIFT_68 = SHIFTRA(op_ADD_66, SN(32, 1)); + RzILOpPure *cond_101 = ITE(DUP(op_EQ_55), CAST(64, MSB(op_RSHIFT_68), DUP(op_RSHIFT_68)), VARL("h_tmp605")); + RzILOpPure *op_AND_103 = LOGAND(cond_101, SN(64, 0xff)); + RzILOpPure *op_MUL_106 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_LSHIFT_107 = SHIFTL0(CAST(64, IL_FALSE, op_AND_103), op_MUL_106); + RzILOpPure 
*op_OR_109 = LOGOR(CAST(64, IL_FALSE, op_AND_16), op_LSHIFT_107); + RzILOpEffect *op_ASSIGN_111 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, op_OR_109)); + + // seq(seq(HYB(gcc_expr_if ((extract64(((ut64) (((st32) (((st16) (( ...; + RzILOpEffect *seq_112 = SEQN(2, seq_99, op_ASSIGN_111); + + // seq(h_tmp604; seq(seq(HYB(gcc_expr_if ((extract64(((ut64) (((st3 ...; + RzILOpEffect *seq_114 = seq_112; + + // seq(seq(h_tmp604; seq(seq(HYB(gcc_expr_if ((extract64(((ut64) (( ...; + RzILOpEffect *seq_115 = SEQN(2, seq_114, seq_8); + + // while ((i < 0x4)) { seq(seq(h_tmp604; seq(seq(HYB(gcc_expr_if ((extract64(((ut64) (( ... }; + RzILOpPure *op_LT_4 = SLT(VARL("i"), SN(32, 4)); + RzILOpEffect *for_116 = REPEAT(op_LT_4, seq_115); + + // seq(i = 0x0; while ((i < 0x4)) { seq(seq(h_tmp604; seq(seq(HYB(g ...; + RzILOpEffect *seq_117 = SEQN(2, op_ASSIGN_2, for_116); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_28, seq_117); + return instruction_sequence; +} + +// Rd = vasrhub(Rss,Ii):sat +RzILOpEffect *hex_il_op_s5_asrhub_sat(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: st32 i; + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_2 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_5 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp606 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_7 = SETL("h_tmp606", VARL("i")); + + // seq(h_tmp606 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_8 = SEQN(2, op_ASSIGN_hybrid_tmp_7, op_INC_5); + + // u = u; + RzILOpEffect *imm_assign_28 = SETL("u", u); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1)); + RzILOpEffect *set_usr_field_call_56 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, CAST(32, IL_FALSE, SN(32, 1))); + + // HYB(gcc_expr_if 
((extract64(((ut64) (((st16) ((Rss >> i * 0x10) & ((st64) 0xffff))) >> u)), 0x0, 0x8) == ((ut64) (((st16) ((Rss >> i * 0x10) & ((st64) 0xffff))) >> u)))) {{}} else {set_usr_field(bundle, HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st32) (((st16) ((Rss >> i * 0x10) & ((st64) 0xffff))) >> u)) < 0x0) ? ((st64) 0x0) : (0x1 << 0x8) - ((st64) 0x1))); + RzILOpPure *op_MUL_22 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_23 = SHIFTRA(Rss, op_MUL_22); + RzILOpPure *op_AND_26 = LOGAND(op_RSHIFT_23, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_30 = SHIFTRA(CAST(16, MSB(op_AND_26), DUP(op_AND_26)), VARL("u")); + RzILOpPure *op_MUL_37 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_38 = SHIFTRA(DUP(Rss), op_MUL_37); + RzILOpPure *op_AND_41 = LOGAND(op_RSHIFT_38, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_43 = SHIFTRA(CAST(16, MSB(op_AND_41), DUP(op_AND_41)), VARL("u")); + RzILOpPure *op_EQ_45 = EQ(EXTRACT64(CAST(64, IL_FALSE, op_RSHIFT_30), SN(32, 0), SN(32, 8)), CAST(64, IL_FALSE, op_RSHIFT_43)); + RzILOpPure *op_MUL_58 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_59 = SHIFTRA(DUP(Rss), op_MUL_58); + RzILOpPure *op_AND_62 = LOGAND(op_RSHIFT_59, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_64 = SHIFTRA(CAST(16, MSB(op_AND_62), DUP(op_AND_62)), VARL("u")); + RzILOpPure *op_LT_67 = SLT(CAST(32, MSB(op_RSHIFT_64), DUP(op_RSHIFT_64)), SN(32, 0)); + RzILOpPure *op_LSHIFT_71 = SHIFTL0(SN(64, 1), SN(32, 8)); + RzILOpPure *op_SUB_74 = SUB(op_LSHIFT_71, CAST(64, MSB(SN(32, 1)), SN(32, 1))); + RzILOpPure *cond_76 = ITE(op_LT_67, CAST(64, MSB(SN(32, 0)), SN(32, 0)), op_SUB_74); + RzILOpEffect *gcc_expr_77 = BRANCH(op_EQ_45, EMPTY(), set_usr_field_call_56); + + // h_tmp607 = HYB(gcc_expr_if ((extract64(((ut64) (((st16) ((Rss >> i * 0x10) & ((st64) 0xffff))) >> u)), 0x0, 0x8) == ((ut64) (((st16) ((Rss >> i * 0x10) & ((st64) 0xffff))) >> u)))) {{}} else {set_usr_field(bundle, 
HEX_REG_FIELD_USR_OVF, ((ut32) 0x1))}, ((((st32) (((st16) ((Rss >> i * 0x10) & ((st64) 0xffff))) >> u)) < 0x0) ? ((st64) 0x0) : (0x1 << 0x8) - ((st64) 0x1))); + RzILOpEffect *op_ASSIGN_hybrid_tmp_79 = SETL("h_tmp607", cond_76); + + // seq(HYB(gcc_expr_if ((extract64(((ut64) (((st16) ((Rss >> i * 0x ...; + RzILOpEffect *seq_80 = SEQN(2, gcc_expr_77, op_ASSIGN_hybrid_tmp_79); + + // Rd = ((st32) (((ut64) (((st64) Rd) & (~(0xff << i * 0x8)))) | (((ut64) (((extract64(((ut64) (((st16) ((Rss >> i * 0x10) & ((st64) 0xffff))) >> u)), 0x0, 0x8) == ((ut64) (((st16) ((Rss >> i * 0x10) & ((st64) 0xffff))) >> u))) ? ((st64) (((st16) ((Rss >> i * 0x10) & ((st64) 0xffff))) >> u)) : h_tmp607) & 0xff)) << i * 0x8))); + RzILOpPure *op_MUL_12 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_LSHIFT_13 = SHIFTL0(SN(64, 0xff), op_MUL_12); + RzILOpPure *op_NOT_14 = LOGNOT(op_LSHIFT_13); + RzILOpPure *op_AND_16 = LOGAND(CAST(64, MSB(READ_REG(pkt, Rd_op, true)), READ_REG(pkt, Rd_op, true)), op_NOT_14); + RzILOpPure *op_MUL_47 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_48 = SHIFTRA(DUP(Rss), op_MUL_47); + RzILOpPure *op_AND_51 = LOGAND(op_RSHIFT_48, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_53 = SHIFTRA(CAST(16, MSB(op_AND_51), DUP(op_AND_51)), VARL("u")); + RzILOpPure *cond_82 = ITE(DUP(op_EQ_45), CAST(64, MSB(op_RSHIFT_53), DUP(op_RSHIFT_53)), VARL("h_tmp607")); + RzILOpPure *op_AND_84 = LOGAND(cond_82, SN(64, 0xff)); + RzILOpPure *op_MUL_87 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_LSHIFT_88 = SHIFTL0(CAST(64, IL_FALSE, op_AND_84), op_MUL_87); + RzILOpPure *op_OR_90 = LOGOR(CAST(64, IL_FALSE, op_AND_16), op_LSHIFT_88); + RzILOpEffect *op_ASSIGN_92 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, op_OR_90)); + + // seq(seq(HYB(gcc_expr_if ((extract64(((ut64) (((st16) ((Rss >> i ...; + RzILOpEffect *seq_93 = SEQN(2, seq_80, op_ASSIGN_92); + + // seq(h_tmp606; seq(seq(HYB(gcc_expr_if ((extract64(((ut64) (((st1 ...; + RzILOpEffect *seq_95 = seq_93; + + 
// seq(seq(h_tmp606; seq(seq(HYB(gcc_expr_if ((extract64(((ut64) (( ...; + RzILOpEffect *seq_96 = SEQN(2, seq_95, seq_8); + + // while ((i < 0x4)) { seq(seq(h_tmp606; seq(seq(HYB(gcc_expr_if ((extract64(((ut64) (( ... }; + RzILOpPure *op_LT_4 = SLT(VARL("i"), SN(32, 4)); + RzILOpEffect *for_97 = REPEAT(op_LT_4, seq_96); + + // seq(i = 0x0; while ((i < 0x4)) { seq(seq(h_tmp606; seq(seq(HYB(g ...; + RzILOpEffect *seq_98 = SEQN(2, op_ASSIGN_2, for_97); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_28, seq_98); + return instruction_sequence; +} + +// Rd = popcount(Rss) +RzILOpEffect *hex_il_op_s5_popcountp(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Rdd = vasrh(Rss,Ii):raw +RzILOpEffect *hex_il_op_s5_vasrhrnd(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: st32 i; + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_2 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_5 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp608 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_7 = SETL("h_tmp608", VARL("i")); + + // seq(h_tmp608 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_8 = SEQN(2, op_ASSIGN_hybrid_tmp_7, op_INC_5); + + // u = u; + RzILOpEffect *imm_assign_24 = SETL("u", u); + + // Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * 0x10)))) | (((ut64) ((((st32) (((st16) ((Rss >> i * 0x10) & ((st64) 0xffff))) >> u)) + 0x1 >> 0x1) & 0xffff)) << i * 0x10))); + RzILOpPure *op_MUL_12 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_13 = SHIFTL0(SN(64, 0xffff), op_MUL_12); + RzILOpPure *op_NOT_14 = LOGNOT(op_LSHIFT_13); + RzILOpPure *op_AND_15 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_14); + RzILOpPure *op_MUL_18 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_RSHIFT_19 = SHIFTRA(Rss, 
op_MUL_18); + RzILOpPure *op_AND_22 = LOGAND(op_RSHIFT_19, CAST(64, MSB(SN(32, 0xffff)), SN(32, 0xffff))); + RzILOpPure *op_RSHIFT_26 = SHIFTRA(CAST(16, MSB(op_AND_22), DUP(op_AND_22)), VARL("u")); + RzILOpPure *op_ADD_29 = ADD(CAST(32, MSB(op_RSHIFT_26), DUP(op_RSHIFT_26)), SN(32, 1)); + RzILOpPure *op_RSHIFT_31 = SHIFTRA(op_ADD_29, SN(32, 1)); + RzILOpPure *op_AND_33 = LOGAND(op_RSHIFT_31, SN(32, 0xffff)); + RzILOpPure *op_MUL_36 = MUL(VARL("i"), SN(32, 16)); + RzILOpPure *op_LSHIFT_37 = SHIFTL0(CAST(64, IL_FALSE, op_AND_33), op_MUL_36); + RzILOpPure *op_OR_39 = LOGOR(CAST(64, IL_FALSE, op_AND_15), op_LSHIFT_37); + RzILOpEffect *op_ASSIGN_41 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_39)); + + // seq(h_tmp608; Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * 0x1 ...; + RzILOpEffect *seq_43 = op_ASSIGN_41; + + // seq(seq(h_tmp608; Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * ...; + RzILOpEffect *seq_44 = SEQN(2, seq_43, seq_8); + + // while ((i < 0x4)) { seq(seq(h_tmp608; Rdd = ((st64) (((ut64) (Rdd & (~(0xffff << i * ... 
}; + RzILOpPure *op_LT_4 = SLT(VARL("i"), SN(32, 4)); + RzILOpEffect *for_45 = REPEAT(op_LT_4, seq_44); + + // seq(i = 0x0; while ((i < 0x4)) { seq(seq(h_tmp608; Rdd = ((st64) ...; + RzILOpEffect *seq_46 = SEQN(2, op_ASSIGN_2, for_45); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_24, seq_46); + return instruction_sequence; +} + +#include \ No newline at end of file diff --git a/librz/arch/isa/hexagon/il_ops/hexagon_il_S6_ops.c b/librz/arch/isa/hexagon/il_ops/hexagon_il_S6_ops.c new file mode 100644 index 00000000000..be096be9047 --- /dev/null +++ b/librz/arch/isa/hexagon/il_ops/hexagon_il_S6_ops.c @@ -0,0 +1,549 @@ +// SPDX-FileCopyrightText: 2021 Rot127 +// SPDX-License-Identifier: LGPL-3.0-only + +// LLVM commit: b6f51787f6c8e77143f0aef6b58ddc7c55741d5c +// LLVM commit date: 2023-11-15 07:10:59 -0800 (ISO 8601 format) +// Date of code generation: 2024-03-16 06:22:39-05:00 +//======================================== +// The following code is generated. +// Do not edit. Repository of code generator: +// https://github.com/rizinorg/rz-hexagon + +#include +#include "../hexagon_il.h" +#include +#include + +// Rdd = rol(Rss,Ii) +RzILOpEffect *hex_il_op_s6_rol_i_p(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + + // u = u; + RzILOpEffect *imm_assign_1 = SETL("u", u); + + // Rdd = ((st64) ((u == ((ut32) 0x0)) ? 
((ut64) Rss) : ((((ut64) Rss) << u) | (((ut64) Rss) >> ((ut32) 0x40) - u)))); + RzILOpPure *op_EQ_5 = EQ(VARL("u"), CAST(32, IL_FALSE, SN(32, 0))); + RzILOpPure *op_LSHIFT_8 = SHIFTL0(CAST(64, IL_FALSE, Rss), VARL("u")); + RzILOpPure *op_SUB_14 = SUB(CAST(32, IL_FALSE, SN(32, 0x40)), VARL("u")); + RzILOpPure *op_RSHIFT_15 = SHIFTR0(CAST(64, IL_FALSE, DUP(Rss)), op_SUB_14); + RzILOpPure *op_OR_16 = LOGOR(op_LSHIFT_8, op_RSHIFT_15); + RzILOpPure *cond_18 = ITE(op_EQ_5, CAST(64, IL_FALSE, DUP(Rss)), op_OR_16); + RzILOpEffect *op_ASSIGN_20 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, cond_18)); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_1, op_ASSIGN_20); + return instruction_sequence; +} + +// Rxx += rol(Rss,Ii) +RzILOpEffect *hex_il_op_s6_rol_i_p_acc(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rxx_op = ISA2REG(hi, 'x', false); + + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + + // u = u; + RzILOpEffect *imm_assign_1 = SETL("u", u); + + // Rxx = ((st64) ((ut64) Rxx) + ((u == ((ut32) 0x0)) ? 
((ut64) Rss) : ((((ut64) Rss) << u) | (((ut64) Rss) >> ((ut32) 0x40) - u)))); + RzILOpPure *op_EQ_5 = EQ(VARL("u"), CAST(32, IL_FALSE, SN(32, 0))); + RzILOpPure *op_LSHIFT_8 = SHIFTL0(CAST(64, IL_FALSE, Rss), VARL("u")); + RzILOpPure *op_SUB_14 = SUB(CAST(32, IL_FALSE, SN(32, 0x40)), VARL("u")); + RzILOpPure *op_RSHIFT_15 = SHIFTR0(CAST(64, IL_FALSE, DUP(Rss)), op_SUB_14); + RzILOpPure *op_OR_16 = LOGOR(op_LSHIFT_8, op_RSHIFT_15); + RzILOpPure *cond_18 = ITE(op_EQ_5, CAST(64, IL_FALSE, DUP(Rss)), op_OR_16); + RzILOpPure *op_ADD_20 = ADD(CAST(64, IL_FALSE, READ_REG(pkt, Rxx_op, false)), cond_18); + RzILOpEffect *op_ASSIGN_22 = WRITE_REG(bundle, Rxx_op, CAST(64, IL_FALSE, op_ADD_20)); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_1, op_ASSIGN_22); + return instruction_sequence; +} + +// Rxx &= rol(Rss,Ii) +RzILOpEffect *hex_il_op_s6_rol_i_p_and(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rxx_op = ISA2REG(hi, 'x', false); + + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + + // u = u; + RzILOpEffect *imm_assign_1 = SETL("u", u); + + // Rxx = ((st64) (((ut64) Rxx) & ((u == ((ut32) 0x0)) ? 
((ut64) Rss) : ((((ut64) Rss) << u) | (((ut64) Rss) >> ((ut32) 0x40) - u))))); + RzILOpPure *op_EQ_5 = EQ(VARL("u"), CAST(32, IL_FALSE, SN(32, 0))); + RzILOpPure *op_LSHIFT_8 = SHIFTL0(CAST(64, IL_FALSE, Rss), VARL("u")); + RzILOpPure *op_SUB_14 = SUB(CAST(32, IL_FALSE, SN(32, 0x40)), VARL("u")); + RzILOpPure *op_RSHIFT_15 = SHIFTR0(CAST(64, IL_FALSE, DUP(Rss)), op_SUB_14); + RzILOpPure *op_OR_16 = LOGOR(op_LSHIFT_8, op_RSHIFT_15); + RzILOpPure *cond_18 = ITE(op_EQ_5, CAST(64, IL_FALSE, DUP(Rss)), op_OR_16); + RzILOpPure *op_AND_20 = LOGAND(CAST(64, IL_FALSE, READ_REG(pkt, Rxx_op, false)), cond_18); + RzILOpEffect *op_ASSIGN_22 = WRITE_REG(bundle, Rxx_op, CAST(64, IL_FALSE, op_AND_20)); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_1, op_ASSIGN_22); + return instruction_sequence; +} + +// Rxx -= rol(Rss,Ii) +RzILOpEffect *hex_il_op_s6_rol_i_p_nac(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rxx_op = ISA2REG(hi, 'x', false); + + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + + // u = u; + RzILOpEffect *imm_assign_1 = SETL("u", u); + + // Rxx = ((st64) ((ut64) Rxx) - ((u == ((ut32) 0x0)) ? 
((ut64) Rss) : ((((ut64) Rss) << u) | (((ut64) Rss) >> ((ut32) 0x40) - u)))); + RzILOpPure *op_EQ_5 = EQ(VARL("u"), CAST(32, IL_FALSE, SN(32, 0))); + RzILOpPure *op_LSHIFT_8 = SHIFTL0(CAST(64, IL_FALSE, Rss), VARL("u")); + RzILOpPure *op_SUB_14 = SUB(CAST(32, IL_FALSE, SN(32, 0x40)), VARL("u")); + RzILOpPure *op_RSHIFT_15 = SHIFTR0(CAST(64, IL_FALSE, DUP(Rss)), op_SUB_14); + RzILOpPure *op_OR_16 = LOGOR(op_LSHIFT_8, op_RSHIFT_15); + RzILOpPure *cond_18 = ITE(op_EQ_5, CAST(64, IL_FALSE, DUP(Rss)), op_OR_16); + RzILOpPure *op_SUB_20 = SUB(CAST(64, IL_FALSE, READ_REG(pkt, Rxx_op, false)), cond_18); + RzILOpEffect *op_ASSIGN_22 = WRITE_REG(bundle, Rxx_op, CAST(64, IL_FALSE, op_SUB_20)); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_1, op_ASSIGN_22); + return instruction_sequence; +} + +// Rxx |= rol(Rss,Ii) +RzILOpEffect *hex_il_op_s6_rol_i_p_or(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rxx_op = ISA2REG(hi, 'x', false); + + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + + // u = u; + RzILOpEffect *imm_assign_1 = SETL("u", u); + + // Rxx = ((st64) (((ut64) Rxx) | ((u == ((ut32) 0x0)) ? 
((ut64) Rss) : ((((ut64) Rss) << u) | (((ut64) Rss) >> ((ut32) 0x40) - u))))); + RzILOpPure *op_EQ_5 = EQ(VARL("u"), CAST(32, IL_FALSE, SN(32, 0))); + RzILOpPure *op_LSHIFT_8 = SHIFTL0(CAST(64, IL_FALSE, Rss), VARL("u")); + RzILOpPure *op_SUB_14 = SUB(CAST(32, IL_FALSE, SN(32, 0x40)), VARL("u")); + RzILOpPure *op_RSHIFT_15 = SHIFTR0(CAST(64, IL_FALSE, DUP(Rss)), op_SUB_14); + RzILOpPure *op_OR_16 = LOGOR(op_LSHIFT_8, op_RSHIFT_15); + RzILOpPure *cond_18 = ITE(op_EQ_5, CAST(64, IL_FALSE, DUP(Rss)), op_OR_16); + RzILOpPure *op_OR_20 = LOGOR(CAST(64, IL_FALSE, READ_REG(pkt, Rxx_op, false)), cond_18); + RzILOpEffect *op_ASSIGN_22 = WRITE_REG(bundle, Rxx_op, CAST(64, IL_FALSE, op_OR_20)); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_1, op_ASSIGN_22); + return instruction_sequence; +} + +// Rxx ^= rol(Rss,Ii) +RzILOpEffect *hex_il_op_s6_rol_i_p_xacc(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rxx_op = ISA2REG(hi, 'x', false); + + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + + // u = u; + RzILOpEffect *imm_assign_1 = SETL("u", u); + + // Rxx = ((st64) (((ut64) Rxx) ^ ((u == ((ut32) 0x0)) ? 
((ut64) Rss) : ((((ut64) Rss) << u) | (((ut64) Rss) >> ((ut32) 0x40) - u))))); + RzILOpPure *op_EQ_5 = EQ(VARL("u"), CAST(32, IL_FALSE, SN(32, 0))); + RzILOpPure *op_LSHIFT_8 = SHIFTL0(CAST(64, IL_FALSE, Rss), VARL("u")); + RzILOpPure *op_SUB_14 = SUB(CAST(32, IL_FALSE, SN(32, 0x40)), VARL("u")); + RzILOpPure *op_RSHIFT_15 = SHIFTR0(CAST(64, IL_FALSE, DUP(Rss)), op_SUB_14); + RzILOpPure *op_OR_16 = LOGOR(op_LSHIFT_8, op_RSHIFT_15); + RzILOpPure *cond_18 = ITE(op_EQ_5, CAST(64, IL_FALSE, DUP(Rss)), op_OR_16); + RzILOpPure *op_XOR_20 = LOGXOR(CAST(64, IL_FALSE, READ_REG(pkt, Rxx_op, false)), cond_18); + RzILOpEffect *op_ASSIGN_22 = WRITE_REG(bundle, Rxx_op, CAST(64, IL_FALSE, op_XOR_20)); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_1, op_ASSIGN_22); + return instruction_sequence; +} + +// Rd = rol(Rs,Ii) +RzILOpEffect *hex_il_op_s6_rol_i_r(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // u = u; + RzILOpEffect *imm_assign_1 = SETL("u", u); + + // Rd = ((st32) ((u == ((ut32) 0x0)) ? 
((ut32) Rs) : ((((ut32) Rs) << u) | (((ut32) Rs) >> ((ut32) 0x20) - u)))); + RzILOpPure *op_EQ_5 = EQ(VARL("u"), CAST(32, IL_FALSE, SN(32, 0))); + RzILOpPure *op_LSHIFT_8 = SHIFTL0(CAST(32, IL_FALSE, Rs), VARL("u")); + RzILOpPure *op_SUB_14 = SUB(CAST(32, IL_FALSE, SN(32, 0x20)), VARL("u")); + RzILOpPure *op_RSHIFT_15 = SHIFTR0(CAST(32, IL_FALSE, DUP(Rs)), op_SUB_14); + RzILOpPure *op_OR_16 = LOGOR(op_LSHIFT_8, op_RSHIFT_15); + RzILOpPure *cond_18 = ITE(op_EQ_5, CAST(32, IL_FALSE, DUP(Rs)), op_OR_16); + RzILOpEffect *op_ASSIGN_20 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, cond_18)); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_1, op_ASSIGN_20); + return instruction_sequence; +} + +// Rx += rol(Rs,Ii) +RzILOpEffect *hex_il_op_s6_rol_i_r_acc(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // u = u; + RzILOpEffect *imm_assign_1 = SETL("u", u); + + // Rx = ((st32) ((ut32) Rx) + ((u == ((ut32) 0x0)) ? 
((ut32) Rs) : ((((ut32) Rs) << u) | (((ut32) Rs) >> ((ut32) 0x20) - u)))); + RzILOpPure *op_EQ_5 = EQ(VARL("u"), CAST(32, IL_FALSE, SN(32, 0))); + RzILOpPure *op_LSHIFT_8 = SHIFTL0(CAST(32, IL_FALSE, Rs), VARL("u")); + RzILOpPure *op_SUB_14 = SUB(CAST(32, IL_FALSE, SN(32, 0x20)), VARL("u")); + RzILOpPure *op_RSHIFT_15 = SHIFTR0(CAST(32, IL_FALSE, DUP(Rs)), op_SUB_14); + RzILOpPure *op_OR_16 = LOGOR(op_LSHIFT_8, op_RSHIFT_15); + RzILOpPure *cond_18 = ITE(op_EQ_5, CAST(32, IL_FALSE, DUP(Rs)), op_OR_16); + RzILOpPure *op_ADD_20 = ADD(CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false)), cond_18); + RzILOpEffect *op_ASSIGN_22 = WRITE_REG(bundle, Rx_op, CAST(32, IL_FALSE, op_ADD_20)); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_1, op_ASSIGN_22); + return instruction_sequence; +} + +// Rx &= rol(Rs,Ii) +RzILOpEffect *hex_il_op_s6_rol_i_r_and(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // u = u; + RzILOpEffect *imm_assign_1 = SETL("u", u); + + // Rx = ((st32) (((ut32) Rx) & ((u == ((ut32) 0x0)) ? 
((ut32) Rs) : ((((ut32) Rs) << u) | (((ut32) Rs) >> ((ut32) 0x20) - u))))); + RzILOpPure *op_EQ_5 = EQ(VARL("u"), CAST(32, IL_FALSE, SN(32, 0))); + RzILOpPure *op_LSHIFT_8 = SHIFTL0(CAST(32, IL_FALSE, Rs), VARL("u")); + RzILOpPure *op_SUB_14 = SUB(CAST(32, IL_FALSE, SN(32, 0x20)), VARL("u")); + RzILOpPure *op_RSHIFT_15 = SHIFTR0(CAST(32, IL_FALSE, DUP(Rs)), op_SUB_14); + RzILOpPure *op_OR_16 = LOGOR(op_LSHIFT_8, op_RSHIFT_15); + RzILOpPure *cond_18 = ITE(op_EQ_5, CAST(32, IL_FALSE, DUP(Rs)), op_OR_16); + RzILOpPure *op_AND_20 = LOGAND(CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false)), cond_18); + RzILOpEffect *op_ASSIGN_22 = WRITE_REG(bundle, Rx_op, CAST(32, IL_FALSE, op_AND_20)); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_1, op_ASSIGN_22); + return instruction_sequence; +} + +// Rx -= rol(Rs,Ii) +RzILOpEffect *hex_il_op_s6_rol_i_r_nac(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // u = u; + RzILOpEffect *imm_assign_1 = SETL("u", u); + + // Rx = ((st32) ((ut32) Rx) - ((u == ((ut32) 0x0)) ? 
((ut32) Rs) : ((((ut32) Rs) << u) | (((ut32) Rs) >> ((ut32) 0x20) - u)))); + RzILOpPure *op_EQ_5 = EQ(VARL("u"), CAST(32, IL_FALSE, SN(32, 0))); + RzILOpPure *op_LSHIFT_8 = SHIFTL0(CAST(32, IL_FALSE, Rs), VARL("u")); + RzILOpPure *op_SUB_14 = SUB(CAST(32, IL_FALSE, SN(32, 0x20)), VARL("u")); + RzILOpPure *op_RSHIFT_15 = SHIFTR0(CAST(32, IL_FALSE, DUP(Rs)), op_SUB_14); + RzILOpPure *op_OR_16 = LOGOR(op_LSHIFT_8, op_RSHIFT_15); + RzILOpPure *cond_18 = ITE(op_EQ_5, CAST(32, IL_FALSE, DUP(Rs)), op_OR_16); + RzILOpPure *op_SUB_20 = SUB(CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false)), cond_18); + RzILOpEffect *op_ASSIGN_22 = WRITE_REG(bundle, Rx_op, CAST(32, IL_FALSE, op_SUB_20)); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_1, op_ASSIGN_22); + return instruction_sequence; +} + +// Rx |= rol(Rs,Ii) +RzILOpEffect *hex_il_op_s6_rol_i_r_or(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // u = u; + RzILOpEffect *imm_assign_1 = SETL("u", u); + + // Rx = ((st32) (((ut32) Rx) | ((u == ((ut32) 0x0)) ? 
((ut32) Rs) : ((((ut32) Rs) << u) | (((ut32) Rs) >> ((ut32) 0x20) - u))))); + RzILOpPure *op_EQ_5 = EQ(VARL("u"), CAST(32, IL_FALSE, SN(32, 0))); + RzILOpPure *op_LSHIFT_8 = SHIFTL0(CAST(32, IL_FALSE, Rs), VARL("u")); + RzILOpPure *op_SUB_14 = SUB(CAST(32, IL_FALSE, SN(32, 0x20)), VARL("u")); + RzILOpPure *op_RSHIFT_15 = SHIFTR0(CAST(32, IL_FALSE, DUP(Rs)), op_SUB_14); + RzILOpPure *op_OR_16 = LOGOR(op_LSHIFT_8, op_RSHIFT_15); + RzILOpPure *cond_18 = ITE(op_EQ_5, CAST(32, IL_FALSE, DUP(Rs)), op_OR_16); + RzILOpPure *op_OR_20 = LOGOR(CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false)), cond_18); + RzILOpEffect *op_ASSIGN_22 = WRITE_REG(bundle, Rx_op, CAST(32, IL_FALSE, op_OR_20)); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_1, op_ASSIGN_22); + return instruction_sequence; +} + +// Rx ^= rol(Rs,Ii) +RzILOpEffect *hex_il_op_s6_rol_i_r_xacc(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // u = u; + RzILOpEffect *imm_assign_1 = SETL("u", u); + + // Rx = ((st32) (((ut32) Rx) ^ ((u == ((ut32) 0x0)) ? 
((ut32) Rs) : ((((ut32) Rs) << u) | (((ut32) Rs) >> ((ut32) 0x20) - u))))); + RzILOpPure *op_EQ_5 = EQ(VARL("u"), CAST(32, IL_FALSE, SN(32, 0))); + RzILOpPure *op_LSHIFT_8 = SHIFTL0(CAST(32, IL_FALSE, Rs), VARL("u")); + RzILOpPure *op_SUB_14 = SUB(CAST(32, IL_FALSE, SN(32, 0x20)), VARL("u")); + RzILOpPure *op_RSHIFT_15 = SHIFTR0(CAST(32, IL_FALSE, DUP(Rs)), op_SUB_14); + RzILOpPure *op_OR_16 = LOGOR(op_LSHIFT_8, op_RSHIFT_15); + RzILOpPure *cond_18 = ITE(op_EQ_5, CAST(32, IL_FALSE, DUP(Rs)), op_OR_16); + RzILOpPure *op_XOR_20 = LOGXOR(CAST(32, IL_FALSE, READ_REG(pkt, Rx_op, false)), cond_18); + RzILOpEffect *op_ASSIGN_22 = WRITE_REG(bundle, Rx_op, CAST(32, IL_FALSE, op_XOR_20)); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_1, op_ASSIGN_22); + return instruction_sequence; +} + +// Rdd = vsplatb(Rs) +RzILOpEffect *hex_il_op_s6_vsplatrbp(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: st32 i; + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_2 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_5 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp609 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_7 = SETL("h_tmp609", VARL("i")); + + // seq(h_tmp609 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_8 = SEQN(2, op_ASSIGN_hybrid_tmp_7, op_INC_5); + + // Rdd = ((st64) (((ut64) (Rdd & (~(0xff << i * 0x8)))) | (((ut64) (((st64) ((st32) ((st8) ((Rs >> 0x0) & 0xff)))) & 0xff)) << i * 0x8))); + RzILOpPure *op_MUL_12 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_LSHIFT_13 = SHIFTL0(SN(64, 0xff), op_MUL_12); + RzILOpPure *op_NOT_14 = LOGNOT(op_LSHIFT_13); + RzILOpPure *op_AND_15 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_14); + RzILOpPure *op_RSHIFT_20 = SHIFTRA(Rs, SN(32, 0)); + RzILOpPure *op_AND_22 = LOGAND(op_RSHIFT_20, SN(32, 0xff)); + RzILOpPure 
*op_AND_27 = LOGAND(CAST(64, MSB(CAST(32, MSB(CAST(8, MSB(op_AND_22), DUP(op_AND_22))), CAST(8, MSB(DUP(op_AND_22)), DUP(op_AND_22)))), CAST(32, MSB(CAST(8, MSB(DUP(op_AND_22)), DUP(op_AND_22))), CAST(8, MSB(DUP(op_AND_22)), DUP(op_AND_22)))), SN(64, 0xff)); + RzILOpPure *op_MUL_30 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_LSHIFT_31 = SHIFTL0(CAST(64, IL_FALSE, op_AND_27), op_MUL_30); + RzILOpPure *op_OR_33 = LOGOR(CAST(64, IL_FALSE, op_AND_15), op_LSHIFT_31); + RzILOpEffect *op_ASSIGN_35 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_33)); + + // seq(h_tmp609; Rdd = ((st64) (((ut64) (Rdd & (~(0xff << i * 0x8)) ...; + RzILOpEffect *seq_37 = op_ASSIGN_35; + + // seq(seq(h_tmp609; Rdd = ((st64) (((ut64) (Rdd & (~(0xff << i * 0 ...; + RzILOpEffect *seq_38 = SEQN(2, seq_37, seq_8); + + // while ((i < 0x8)) { seq(seq(h_tmp609; Rdd = ((st64) (((ut64) (Rdd & (~(0xff << i * 0 ... }; + RzILOpPure *op_LT_4 = SLT(VARL("i"), SN(32, 8)); + RzILOpEffect *for_39 = REPEAT(op_LT_4, seq_38); + + // seq(i = 0x0; while ((i < 0x8)) { seq(seq(h_tmp609; Rdd = ((st64) ...; + RzILOpEffect *seq_40 = SEQN(2, op_ASSIGN_2, for_39); + + RzILOpEffect *instruction_sequence = seq_40; + return instruction_sequence; +} + +// Rdd = vtrunehb(Rss,Rtt) +RzILOpEffect *hex_il_op_s6_vtrunehb_ppp(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: st32 i; + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_2 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_5 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp610 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_7 = SETL("h_tmp610", VARL("i")); + + // seq(h_tmp610 = HYB(++i); HYB(++i)); + RzILOpEffect *seq_8 = SEQN(2, op_ASSIGN_hybrid_tmp_7, 
op_INC_5); + + // Rdd = ((st64) (((ut64) (Rdd & (~(0xff << i * 0x8)))) | (((ut64) (((st64) ((st32) ((st8) ((Rtt >> i * 0x2 * 0x8) & ((st64) 0xff))))) & 0xff)) << i * 0x8))); + RzILOpPure *op_MUL_12 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_LSHIFT_13 = SHIFTL0(SN(64, 0xff), op_MUL_12); + RzILOpPure *op_NOT_14 = LOGNOT(op_LSHIFT_13); + RzILOpPure *op_AND_15 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_14); + RzILOpPure *op_MUL_18 = MUL(VARL("i"), SN(32, 2)); + RzILOpPure *op_MUL_20 = MUL(op_MUL_18, SN(32, 8)); + RzILOpPure *op_RSHIFT_21 = SHIFTRA(Rtt, op_MUL_20); + RzILOpPure *op_AND_24 = LOGAND(op_RSHIFT_21, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_AND_29 = LOGAND(CAST(64, MSB(CAST(32, MSB(CAST(8, MSB(op_AND_24), DUP(op_AND_24))), CAST(8, MSB(DUP(op_AND_24)), DUP(op_AND_24)))), CAST(32, MSB(CAST(8, MSB(DUP(op_AND_24)), DUP(op_AND_24))), CAST(8, MSB(DUP(op_AND_24)), DUP(op_AND_24)))), SN(64, 0xff)); + RzILOpPure *op_MUL_32 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_LSHIFT_33 = SHIFTL0(CAST(64, IL_FALSE, op_AND_29), op_MUL_32); + RzILOpPure *op_OR_35 = LOGOR(CAST(64, IL_FALSE, op_AND_15), op_LSHIFT_33); + RzILOpEffect *op_ASSIGN_37 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_35)); + + // Rdd = ((st64) (((ut64) (Rdd & (~(0xff << i + 0x4 * 0x8)))) | (((ut64) (((st64) ((st32) ((st8) ((Rss >> i * 0x2 * 0x8) & ((st64) 0xff))))) & 0xff)) << i + 0x4 * 0x8))); + RzILOpPure *op_ADD_41 = ADD(VARL("i"), SN(32, 4)); + RzILOpPure *op_MUL_43 = MUL(op_ADD_41, SN(32, 8)); + RzILOpPure *op_LSHIFT_44 = SHIFTL0(SN(64, 0xff), op_MUL_43); + RzILOpPure *op_NOT_45 = LOGNOT(op_LSHIFT_44); + RzILOpPure *op_AND_46 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_45); + RzILOpPure *op_MUL_49 = MUL(VARL("i"), SN(32, 2)); + RzILOpPure *op_MUL_51 = MUL(op_MUL_49, SN(32, 8)); + RzILOpPure *op_RSHIFT_52 = SHIFTRA(Rss, op_MUL_51); + RzILOpPure *op_AND_55 = LOGAND(op_RSHIFT_52, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_AND_60 = LOGAND(CAST(64, 
MSB(CAST(32, MSB(CAST(8, MSB(op_AND_55), DUP(op_AND_55))), CAST(8, MSB(DUP(op_AND_55)), DUP(op_AND_55)))), CAST(32, MSB(CAST(8, MSB(DUP(op_AND_55)), DUP(op_AND_55))), CAST(8, MSB(DUP(op_AND_55)), DUP(op_AND_55)))), SN(64, 0xff)); + RzILOpPure *op_ADD_63 = ADD(VARL("i"), SN(32, 4)); + RzILOpPure *op_MUL_65 = MUL(op_ADD_63, SN(32, 8)); + RzILOpPure *op_LSHIFT_66 = SHIFTL0(CAST(64, IL_FALSE, op_AND_60), op_MUL_65); + RzILOpPure *op_OR_68 = LOGOR(CAST(64, IL_FALSE, op_AND_46), op_LSHIFT_66); + RzILOpEffect *op_ASSIGN_70 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_68)); + + // seq(h_tmp610; Rdd = ((st64) (((ut64) (Rdd & (~(0xff << i * 0x8)) ...; + RzILOpEffect *seq_72 = SEQN(2, op_ASSIGN_37, op_ASSIGN_70); + + // seq(seq(h_tmp610; Rdd = ((st64) (((ut64) (Rdd & (~(0xff << i * 0 ...; + RzILOpEffect *seq_73 = SEQN(2, seq_72, seq_8); + + // while ((i < 0x4)) { seq(seq(h_tmp610; Rdd = ((st64) (((ut64) (Rdd & (~(0xff << i * 0 ... }; + RzILOpPure *op_LT_4 = SLT(VARL("i"), SN(32, 4)); + RzILOpEffect *for_74 = REPEAT(op_LT_4, seq_73); + + // seq(i = 0x0; while ((i < 0x4)) { seq(seq(h_tmp610; Rdd = ((st64) ...; + RzILOpEffect *seq_75 = SEQN(2, op_ASSIGN_2, for_74); + + RzILOpEffect *instruction_sequence = seq_75; + return instruction_sequence; +} + +// Rdd = vtrunohb(Rss,Rtt) +RzILOpEffect *hex_il_op_s6_vtrunohb_ppp(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: st32 i; + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + const HexOp *Rss_op = ISA2REG(hi, 's', false); + RzILOpPure *Rss = READ_REG(pkt, Rss_op, false); + + // i = 0x0; + RzILOpEffect *op_ASSIGN_2 = SETL("i", SN(32, 0)); + + // HYB(++i); + RzILOpEffect *op_INC_5 = SETL("i", INC(VARL("i"), 32)); + + // h_tmp611 = HYB(++i); + RzILOpEffect *op_ASSIGN_hybrid_tmp_7 = SETL("h_tmp611", VARL("i")); + + // seq(h_tmp611 = HYB(++i); HYB(++i)); + 
RzILOpEffect *seq_8 = SEQN(2, op_ASSIGN_hybrid_tmp_7, op_INC_5); + + // Rdd = ((st64) (((ut64) (Rdd & (~(0xff << i * 0x8)))) | (((ut64) (((st64) ((st32) ((st8) ((Rtt >> i * 0x2 + 0x1 * 0x8) & ((st64) 0xff))))) & 0xff)) << i * 0x8))); + RzILOpPure *op_MUL_12 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_LSHIFT_13 = SHIFTL0(SN(64, 0xff), op_MUL_12); + RzILOpPure *op_NOT_14 = LOGNOT(op_LSHIFT_13); + RzILOpPure *op_AND_15 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_14); + RzILOpPure *op_MUL_18 = MUL(VARL("i"), SN(32, 2)); + RzILOpPure *op_ADD_20 = ADD(op_MUL_18, SN(32, 1)); + RzILOpPure *op_MUL_22 = MUL(op_ADD_20, SN(32, 8)); + RzILOpPure *op_RSHIFT_23 = SHIFTRA(Rtt, op_MUL_22); + RzILOpPure *op_AND_26 = LOGAND(op_RSHIFT_23, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_AND_31 = LOGAND(CAST(64, MSB(CAST(32, MSB(CAST(8, MSB(op_AND_26), DUP(op_AND_26))), CAST(8, MSB(DUP(op_AND_26)), DUP(op_AND_26)))), CAST(32, MSB(CAST(8, MSB(DUP(op_AND_26)), DUP(op_AND_26))), CAST(8, MSB(DUP(op_AND_26)), DUP(op_AND_26)))), SN(64, 0xff)); + RzILOpPure *op_MUL_34 = MUL(VARL("i"), SN(32, 8)); + RzILOpPure *op_LSHIFT_35 = SHIFTL0(CAST(64, IL_FALSE, op_AND_31), op_MUL_34); + RzILOpPure *op_OR_37 = LOGOR(CAST(64, IL_FALSE, op_AND_15), op_LSHIFT_35); + RzILOpEffect *op_ASSIGN_39 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_37)); + + // Rdd = ((st64) (((ut64) (Rdd & (~(0xff << i + 0x4 * 0x8)))) | (((ut64) (((st64) ((st32) ((st8) ((Rss >> i * 0x2 + 0x1 * 0x8) & ((st64) 0xff))))) & 0xff)) << i + 0x4 * 0x8))); + RzILOpPure *op_ADD_43 = ADD(VARL("i"), SN(32, 4)); + RzILOpPure *op_MUL_45 = MUL(op_ADD_43, SN(32, 8)); + RzILOpPure *op_LSHIFT_46 = SHIFTL0(SN(64, 0xff), op_MUL_45); + RzILOpPure *op_NOT_47 = LOGNOT(op_LSHIFT_46); + RzILOpPure *op_AND_48 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_47); + RzILOpPure *op_MUL_51 = MUL(VARL("i"), SN(32, 2)); + RzILOpPure *op_ADD_53 = ADD(op_MUL_51, SN(32, 1)); + RzILOpPure *op_MUL_55 = MUL(op_ADD_53, SN(32, 8)); + RzILOpPure 
*op_RSHIFT_56 = SHIFTRA(Rss, op_MUL_55); + RzILOpPure *op_AND_59 = LOGAND(op_RSHIFT_56, CAST(64, MSB(SN(32, 0xff)), SN(32, 0xff))); + RzILOpPure *op_AND_64 = LOGAND(CAST(64, MSB(CAST(32, MSB(CAST(8, MSB(op_AND_59), DUP(op_AND_59))), CAST(8, MSB(DUP(op_AND_59)), DUP(op_AND_59)))), CAST(32, MSB(CAST(8, MSB(DUP(op_AND_59)), DUP(op_AND_59))), CAST(8, MSB(DUP(op_AND_59)), DUP(op_AND_59)))), SN(64, 0xff)); + RzILOpPure *op_ADD_67 = ADD(VARL("i"), SN(32, 4)); + RzILOpPure *op_MUL_69 = MUL(op_ADD_67, SN(32, 8)); + RzILOpPure *op_LSHIFT_70 = SHIFTL0(CAST(64, IL_FALSE, op_AND_64), op_MUL_69); + RzILOpPure *op_OR_72 = LOGOR(CAST(64, IL_FALSE, op_AND_48), op_LSHIFT_70); + RzILOpEffect *op_ASSIGN_74 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, op_OR_72)); + + // seq(h_tmp611; Rdd = ((st64) (((ut64) (Rdd & (~(0xff << i * 0x8)) ...; + RzILOpEffect *seq_76 = SEQN(2, op_ASSIGN_39, op_ASSIGN_74); + + // seq(seq(h_tmp611; Rdd = ((st64) (((ut64) (Rdd & (~(0xff << i * 0 ...; + RzILOpEffect *seq_77 = SEQN(2, seq_76, seq_8); + + // while ((i < 0x4)) { seq(seq(h_tmp611; Rdd = ((st64) (((ut64) (Rdd & (~(0xff << i * 0 ... 
}; + RzILOpPure *op_LT_4 = SLT(VARL("i"), SN(32, 4)); + RzILOpEffect *for_78 = REPEAT(op_LT_4, seq_77); + + // seq(i = 0x0; while ((i < 0x4)) { seq(seq(h_tmp611; Rdd = ((st64) ...; + RzILOpEffect *seq_79 = SEQN(2, op_ASSIGN_2, for_78); + + RzILOpEffect *instruction_sequence = seq_79; + return instruction_sequence; +} + +#include \ No newline at end of file diff --git a/librz/arch/isa/hexagon/il_ops/hexagon_il_SA1_ops.c b/librz/arch/isa/hexagon/il_ops/hexagon_il_SA1_ops.c new file mode 100644 index 00000000000..49d0fdb04cb --- /dev/null +++ b/librz/arch/isa/hexagon/il_ops/hexagon_il_SA1_ops.c @@ -0,0 +1,570 @@ +// SPDX-FileCopyrightText: 2021 Rot127 +// SPDX-License-Identifier: LGPL-3.0-only + +// LLVM commit: b6f51787f6c8e77143f0aef6b58ddc7c55741d5c +// LLVM commit date: 2023-11-15 07:10:59 -0800 (ISO 8601 format) +// Date of code generation: 2024-03-16 06:22:39-05:00 +//======================================== +// The following code is generated. +// Do not edit. Repository of code generator: +// https://github.com/rizinorg/rz-hexagon + +#include +#include "../hexagon_il.h" +#include +#include + +// Rx = add(Rxin,Ii) +RzILOpEffect *hex_il_op_sa1_addi(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + // s = s; + RzILOpEffect *imm_assign_0 = SETL("s", s); + + // Rx = Rx + s; + RzILOpPure *op_ADD_3 = ADD(READ_REG(pkt, Rx_op, false), VARL("s")); + RzILOpEffect *op_ASSIGN_4 = WRITE_REG(bundle, Rx_op, op_ADD_3); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_0, op_ASSIGN_4); + return instruction_sequence; +} + +// Rx = add(Rxin,Rs) +RzILOpEffect *hex_il_op_sa1_addrx(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, 
false); + + // Rx = Rx + Rs; + RzILOpPure *op_ADD_2 = ADD(READ_REG(pkt, Rx_op, false), Rs); + RzILOpEffect *op_ASSIGN_3 = WRITE_REG(bundle, Rx_op, op_ADD_2); + + RzILOpEffect *instruction_sequence = op_ASSIGN_3; + return instruction_sequence; +} + +// Rd = add(r29,Ii) +RzILOpEffect *hex_il_op_sa1_addsp(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp sp_op = ALIAS2OP(HEX_REG_ALIAS_SP, false); + RzILOpPure *sp = READ_REG(pkt, &sp_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + + // u = u; + RzILOpEffect *imm_assign_2 = SETL("u", u); + + // Rd = ((st32) sp + u); + RzILOpPure *op_ADD_4 = ADD(sp, VARL("u")); + RzILOpEffect *op_ASSIGN_6 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, op_ADD_4)); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_2, op_ASSIGN_6); + return instruction_sequence; +} + +// Rd = and(Rs,#1) +RzILOpEffect *hex_il_op_sa1_and1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // Rd = (Rs & 0x1); + RzILOpPure *op_AND_3 = LOGAND(Rs, SN(32, 1)); + RzILOpEffect *op_ASSIGN_4 = WRITE_REG(bundle, Rd_op, op_AND_3); + + RzILOpEffect *instruction_sequence = op_ASSIGN_4; + return instruction_sequence; +} + +// if (!p0) Rd = #0 +RzILOpEffect *hex_il_op_sa1_clrf(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P0_op = EXPLICIT2OP(0, HEX_REG_CLASS_PRED_REGS, false); + RzILOpPure *P0 = READ_REG(pkt, &P0_op, false); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // Rd = 0x0; + RzILOpEffect *op_ASSIGN_7 = WRITE_REG(bundle, Rd_op, SN(32, 0)); + + // nop; + RzILOpEffect *nop_8 = NOP(); + + // seq(Rd = 0x0); + RzILOpEffect *seq_then_9 = op_ASSIGN_7; + + // 
seq(nop); + RzILOpEffect *seq_else_10 = nop_8; + + // if (! (((st32) P0) & 0x1)) {seq(Rd = 0x0)} else {seq(nop)}; + RzILOpPure *op_AND_3 = LOGAND(CAST(32, MSB(P0), DUP(P0)), SN(32, 1)); + RzILOpPure *op_INV_4 = INV(NON_ZERO(op_AND_3)); + RzILOpEffect *branch_11 = BRANCH(op_INV_4, seq_then_9, seq_else_10); + + RzILOpEffect *instruction_sequence = branch_11; + return instruction_sequence; +} + +// if (!p0.new) Rd = #0 +RzILOpEffect *hex_il_op_sa1_clrfnew(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P0_new_op = EXPLICIT2OP(0, HEX_REG_CLASS_PRED_REGS, true); + RzILOpPure *P0_new = READ_REG(pkt, &P0_new_op, true); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // Rd = 0x0; + RzILOpEffect *op_ASSIGN_7 = WRITE_REG(bundle, Rd_op, SN(32, 0)); + + // nop; + RzILOpEffect *nop_8 = NOP(); + + // seq(Rd = 0x0); + RzILOpEffect *seq_then_9 = op_ASSIGN_7; + + // seq(nop); + RzILOpEffect *seq_else_10 = nop_8; + + // if (! (((st32) P0_new) & 0x1)) {seq(Rd = 0x0)} else {seq(nop)}; + RzILOpPure *op_AND_3 = LOGAND(CAST(32, MSB(P0_new), DUP(P0_new)), SN(32, 1)); + RzILOpPure *op_INV_4 = INV(NON_ZERO(op_AND_3)); + RzILOpEffect *branch_11 = BRANCH(op_INV_4, seq_then_9, seq_else_10); + + RzILOpEffect *instruction_sequence = branch_11; + return instruction_sequence; +} + +// if (p0) Rd = #0 +RzILOpEffect *hex_il_op_sa1_clrt(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P0_op = EXPLICIT2OP(0, HEX_REG_CLASS_PRED_REGS, false); + RzILOpPure *P0 = READ_REG(pkt, &P0_op, false); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // Rd = 0x0; + RzILOpEffect *op_ASSIGN_6 = WRITE_REG(bundle, Rd_op, SN(32, 0)); + + // nop; + RzILOpEffect *nop_7 = NOP(); + + // seq(Rd = 0x0); + RzILOpEffect *seq_then_8 = op_ASSIGN_6; + + // seq(nop); + RzILOpEffect *seq_else_9 = nop_7; + + // if ((((st32) P0) & 0x1)) {seq(Rd = 0x0)} else {seq(nop)}; + RzILOpPure *op_AND_3 = 
LOGAND(CAST(32, MSB(P0), DUP(P0)), SN(32, 1)); + RzILOpEffect *branch_10 = BRANCH(NON_ZERO(op_AND_3), seq_then_8, seq_else_9); + + RzILOpEffect *instruction_sequence = branch_10; + return instruction_sequence; +} + +// if (p0.new) Rd = #0 +RzILOpEffect *hex_il_op_sa1_clrtnew(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P0_new_op = EXPLICIT2OP(0, HEX_REG_CLASS_PRED_REGS, true); + RzILOpPure *P0_new = READ_REG(pkt, &P0_new_op, true); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // Rd = 0x0; + RzILOpEffect *op_ASSIGN_6 = WRITE_REG(bundle, Rd_op, SN(32, 0)); + + // nop; + RzILOpEffect *nop_7 = NOP(); + + // seq(Rd = 0x0); + RzILOpEffect *seq_then_8 = op_ASSIGN_6; + + // seq(nop); + RzILOpEffect *seq_else_9 = nop_7; + + // if ((((st32) P0_new) & 0x1)) {seq(Rd = 0x0)} else {seq(nop)}; + RzILOpPure *op_AND_3 = LOGAND(CAST(32, MSB(P0_new), DUP(P0_new)), SN(32, 1)); + RzILOpEffect *branch_10 = BRANCH(NON_ZERO(op_AND_3), seq_then_8, seq_else_9); + + RzILOpEffect *instruction_sequence = branch_10; + return instruction_sequence; +} + +// p0 = cmp.eq(Rs,Ii) +RzILOpEffect *hex_il_op_sa1_cmpeqi(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P0_op = EXPLICIT2OP(0, HEX_REG_CLASS_PRED_REGS, false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + + // u = u; + RzILOpEffect *imm_assign_2 = SETL("u", u); + + // P0 = ((st8) ((((ut32) Rs) == u) ? 
0xff : 0x0)); + RzILOpPure *op_EQ_5 = EQ(CAST(32, IL_FALSE, Rs), VARL("u")); + RzILOpPure *cond_8 = ITE(op_EQ_5, SN(32, 0xff), SN(32, 0)); + RzILOpEffect *op_ASSIGN_10 = WRITE_REG(bundle, &P0_op, CAST(8, MSB(cond_8), DUP(cond_8))); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_2, op_ASSIGN_10); + return instruction_sequence; +} + +// Rdd = combine(#0,Ii) +RzILOpEffect *hex_il_op_sa1_combine0i(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + + // u = u; + RzILOpEffect *imm_assign_8 = SETL("u", u); + + // Rdd = ((Rdd & (~(0xffffffff << 0x0))) | ((((st64) u) & 0xffffffff) << 0x0)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0)); + RzILOpPure *op_NOT_6 = LOGNOT(op_LSHIFT_5); + RzILOpPure *op_AND_7 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_6); + RzILOpPure *op_AND_12 = LOGAND(CAST(64, IL_FALSE, VARL("u")), SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_16 = SHIFTL0(op_AND_12, SN(32, 0)); + RzILOpPure *op_OR_17 = LOGOR(op_AND_7, op_LSHIFT_16); + RzILOpEffect *op_ASSIGN_18 = WRITE_REG(bundle, Rdd_op, op_OR_17); + + // Rdd = ((Rdd & (~(0xffffffff << 0x20))) | ((((st64) 0x0) & 0xffffffff) << 0x20)); + RzILOpPure *op_LSHIFT_24 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0x20)); + RzILOpPure *op_NOT_25 = LOGNOT(op_LSHIFT_24); + RzILOpPure *op_AND_26 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_25); + RzILOpPure *op_AND_30 = LOGAND(CAST(64, MSB(SN(32, 0)), SN(32, 0)), SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_34 = SHIFTL0(op_AND_30, SN(32, 0x20)); + RzILOpPure *op_OR_35 = LOGOR(op_AND_26, op_LSHIFT_34); + RzILOpEffect *op_ASSIGN_36 = WRITE_REG(bundle, Rdd_op, op_OR_35); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_8, op_ASSIGN_18, op_ASSIGN_36); + return instruction_sequence; +} + +// Rdd = combine(#1,Ii) +RzILOpEffect *hex_il_op_sa1_combine1i(HexInsnPktBundle *bundle) { + const 
HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + + // u = u; + RzILOpEffect *imm_assign_8 = SETL("u", u); + + // Rdd = ((Rdd & (~(0xffffffff << 0x0))) | ((((st64) u) & 0xffffffff) << 0x0)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0)); + RzILOpPure *op_NOT_6 = LOGNOT(op_LSHIFT_5); + RzILOpPure *op_AND_7 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_6); + RzILOpPure *op_AND_12 = LOGAND(CAST(64, IL_FALSE, VARL("u")), SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_16 = SHIFTL0(op_AND_12, SN(32, 0)); + RzILOpPure *op_OR_17 = LOGOR(op_AND_7, op_LSHIFT_16); + RzILOpEffect *op_ASSIGN_18 = WRITE_REG(bundle, Rdd_op, op_OR_17); + + // Rdd = ((Rdd & (~(0xffffffff << 0x20))) | ((((st64) 0x1) & 0xffffffff) << 0x20)); + RzILOpPure *op_LSHIFT_24 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0x20)); + RzILOpPure *op_NOT_25 = LOGNOT(op_LSHIFT_24); + RzILOpPure *op_AND_26 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_25); + RzILOpPure *op_AND_30 = LOGAND(CAST(64, MSB(SN(32, 1)), SN(32, 1)), SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_34 = SHIFTL0(op_AND_30, SN(32, 0x20)); + RzILOpPure *op_OR_35 = LOGOR(op_AND_26, op_LSHIFT_34); + RzILOpEffect *op_ASSIGN_36 = WRITE_REG(bundle, Rdd_op, op_OR_35); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_8, op_ASSIGN_18, op_ASSIGN_36); + return instruction_sequence; +} + +// Rdd = combine(#2,Ii) +RzILOpEffect *hex_il_op_sa1_combine2i(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + + // u = u; + RzILOpEffect *imm_assign_8 = SETL("u", u); + + // Rdd = ((Rdd & (~(0xffffffff << 0x0))) | ((((st64) u) & 0xffffffff) << 0x0)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0)); + RzILOpPure *op_NOT_6 = LOGNOT(op_LSHIFT_5); + RzILOpPure *op_AND_7 
= LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_6); + RzILOpPure *op_AND_12 = LOGAND(CAST(64, IL_FALSE, VARL("u")), SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_16 = SHIFTL0(op_AND_12, SN(32, 0)); + RzILOpPure *op_OR_17 = LOGOR(op_AND_7, op_LSHIFT_16); + RzILOpEffect *op_ASSIGN_18 = WRITE_REG(bundle, Rdd_op, op_OR_17); + + // Rdd = ((Rdd & (~(0xffffffff << 0x20))) | ((((st64) 0x2) & 0xffffffff) << 0x20)); + RzILOpPure *op_LSHIFT_24 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0x20)); + RzILOpPure *op_NOT_25 = LOGNOT(op_LSHIFT_24); + RzILOpPure *op_AND_26 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_25); + RzILOpPure *op_AND_30 = LOGAND(CAST(64, MSB(SN(32, 2)), SN(32, 2)), SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_34 = SHIFTL0(op_AND_30, SN(32, 0x20)); + RzILOpPure *op_OR_35 = LOGOR(op_AND_26, op_LSHIFT_34); + RzILOpEffect *op_ASSIGN_36 = WRITE_REG(bundle, Rdd_op, op_OR_35); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_8, op_ASSIGN_18, op_ASSIGN_36); + return instruction_sequence; +} + +// Rdd = combine(#3,Ii) +RzILOpEffect *hex_il_op_sa1_combine3i(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + + // u = u; + RzILOpEffect *imm_assign_8 = SETL("u", u); + + // Rdd = ((Rdd & (~(0xffffffff << 0x0))) | ((((st64) u) & 0xffffffff) << 0x0)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0)); + RzILOpPure *op_NOT_6 = LOGNOT(op_LSHIFT_5); + RzILOpPure *op_AND_7 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_6); + RzILOpPure *op_AND_12 = LOGAND(CAST(64, IL_FALSE, VARL("u")), SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_16 = SHIFTL0(op_AND_12, SN(32, 0)); + RzILOpPure *op_OR_17 = LOGOR(op_AND_7, op_LSHIFT_16); + RzILOpEffect *op_ASSIGN_18 = WRITE_REG(bundle, Rdd_op, op_OR_17); + + // Rdd = ((Rdd & (~(0xffffffff << 0x20))) | ((((st64) 0x3) & 0xffffffff) << 0x20)); + RzILOpPure *op_LSHIFT_24 = 
SHIFTL0(SN(64, 0xffffffff), SN(32, 0x20)); + RzILOpPure *op_NOT_25 = LOGNOT(op_LSHIFT_24); + RzILOpPure *op_AND_26 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_25); + RzILOpPure *op_AND_30 = LOGAND(CAST(64, MSB(SN(32, 3)), SN(32, 3)), SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_34 = SHIFTL0(op_AND_30, SN(32, 0x20)); + RzILOpPure *op_OR_35 = LOGOR(op_AND_26, op_LSHIFT_34); + RzILOpEffect *op_ASSIGN_36 = WRITE_REG(bundle, Rdd_op, op_OR_35); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_8, op_ASSIGN_18, op_ASSIGN_36); + return instruction_sequence; +} + +// Rdd = combine(Rs,#0) +RzILOpEffect *hex_il_op_sa1_combinerz(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // Rdd = ((Rdd & (~(0xffffffff << 0x0))) | ((((st64) 0x0) & 0xffffffff) << 0x0)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0)); + RzILOpPure *op_NOT_6 = LOGNOT(op_LSHIFT_5); + RzILOpPure *op_AND_7 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_6); + RzILOpPure *op_AND_11 = LOGAND(CAST(64, MSB(SN(32, 0)), SN(32, 0)), SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_15 = SHIFTL0(op_AND_11, SN(32, 0)); + RzILOpPure *op_OR_16 = LOGOR(op_AND_7, op_LSHIFT_15); + RzILOpEffect *op_ASSIGN_17 = WRITE_REG(bundle, Rdd_op, op_OR_16); + + // Rdd = ((Rdd & (~(0xffffffff << 0x20))) | ((((st64) Rs) & 0xffffffff) << 0x20)); + RzILOpPure *op_LSHIFT_23 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0x20)); + RzILOpPure *op_NOT_24 = LOGNOT(op_LSHIFT_23); + RzILOpPure *op_AND_25 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_24); + RzILOpPure *op_AND_29 = LOGAND(CAST(64, MSB(Rs), DUP(Rs)), SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_33 = SHIFTL0(op_AND_29, SN(32, 0x20)); + RzILOpPure *op_OR_34 = LOGOR(op_AND_25, op_LSHIFT_33); + RzILOpEffect *op_ASSIGN_35 = WRITE_REG(bundle, Rdd_op, op_OR_34); + + RzILOpEffect 
*instruction_sequence = SEQN(2, op_ASSIGN_17, op_ASSIGN_35); + return instruction_sequence; +} + +// Rdd = combine(#0,Rs) +RzILOpEffect *hex_il_op_sa1_combinezr(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // Rdd = ((Rdd & (~(0xffffffff << 0x0))) | ((((st64) Rs) & 0xffffffff) << 0x0)); + RzILOpPure *op_LSHIFT_5 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0)); + RzILOpPure *op_NOT_6 = LOGNOT(op_LSHIFT_5); + RzILOpPure *op_AND_7 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_6); + RzILOpPure *op_AND_11 = LOGAND(CAST(64, MSB(Rs), DUP(Rs)), SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_15 = SHIFTL0(op_AND_11, SN(32, 0)); + RzILOpPure *op_OR_16 = LOGOR(op_AND_7, op_LSHIFT_15); + RzILOpEffect *op_ASSIGN_17 = WRITE_REG(bundle, Rdd_op, op_OR_16); + + // Rdd = ((Rdd & (~(0xffffffff << 0x20))) | ((((st64) 0x0) & 0xffffffff) << 0x20)); + RzILOpPure *op_LSHIFT_23 = SHIFTL0(SN(64, 0xffffffff), SN(32, 0x20)); + RzILOpPure *op_NOT_24 = LOGNOT(op_LSHIFT_23); + RzILOpPure *op_AND_25 = LOGAND(READ_REG(pkt, Rdd_op, true), op_NOT_24); + RzILOpPure *op_AND_29 = LOGAND(CAST(64, MSB(SN(32, 0)), SN(32, 0)), SN(64, 0xffffffff)); + RzILOpPure *op_LSHIFT_33 = SHIFTL0(op_AND_29, SN(32, 0x20)); + RzILOpPure *op_OR_34 = LOGOR(op_AND_25, op_LSHIFT_33); + RzILOpEffect *op_ASSIGN_35 = WRITE_REG(bundle, Rdd_op, op_OR_34); + + RzILOpEffect *instruction_sequence = SEQN(2, op_ASSIGN_17, op_ASSIGN_35); + return instruction_sequence; +} + +// Rd = add(Rs,n1) +RzILOpEffect *hex_il_op_sa1_dec(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // Rd = Rs - 0x1; + RzILOpPure *op_SUB_3 = SUB(Rs, SN(32, 1)); + 
RzILOpEffect *op_ASSIGN_4 = WRITE_REG(bundle, Rd_op, op_SUB_3); + + RzILOpEffect *instruction_sequence = op_ASSIGN_4; + return instruction_sequence; +} + +// Rd = add(Rs,#1) +RzILOpEffect *hex_il_op_sa1_inc(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // Rd = Rs + 0x1; + RzILOpPure *op_ADD_3 = ADD(Rs, SN(32, 1)); + RzILOpEffect *op_ASSIGN_4 = WRITE_REG(bundle, Rd_op, op_ADD_3); + + RzILOpEffect *instruction_sequence = op_ASSIGN_4; + return instruction_sequence; +} + +// Rd = Ii +RzILOpEffect *hex_il_op_sa1_seti(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + // READ + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // u = u; + RzILOpEffect *imm_assign_0 = SETL("u", u); + + // Rd = ((st32) u); + RzILOpEffect *op_ASSIGN_4 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, VARL("u"))); + + RzILOpEffect *instruction_sequence = SEQN(2, imm_assign_0, op_ASSIGN_4); + return instruction_sequence; +} + +// Rd = n1 +RzILOpEffect *hex_il_op_sa1_setin1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // Rd = -0x1; + RzILOpEffect *op_ASSIGN_3 = WRITE_REG(bundle, Rd_op, SN(32, -1)); + + RzILOpEffect *instruction_sequence = op_ASSIGN_3; + return instruction_sequence; +} + +// Rd = sxtb(Rs) +RzILOpEffect *hex_il_op_sa1_sxtb(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // Rd = ((st32) sextract64(((ut64) Rs), 0x0, 0x8)); + RzILOpEffect *op_ASSIGN_11 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(SEXTRACT64(CAST(64, IL_FALSE, Rs), SN(32, 0), 
SN(32, 8))), SEXTRACT64(CAST(64, IL_FALSE, DUP(Rs)), SN(32, 0), SN(32, 8)))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_11; + return instruction_sequence; +} + +// Rd = sxth(Rs) +RzILOpEffect *hex_il_op_sa1_sxth(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // Rd = ((st32) sextract64(((ut64) Rs), 0x0, 0x10)); + RzILOpEffect *op_ASSIGN_11 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(SEXTRACT64(CAST(64, IL_FALSE, Rs), SN(32, 0), SN(32, 16))), SEXTRACT64(CAST(64, IL_FALSE, DUP(Rs)), SN(32, 0), SN(32, 16)))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_11; + return instruction_sequence; +} + +// Rd = Rs +RzILOpEffect *hex_il_op_sa1_tfr(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // Rd = Rs; + RzILOpEffect *op_ASSIGN_2 = WRITE_REG(bundle, Rd_op, Rs); + + RzILOpEffect *instruction_sequence = op_ASSIGN_2; + return instruction_sequence; +} + +// Rd = and(Rs,#255) +RzILOpEffect *hex_il_op_sa1_zxtb(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // Rd = ((st32) extract64(((ut64) Rs), 0x0, 0x8)); + RzILOpEffect *op_ASSIGN_11 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, EXTRACT64(CAST(64, IL_FALSE, Rs), SN(32, 0), SN(32, 8)))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_11; + return instruction_sequence; +} + +// Rd = zxth(Rs) +RzILOpEffect *hex_il_op_sa1_zxth(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + 
// READ + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + + // Rd = ((st32) extract64(((ut64) Rs), 0x0, 0x10)); + RzILOpEffect *op_ASSIGN_11 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, EXTRACT64(CAST(64, IL_FALSE, Rs), SN(32, 0), SN(32, 16)))); + + RzILOpEffect *instruction_sequence = op_ASSIGN_11; + return instruction_sequence; +} + +#include \ No newline at end of file diff --git a/librz/arch/isa/hexagon/il_ops/hexagon_il_SL1_ops.c b/librz/arch/isa/hexagon/il_ops/hexagon_il_SL1_ops.c new file mode 100644 index 00000000000..04e83198618 --- /dev/null +++ b/librz/arch/isa/hexagon/il_ops/hexagon_il_SL1_ops.c @@ -0,0 +1,69 @@ +// SPDX-FileCopyrightText: 2021 Rot127 +// SPDX-License-Identifier: LGPL-3.0-only + +// LLVM commit: b6f51787f6c8e77143f0aef6b58ddc7c55741d5c +// LLVM commit date: 2023-11-15 07:10:59 -0800 (ISO 8601 format) +// Date of code generation: 2024-03-16 06:22:39-05:00 +//======================================== +// The following code is generated. +// Do not edit. 
Repository of code generator: +// https://github.com/rizinorg/rz-hexagon + +#include +#include "../hexagon_il.h" +#include +#include + +// Rd = memw(Rs+Ii) +RzILOpEffect *hex_il_op_sl1_loadri_io(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // u = u; + RzILOpEffect *imm_assign_2 = SETL("u", u); + + // EA = ((ut32) Rs) + u; + RzILOpPure *op_ADD_5 = ADD(CAST(32, IL_FALSE, Rs), VARL("u")); + RzILOpEffect *op_ASSIGN_6 = SETL("EA", op_ADD_5); + + // Rd = ((st32) ((ut32) mem_load_32(EA))); + RzILOpPure *ml_EA_9 = LOADW(32, VARL("EA")); + RzILOpEffect *op_ASSIGN_12 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, CAST(32, IL_FALSE, ml_EA_9))); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_2, op_ASSIGN_6, op_ASSIGN_12); + return instruction_sequence; +} + +// Rd = memub(Rs+Ii) +RzILOpEffect *hex_il_op_sl1_loadrub_io(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // u = u; + RzILOpEffect *imm_assign_2 = SETL("u", u); + + // EA = ((ut32) Rs) + u; + RzILOpPure *op_ADD_5 = ADD(CAST(32, IL_FALSE, Rs), VARL("u")); + RzILOpEffect *op_ASSIGN_6 = SETL("EA", op_ADD_5); + + // Rd = ((st32) ((ut8) mem_load_8(EA))); + RzILOpPure *ml_EA_9 = LOADW(8, VARL("EA")); + RzILOpEffect *op_ASSIGN_12 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, CAST(8, IL_FALSE, ml_EA_9))); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_2, op_ASSIGN_6, op_ASSIGN_12); + return instruction_sequence; +} + +#include \ No newline at end of file diff 
--git a/librz/arch/isa/hexagon/il_ops/hexagon_il_SL2_ops.c b/librz/arch/isa/hexagon/il_ops/hexagon_il_SL2_ops.c new file mode 100644 index 00000000000..613dd76df11 --- /dev/null +++ b/librz/arch/isa/hexagon/il_ops/hexagon_il_SL2_ops.c @@ -0,0 +1,594 @@ +// SPDX-FileCopyrightText: 2021 Rot127 +// SPDX-License-Identifier: LGPL-3.0-only + +// LLVM commit: b6f51787f6c8e77143f0aef6b58ddc7c55741d5c +// LLVM commit date: 2023-11-15 07:10:59 -0800 (ISO 8601 format) +// Date of code generation: 2024-03-16 06:22:39-05:00 +//======================================== +// The following code is generated. +// Do not edit. Repository of code generator: +// https://github.com/rizinorg/rz-hexagon + +#include +#include "../hexagon_il.h" +#include +#include + +// deallocframe +RzILOpEffect *hex_il_op_sl2_deallocframe(HexInsnPktBundle *bundle) { + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut64 tmp; + // Declare: ut32 EA; + const HexOp fp_op = ALIAS2OP(HEX_REG_ALIAS_FP, false); + const HexOp framekey_op = ALIAS2OP(HEX_REG_ALIAS_FRAMEKEY, false); + RzILOpPure *framekey = READ_REG(pkt, &framekey_op, false); + const HexOp lr_op = ALIAS2OP(HEX_REG_ALIAS_LR, false); + const HexOp sp_op = ALIAS2OP(HEX_REG_ALIAS_SP, false); + + // EA = fp; + RzILOpEffect *op_ASSIGN_3 = SETL("EA", READ_REG(pkt, &fp_op, true)); + + // tmp = ((ut64) mem_load_64(EA)); + RzILOpPure *ml_EA_5 = LOADW(64, VARL("EA")); + RzILOpEffect *op_ASSIGN_7 = SETL("tmp", CAST(64, IL_FALSE, ml_EA_5)); + + // tmp = (tmp ^ (((ut64) framekey) << 0x20)); + RzILOpPure *op_LSHIFT_11 = SHIFTL0(CAST(64, IL_FALSE, framekey), SN(32, 0x20)); + RzILOpPure *op_XOR_12 = LOGXOR(VARL("tmp"), op_LSHIFT_11); + RzILOpEffect *op_ASSIGN_13 = SETL("tmp", op_XOR_12); + + // lr = ((ut32) ((st64) ((st32) ((tmp >> 0x20) & ((ut64) 0xffffffff))))); + RzILOpPure *op_RSHIFT_18 = SHIFTR0(VARL("tmp"), SN(32, 0x20)); + RzILOpPure *op_AND_21 = LOGAND(op_RSHIFT_18, CAST(64, IL_FALSE, SN(64, 0xffffffff))); + RzILOpEffect *op_ASSIGN_25 = WRITE_REG(bundle, 
&lr_op, CAST(32, IL_FALSE, CAST(64, MSB(CAST(32, IL_FALSE, op_AND_21)), CAST(32, IL_FALSE, DUP(op_AND_21))))); + + // fp = ((ut32) ((st64) ((st32) ((tmp >> 0x0) & ((ut64) 0xffffffff))))); + RzILOpPure *op_RSHIFT_29 = SHIFTR0(VARL("tmp"), SN(32, 0)); + RzILOpPure *op_AND_32 = LOGAND(op_RSHIFT_29, CAST(64, IL_FALSE, SN(64, 0xffffffff))); + RzILOpEffect *op_ASSIGN_36 = WRITE_REG(bundle, &fp_op, CAST(32, IL_FALSE, CAST(64, MSB(CAST(32, IL_FALSE, op_AND_32)), CAST(32, IL_FALSE, DUP(op_AND_32))))); + + // sp = EA + ((ut32) 0x8); + RzILOpPure *op_ADD_40 = ADD(VARL("EA"), CAST(32, IL_FALSE, SN(32, 8))); + RzILOpEffect *op_ASSIGN_41 = WRITE_REG(bundle, &sp_op, op_ADD_40); + + RzILOpEffect *instruction_sequence = SEQN(6, op_ASSIGN_3, op_ASSIGN_7, op_ASSIGN_13, op_ASSIGN_25, op_ASSIGN_36, op_ASSIGN_41); + return instruction_sequence; +} + +// jumpr r31 +RzILOpEffect *hex_il_op_sl2_jumpr31(HexInsnPktBundle *bundle) { + HexPkt *pkt = bundle->pkt; + // READ + const HexOp lr_op = ALIAS2OP(HEX_REG_ALIAS_LR, false); + RzILOpPure *lr = READ_REG(pkt, &lr_op, false); + + // jump(lr); + RzILOpEffect *jump_lr_1 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", lr)); + + RzILOpEffect *instruction_sequence = jump_lr_1; + return instruction_sequence; +} + +// if (!p0) jumpr r31 +RzILOpEffect *hex_il_op_sl2_jumpr31_f(HexInsnPktBundle *bundle) { + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P0_op = EXPLICIT2OP(0, HEX_REG_CLASS_PRED_REGS, false); + RzILOpPure *P0 = READ_REG(pkt, &P0_op, false); + const HexOp lr_op = ALIAS2OP(HEX_REG_ALIAS_LR, false); + RzILOpPure *lr = READ_REG(pkt, &lr_op, false); + + // jump(lr); + RzILOpEffect *jump_lr_7 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", lr)); + + // seq(jump(lr)); + RzILOpEffect *seq_then_9 = jump_lr_7; + + // if (! 
(((st32) P0) & 0x1)) {seq(jump(lr))} else {{}}; + RzILOpPure *op_AND_4 = LOGAND(CAST(32, MSB(P0), DUP(P0)), SN(32, 1)); + RzILOpPure *op_INV_5 = INV(NON_ZERO(op_AND_4)); + RzILOpEffect *branch_10 = BRANCH(op_INV_5, seq_then_9, EMPTY()); + + RzILOpEffect *instruction_sequence = branch_10; + return instruction_sequence; +} + +// if (!p0.new) jumpr:nt r31 +RzILOpEffect *hex_il_op_sl2_jumpr31_fnew(HexInsnPktBundle *bundle) { + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P0_new_op = EXPLICIT2OP(0, HEX_REG_CLASS_PRED_REGS, true); + RzILOpPure *P0_new = READ_REG(pkt, &P0_new_op, true); + const HexOp lr_op = ALIAS2OP(HEX_REG_ALIAS_LR, false); + RzILOpPure *lr = READ_REG(pkt, &lr_op, false); + + // jump(lr); + RzILOpEffect *jump_lr_7 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", lr)); + + // seq(jump(lr)); + RzILOpEffect *seq_then_9 = jump_lr_7; + + // if (! (((st32) P0_new) & 0x1)) {seq(jump(lr))} else {{}}; + RzILOpPure *op_AND_4 = LOGAND(CAST(32, MSB(P0_new), DUP(P0_new)), SN(32, 1)); + RzILOpPure *op_INV_5 = INV(NON_ZERO(op_AND_4)); + RzILOpEffect *branch_10 = BRANCH(op_INV_5, seq_then_9, EMPTY()); + + RzILOpEffect *instruction_sequence = branch_10; + return instruction_sequence; +} + +// if (p0) jumpr r31 +RzILOpEffect *hex_il_op_sl2_jumpr31_t(HexInsnPktBundle *bundle) { + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P0_op = EXPLICIT2OP(0, HEX_REG_CLASS_PRED_REGS, false); + RzILOpPure *P0 = READ_REG(pkt, &P0_op, false); + const HexOp lr_op = ALIAS2OP(HEX_REG_ALIAS_LR, false); + RzILOpPure *lr = READ_REG(pkt, &lr_op, false); + + // jump(lr); + RzILOpEffect *jump_lr_6 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", lr)); + + // seq(jump(lr)); + RzILOpEffect *seq_then_8 = jump_lr_6; + + // if ((((st32) P0) & 0x1)) {seq(jump(lr))} else {{}}; + RzILOpPure *op_AND_4 = LOGAND(CAST(32, MSB(P0), DUP(P0)), SN(32, 1)); + RzILOpEffect *branch_9 = BRANCH(NON_ZERO(op_AND_4), seq_then_8, EMPTY()); + + RzILOpEffect *instruction_sequence = branch_9; + 
return instruction_sequence; +} + +// if (p0.new) jumpr:nt r31 +RzILOpEffect *hex_il_op_sl2_jumpr31_tnew(HexInsnPktBundle *bundle) { + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P0_new_op = EXPLICIT2OP(0, HEX_REG_CLASS_PRED_REGS, true); + RzILOpPure *P0_new = READ_REG(pkt, &P0_new_op, true); + const HexOp lr_op = ALIAS2OP(HEX_REG_ALIAS_LR, false); + RzILOpPure *lr = READ_REG(pkt, &lr_op, false); + + // jump(lr); + RzILOpEffect *jump_lr_6 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", lr)); + + // seq(jump(lr)); + RzILOpEffect *seq_then_8 = jump_lr_6; + + // if ((((st32) P0_new) & 0x1)) {seq(jump(lr))} else {{}}; + RzILOpPure *op_AND_4 = LOGAND(CAST(32, MSB(P0_new), DUP(P0_new)), SN(32, 1)); + RzILOpEffect *branch_9 = BRANCH(NON_ZERO(op_AND_4), seq_then_8, EMPTY()); + + RzILOpEffect *instruction_sequence = branch_9; + return instruction_sequence; +} + +// Rd = memb(Rs+Ii) +RzILOpEffect *hex_il_op_sl2_loadrb_io(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // u = u; + RzILOpEffect *imm_assign_2 = SETL("u", u); + + // EA = ((ut32) Rs) + u; + RzILOpPure *op_ADD_5 = ADD(CAST(32, IL_FALSE, Rs), VARL("u")); + RzILOpEffect *op_ASSIGN_6 = SETL("EA", op_ADD_5); + + // Rd = ((st32) ((st8) mem_load_8(EA))); + RzILOpPure *ml_EA_9 = LOADW(8, VARL("EA")); + RzILOpEffect *op_ASSIGN_12 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(CAST(8, MSB(ml_EA_9), DUP(ml_EA_9))), CAST(8, MSB(DUP(ml_EA_9)), DUP(ml_EA_9)))); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_2, op_ASSIGN_6, op_ASSIGN_12); + return instruction_sequence; +} + +// Rdd = memd(r29+Ii) +RzILOpEffect *hex_il_op_sl2_loadrd_sp(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + 
// Declare: ut32 EA; + const HexOp sp_op = ALIAS2OP(HEX_REG_ALIAS_SP, false); + RzILOpPure *sp = READ_REG(pkt, &sp_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Rdd_op = ISA2REG(hi, 'd', false); + + // u = u; + RzILOpEffect *imm_assign_2 = SETL("u", u); + + // EA = sp + u; + RzILOpPure *op_ADD_4 = ADD(sp, VARL("u")); + RzILOpEffect *op_ASSIGN_5 = SETL("EA", op_ADD_4); + + // Rdd = ((st64) ((ut64) mem_load_64(EA))); + RzILOpPure *ml_EA_8 = LOADW(64, VARL("EA")); + RzILOpEffect *op_ASSIGN_11 = WRITE_REG(bundle, Rdd_op, CAST(64, IL_FALSE, CAST(64, IL_FALSE, ml_EA_8))); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_2, op_ASSIGN_5, op_ASSIGN_11); + return instruction_sequence; +} + +// Rd = memh(Rs+Ii) +RzILOpEffect *hex_il_op_sl2_loadrh_io(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // u = u; + RzILOpEffect *imm_assign_2 = SETL("u", u); + + // EA = ((ut32) Rs) + u; + RzILOpPure *op_ADD_5 = ADD(CAST(32, IL_FALSE, Rs), VARL("u")); + RzILOpEffect *op_ASSIGN_6 = SETL("EA", op_ADD_5); + + // Rd = ((st32) ((st16) mem_load_16(EA))); + RzILOpPure *ml_EA_9 = LOADW(16, VARL("EA")); + RzILOpEffect *op_ASSIGN_12 = WRITE_REG(bundle, Rd_op, CAST(32, MSB(CAST(16, MSB(ml_EA_9), DUP(ml_EA_9))), CAST(16, MSB(DUP(ml_EA_9)), DUP(ml_EA_9)))); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_2, op_ASSIGN_6, op_ASSIGN_12); + return instruction_sequence; +} + +// Rd = memw(r29+Ii) +RzILOpEffect *hex_il_op_sl2_loadri_sp(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp sp_op = ALIAS2OP(HEX_REG_ALIAS_SP, false); + RzILOpPure *sp = READ_REG(pkt, &sp_op, false); + RzILOpPure *u 
= UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // u = u; + RzILOpEffect *imm_assign_2 = SETL("u", u); + + // EA = sp + u; + RzILOpPure *op_ADD_4 = ADD(sp, VARL("u")); + RzILOpEffect *op_ASSIGN_5 = SETL("EA", op_ADD_4); + + // Rd = ((st32) ((ut32) mem_load_32(EA))); + RzILOpPure *ml_EA_8 = LOADW(32, VARL("EA")); + RzILOpEffect *op_ASSIGN_11 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, CAST(32, IL_FALSE, ml_EA_8))); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_2, op_ASSIGN_5, op_ASSIGN_11); + return instruction_sequence; +} + +// Rd = memuh(Rs+Ii) +RzILOpEffect *hex_il_op_sl2_loadruh_io(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Rd_op = ISA2REG(hi, 'd', false); + + // u = u; + RzILOpEffect *imm_assign_2 = SETL("u", u); + + // EA = ((ut32) Rs) + u; + RzILOpPure *op_ADD_5 = ADD(CAST(32, IL_FALSE, Rs), VARL("u")); + RzILOpEffect *op_ASSIGN_6 = SETL("EA", op_ADD_5); + + // Rd = ((st32) ((ut16) mem_load_16(EA))); + RzILOpPure *ml_EA_9 = LOADW(16, VARL("EA")); + RzILOpEffect *op_ASSIGN_12 = WRITE_REG(bundle, Rd_op, CAST(32, IL_FALSE, CAST(16, IL_FALSE, ml_EA_9))); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_2, op_ASSIGN_6, op_ASSIGN_12); + return instruction_sequence; +} + +// dealloc_return +RzILOpEffect *hex_il_op_sl2_return(HexInsnPktBundle *bundle) { + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut64 tmp; + // Declare: ut32 EA; + const HexOp fp_op = ALIAS2OP(HEX_REG_ALIAS_FP, false); + const HexOp framekey_op = ALIAS2OP(HEX_REG_ALIAS_FRAMEKEY, false); + RzILOpPure *framekey = READ_REG(pkt, &framekey_op, false); + const HexOp lr_op = ALIAS2OP(HEX_REG_ALIAS_LR, false); + const HexOp sp_op = ALIAS2OP(HEX_REG_ALIAS_SP, false); + + // EA = fp; + RzILOpEffect 
*op_ASSIGN_3 = SETL("EA", READ_REG(pkt, &fp_op, true)); + + // tmp = ((ut64) mem_load_64(EA)); + RzILOpPure *ml_EA_5 = LOADW(64, VARL("EA")); + RzILOpEffect *op_ASSIGN_7 = SETL("tmp", CAST(64, IL_FALSE, ml_EA_5)); + + // tmp = (tmp ^ (((ut64) framekey) << 0x20)); + RzILOpPure *op_LSHIFT_11 = SHIFTL0(CAST(64, IL_FALSE, framekey), SN(32, 0x20)); + RzILOpPure *op_XOR_12 = LOGXOR(VARL("tmp"), op_LSHIFT_11); + RzILOpEffect *op_ASSIGN_13 = SETL("tmp", op_XOR_12); + + // lr = ((ut32) ((st64) ((st32) ((tmp >> 0x20) & ((ut64) 0xffffffff))))); + RzILOpPure *op_RSHIFT_18 = SHIFTR0(VARL("tmp"), SN(32, 0x20)); + RzILOpPure *op_AND_21 = LOGAND(op_RSHIFT_18, CAST(64, IL_FALSE, SN(64, 0xffffffff))); + RzILOpEffect *op_ASSIGN_25 = WRITE_REG(bundle, &lr_op, CAST(32, IL_FALSE, CAST(64, MSB(CAST(32, IL_FALSE, op_AND_21)), CAST(32, IL_FALSE, DUP(op_AND_21))))); + + // fp = ((ut32) ((st64) ((st32) ((tmp >> 0x0) & ((ut64) 0xffffffff))))); + RzILOpPure *op_RSHIFT_29 = SHIFTR0(VARL("tmp"), SN(32, 0)); + RzILOpPure *op_AND_32 = LOGAND(op_RSHIFT_29, CAST(64, IL_FALSE, SN(64, 0xffffffff))); + RzILOpEffect *op_ASSIGN_36 = WRITE_REG(bundle, &fp_op, CAST(32, IL_FALSE, CAST(64, MSB(CAST(32, IL_FALSE, op_AND_32)), CAST(32, IL_FALSE, DUP(op_AND_32))))); + + // sp = EA + ((ut32) 0x8); + RzILOpPure *op_ADD_40 = ADD(VARL("EA"), CAST(32, IL_FALSE, SN(32, 8))); + RzILOpEffect *op_ASSIGN_41 = WRITE_REG(bundle, &sp_op, op_ADD_40); + + // jump(((ut32) ((st64) ((st32) ((tmp >> 0x20) & ((ut64) 0xffffffff)))))); + RzILOpPure *op_RSHIFT_45 = SHIFTR0(VARL("tmp"), SN(32, 0x20)); + RzILOpPure *op_AND_48 = LOGAND(op_RSHIFT_45, CAST(64, IL_FALSE, SN(64, 0xffffffff))); + RzILOpEffect *jump_cast_ut32_51_52 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", CAST(32, IL_FALSE, CAST(64, MSB(CAST(32, IL_FALSE, op_AND_48)), CAST(32, IL_FALSE, DUP(op_AND_48)))))); + + RzILOpEffect *instruction_sequence = SEQN(7, op_ASSIGN_3, op_ASSIGN_7, op_ASSIGN_13, op_ASSIGN_25, op_ASSIGN_36, op_ASSIGN_41, jump_cast_ut32_51_52); + 
return instruction_sequence; +} + +// if (!p0) dealloc_return +RzILOpEffect *hex_il_op_sl2_return_f(HexInsnPktBundle *bundle) { + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut64 tmp; + // Declare: ut32 EA; + const HexOp fp_op = ALIAS2OP(HEX_REG_ALIAS_FP, false); + const HexOp P0_op = EXPLICIT2OP(0, HEX_REG_CLASS_PRED_REGS, false); + RzILOpPure *P0 = READ_REG(pkt, &P0_op, false); + const HexOp framekey_op = ALIAS2OP(HEX_REG_ALIAS_FRAMEKEY, false); + RzILOpPure *framekey = READ_REG(pkt, &framekey_op, false); + const HexOp lr_op = ALIAS2OP(HEX_REG_ALIAS_LR, false); + const HexOp sp_op = ALIAS2OP(HEX_REG_ALIAS_SP, false); + + // EA = fp; + RzILOpEffect *op_ASSIGN_5 = SETL("EA", READ_REG(pkt, &fp_op, true)); + + // tmp = ((ut64) mem_load_64(EA)); + RzILOpPure *ml_EA_12 = LOADW(64, VARL("EA")); + RzILOpEffect *op_ASSIGN_14 = SETL("tmp", CAST(64, IL_FALSE, ml_EA_12)); + + // tmp = (tmp ^ (((ut64) framekey) << 0x20)); + RzILOpPure *op_LSHIFT_18 = SHIFTL0(CAST(64, IL_FALSE, framekey), SN(32, 0x20)); + RzILOpPure *op_XOR_19 = LOGXOR(VARL("tmp"), op_LSHIFT_18); + RzILOpEffect *op_ASSIGN_20 = SETL("tmp", op_XOR_19); + + // lr = ((ut32) ((st64) ((st32) ((tmp >> 0x20) & ((ut64) 0xffffffff))))); + RzILOpPure *op_RSHIFT_25 = SHIFTR0(VARL("tmp"), SN(32, 0x20)); + RzILOpPure *op_AND_28 = LOGAND(op_RSHIFT_25, CAST(64, IL_FALSE, SN(64, 0xffffffff))); + RzILOpEffect *op_ASSIGN_32 = WRITE_REG(bundle, &lr_op, CAST(32, IL_FALSE, CAST(64, MSB(CAST(32, IL_FALSE, op_AND_28)), CAST(32, IL_FALSE, DUP(op_AND_28))))); + + // fp = ((ut32) ((st64) ((st32) ((tmp >> 0x0) & ((ut64) 0xffffffff))))); + RzILOpPure *op_RSHIFT_36 = SHIFTR0(VARL("tmp"), SN(32, 0)); + RzILOpPure *op_AND_39 = LOGAND(op_RSHIFT_36, CAST(64, IL_FALSE, SN(64, 0xffffffff))); + RzILOpEffect *op_ASSIGN_43 = WRITE_REG(bundle, &fp_op, CAST(32, IL_FALSE, CAST(64, MSB(CAST(32, IL_FALSE, op_AND_39)), CAST(32, IL_FALSE, DUP(op_AND_39))))); + + // sp = EA + ((ut32) 0x8); + RzILOpPure *op_ADD_47 = ADD(VARL("EA"), CAST(32, IL_FALSE, 
SN(32, 8))); + RzILOpEffect *op_ASSIGN_48 = WRITE_REG(bundle, &sp_op, op_ADD_47); + + // jump(((ut32) ((st64) ((st32) ((tmp >> 0x20) & ((ut64) 0xffffffff)))))); + RzILOpPure *op_RSHIFT_52 = SHIFTR0(VARL("tmp"), SN(32, 0x20)); + RzILOpPure *op_AND_55 = LOGAND(op_RSHIFT_52, CAST(64, IL_FALSE, SN(64, 0xffffffff))); + RzILOpEffect *jump_cast_ut32_58_59 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", CAST(32, IL_FALSE, CAST(64, MSB(CAST(32, IL_FALSE, op_AND_55)), CAST(32, IL_FALSE, DUP(op_AND_55)))))); + + // nop; + RzILOpEffect *nop_61 = NOP(); + + // seq(tmp = ((ut64) mem_load_64(EA)); tmp = (tmp ^ (((ut64) framek ...; + RzILOpEffect *seq_then_63 = SEQN(6, op_ASSIGN_14, op_ASSIGN_20, op_ASSIGN_32, op_ASSIGN_43, op_ASSIGN_48, jump_cast_ut32_58_59); + + // seq(nop); + RzILOpEffect *seq_else_64 = nop_61; + + // if (! (((st32) P0) & 0x1)) {seq(tmp = ((ut64) mem_load_64(EA)); tmp = (tmp ^ (((ut64) framek ...} else {seq(nop)}; + RzILOpPure *op_AND_10 = LOGAND(CAST(32, MSB(P0), DUP(P0)), SN(32, 1)); + RzILOpPure *op_INV_11 = INV(NON_ZERO(op_AND_10)); + RzILOpEffect *branch_65 = BRANCH(op_INV_11, seq_then_63, seq_else_64); + + RzILOpEffect *instruction_sequence = SEQN(2, op_ASSIGN_5, branch_65); + return instruction_sequence; +} + +// if (!p0.new) dealloc_return:nt +RzILOpEffect *hex_il_op_sl2_return_fnew(HexInsnPktBundle *bundle) { + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut64 tmp; + // Declare: ut32 EA; + const HexOp fp_op = ALIAS2OP(HEX_REG_ALIAS_FP, false); + const HexOp P0_new_op = EXPLICIT2OP(0, HEX_REG_CLASS_PRED_REGS, true); + RzILOpPure *P0_new = READ_REG(pkt, &P0_new_op, true); + const HexOp framekey_op = ALIAS2OP(HEX_REG_ALIAS_FRAMEKEY, false); + RzILOpPure *framekey = READ_REG(pkt, &framekey_op, false); + const HexOp lr_op = ALIAS2OP(HEX_REG_ALIAS_LR, false); + const HexOp sp_op = ALIAS2OP(HEX_REG_ALIAS_SP, false); + + // EA = fp; + RzILOpEffect *op_ASSIGN_4 = SETL("EA", READ_REG(pkt, &fp_op, true)); + + // tmp = ((ut64) mem_load_64(EA)); + 
RzILOpPure *ml_EA_11 = LOADW(64, VARL("EA")); + RzILOpEffect *op_ASSIGN_13 = SETL("tmp", CAST(64, IL_FALSE, ml_EA_11)); + + // tmp = (tmp ^ (((ut64) framekey) << 0x20)); + RzILOpPure *op_LSHIFT_17 = SHIFTL0(CAST(64, IL_FALSE, framekey), SN(32, 0x20)); + RzILOpPure *op_XOR_18 = LOGXOR(VARL("tmp"), op_LSHIFT_17); + RzILOpEffect *op_ASSIGN_19 = SETL("tmp", op_XOR_18); + + // lr = ((ut32) ((st64) ((st32) ((tmp >> 0x20) & ((ut64) 0xffffffff))))); + RzILOpPure *op_RSHIFT_24 = SHIFTR0(VARL("tmp"), SN(32, 0x20)); + RzILOpPure *op_AND_27 = LOGAND(op_RSHIFT_24, CAST(64, IL_FALSE, SN(64, 0xffffffff))); + RzILOpEffect *op_ASSIGN_31 = WRITE_REG(bundle, &lr_op, CAST(32, IL_FALSE, CAST(64, MSB(CAST(32, IL_FALSE, op_AND_27)), CAST(32, IL_FALSE, DUP(op_AND_27))))); + + // fp = ((ut32) ((st64) ((st32) ((tmp >> 0x0) & ((ut64) 0xffffffff))))); + RzILOpPure *op_RSHIFT_35 = SHIFTR0(VARL("tmp"), SN(32, 0)); + RzILOpPure *op_AND_38 = LOGAND(op_RSHIFT_35, CAST(64, IL_FALSE, SN(64, 0xffffffff))); + RzILOpEffect *op_ASSIGN_42 = WRITE_REG(bundle, &fp_op, CAST(32, IL_FALSE, CAST(64, MSB(CAST(32, IL_FALSE, op_AND_38)), CAST(32, IL_FALSE, DUP(op_AND_38))))); + + // sp = EA + ((ut32) 0x8); + RzILOpPure *op_ADD_46 = ADD(VARL("EA"), CAST(32, IL_FALSE, SN(32, 8))); + RzILOpEffect *op_ASSIGN_47 = WRITE_REG(bundle, &sp_op, op_ADD_46); + + // jump(((ut32) ((st64) ((st32) ((tmp >> 0x20) & ((ut64) 0xffffffff)))))); + RzILOpPure *op_RSHIFT_51 = SHIFTR0(VARL("tmp"), SN(32, 0x20)); + RzILOpPure *op_AND_54 = LOGAND(op_RSHIFT_51, CAST(64, IL_FALSE, SN(64, 0xffffffff))); + RzILOpEffect *jump_cast_ut32_57_58 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", CAST(32, IL_FALSE, CAST(64, MSB(CAST(32, IL_FALSE, op_AND_54)), CAST(32, IL_FALSE, DUP(op_AND_54)))))); + + // nop; + RzILOpEffect *nop_60 = NOP(); + + // seq(tmp = ((ut64) mem_load_64(EA)); tmp = (tmp ^ (((ut64) framek ...; + RzILOpEffect *seq_then_62 = SEQN(6, op_ASSIGN_13, op_ASSIGN_19, op_ASSIGN_31, op_ASSIGN_42, op_ASSIGN_47, jump_cast_ut32_57_58); 
+ + // seq(nop); + RzILOpEffect *seq_else_63 = nop_60; + + // if (! (((st32) P0_new) & 0x1)) {seq(tmp = ((ut64) mem_load_64(EA)); tmp = (tmp ^ (((ut64) framek ...} else {seq(nop)}; + RzILOpPure *op_AND_9 = LOGAND(CAST(32, MSB(P0_new), DUP(P0_new)), SN(32, 1)); + RzILOpPure *op_INV_10 = INV(NON_ZERO(op_AND_9)); + RzILOpEffect *branch_64 = BRANCH(op_INV_10, seq_then_62, seq_else_63); + + RzILOpEffect *instruction_sequence = SEQN(2, op_ASSIGN_4, branch_64); + return instruction_sequence; +} + +// if (p0) dealloc_return +RzILOpEffect *hex_il_op_sl2_return_t(HexInsnPktBundle *bundle) { + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut64 tmp; + // Declare: ut32 EA; + const HexOp fp_op = ALIAS2OP(HEX_REG_ALIAS_FP, false); + const HexOp P0_op = EXPLICIT2OP(0, HEX_REG_CLASS_PRED_REGS, false); + RzILOpPure *P0 = READ_REG(pkt, &P0_op, false); + const HexOp framekey_op = ALIAS2OP(HEX_REG_ALIAS_FRAMEKEY, false); + RzILOpPure *framekey = READ_REG(pkt, &framekey_op, false); + const HexOp lr_op = ALIAS2OP(HEX_REG_ALIAS_LR, false); + const HexOp sp_op = ALIAS2OP(HEX_REG_ALIAS_SP, false); + + // EA = fp; + RzILOpEffect *op_ASSIGN_5 = SETL("EA", READ_REG(pkt, &fp_op, true)); + + // tmp = ((ut64) mem_load_64(EA)); + RzILOpPure *ml_EA_11 = LOADW(64, VARL("EA")); + RzILOpEffect *op_ASSIGN_13 = SETL("tmp", CAST(64, IL_FALSE, ml_EA_11)); + + // tmp = (tmp ^ (((ut64) framekey) << 0x20)); + RzILOpPure *op_LSHIFT_17 = SHIFTL0(CAST(64, IL_FALSE, framekey), SN(32, 0x20)); + RzILOpPure *op_XOR_18 = LOGXOR(VARL("tmp"), op_LSHIFT_17); + RzILOpEffect *op_ASSIGN_19 = SETL("tmp", op_XOR_18); + + // lr = ((ut32) ((st64) ((st32) ((tmp >> 0x20) & ((ut64) 0xffffffff))))); + RzILOpPure *op_RSHIFT_24 = SHIFTR0(VARL("tmp"), SN(32, 0x20)); + RzILOpPure *op_AND_27 = LOGAND(op_RSHIFT_24, CAST(64, IL_FALSE, SN(64, 0xffffffff))); + RzILOpEffect *op_ASSIGN_31 = WRITE_REG(bundle, &lr_op, CAST(32, IL_FALSE, CAST(64, MSB(CAST(32, IL_FALSE, op_AND_27)), CAST(32, IL_FALSE, DUP(op_AND_27))))); + + // fp = 
((ut32) ((st64) ((st32) ((tmp >> 0x0) & ((ut64) 0xffffffff))))); + RzILOpPure *op_RSHIFT_35 = SHIFTR0(VARL("tmp"), SN(32, 0)); + RzILOpPure *op_AND_38 = LOGAND(op_RSHIFT_35, CAST(64, IL_FALSE, SN(64, 0xffffffff))); + RzILOpEffect *op_ASSIGN_42 = WRITE_REG(bundle, &fp_op, CAST(32, IL_FALSE, CAST(64, MSB(CAST(32, IL_FALSE, op_AND_38)), CAST(32, IL_FALSE, DUP(op_AND_38))))); + + // sp = EA + ((ut32) 0x8); + RzILOpPure *op_ADD_46 = ADD(VARL("EA"), CAST(32, IL_FALSE, SN(32, 8))); + RzILOpEffect *op_ASSIGN_47 = WRITE_REG(bundle, &sp_op, op_ADD_46); + + // jump(((ut32) ((st64) ((st32) ((tmp >> 0x20) & ((ut64) 0xffffffff)))))); + RzILOpPure *op_RSHIFT_51 = SHIFTR0(VARL("tmp"), SN(32, 0x20)); + RzILOpPure *op_AND_54 = LOGAND(op_RSHIFT_51, CAST(64, IL_FALSE, SN(64, 0xffffffff))); + RzILOpEffect *jump_cast_ut32_57_58 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", CAST(32, IL_FALSE, CAST(64, MSB(CAST(32, IL_FALSE, op_AND_54)), CAST(32, IL_FALSE, DUP(op_AND_54)))))); + + // nop; + RzILOpEffect *nop_60 = NOP(); + + // seq(tmp = ((ut64) mem_load_64(EA)); tmp = (tmp ^ (((ut64) framek ...; + RzILOpEffect *seq_then_62 = SEQN(6, op_ASSIGN_13, op_ASSIGN_19, op_ASSIGN_31, op_ASSIGN_42, op_ASSIGN_47, jump_cast_ut32_57_58); + + // seq(nop); + RzILOpEffect *seq_else_63 = nop_60; + + // if ((((st32) P0) & 0x1)) {seq(tmp = ((ut64) mem_load_64(EA)); tmp = (tmp ^ (((ut64) framek ...} else {seq(nop)}; + RzILOpPure *op_AND_10 = LOGAND(CAST(32, MSB(P0), DUP(P0)), SN(32, 1)); + RzILOpEffect *branch_64 = BRANCH(NON_ZERO(op_AND_10), seq_then_62, seq_else_63); + + RzILOpEffect *instruction_sequence = SEQN(2, op_ASSIGN_5, branch_64); + return instruction_sequence; +} + +// if (p0.new) dealloc_return:nt +RzILOpEffect *hex_il_op_sl2_return_tnew(HexInsnPktBundle *bundle) { + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut64 tmp; + // Declare: ut32 EA; + const HexOp fp_op = ALIAS2OP(HEX_REG_ALIAS_FP, false); + const HexOp P0_new_op = EXPLICIT2OP(0, HEX_REG_CLASS_PRED_REGS, true); + 
RzILOpPure *P0_new = READ_REG(pkt, &P0_new_op, true); + const HexOp framekey_op = ALIAS2OP(HEX_REG_ALIAS_FRAMEKEY, false); + RzILOpPure *framekey = READ_REG(pkt, &framekey_op, false); + const HexOp lr_op = ALIAS2OP(HEX_REG_ALIAS_LR, false); + const HexOp sp_op = ALIAS2OP(HEX_REG_ALIAS_SP, false); + + // EA = fp; + RzILOpEffect *op_ASSIGN_4 = SETL("EA", READ_REG(pkt, &fp_op, true)); + + // tmp = ((ut64) mem_load_64(EA)); + RzILOpPure *ml_EA_10 = LOADW(64, VARL("EA")); + RzILOpEffect *op_ASSIGN_12 = SETL("tmp", CAST(64, IL_FALSE, ml_EA_10)); + + // tmp = (tmp ^ (((ut64) framekey) << 0x20)); + RzILOpPure *op_LSHIFT_16 = SHIFTL0(CAST(64, IL_FALSE, framekey), SN(32, 0x20)); + RzILOpPure *op_XOR_17 = LOGXOR(VARL("tmp"), op_LSHIFT_16); + RzILOpEffect *op_ASSIGN_18 = SETL("tmp", op_XOR_17); + + // lr = ((ut32) ((st64) ((st32) ((tmp >> 0x20) & ((ut64) 0xffffffff))))); + RzILOpPure *op_RSHIFT_23 = SHIFTR0(VARL("tmp"), SN(32, 0x20)); + RzILOpPure *op_AND_26 = LOGAND(op_RSHIFT_23, CAST(64, IL_FALSE, SN(64, 0xffffffff))); + RzILOpEffect *op_ASSIGN_30 = WRITE_REG(bundle, &lr_op, CAST(32, IL_FALSE, CAST(64, MSB(CAST(32, IL_FALSE, op_AND_26)), CAST(32, IL_FALSE, DUP(op_AND_26))))); + + // fp = ((ut32) ((st64) ((st32) ((tmp >> 0x0) & ((ut64) 0xffffffff))))); + RzILOpPure *op_RSHIFT_34 = SHIFTR0(VARL("tmp"), SN(32, 0)); + RzILOpPure *op_AND_37 = LOGAND(op_RSHIFT_34, CAST(64, IL_FALSE, SN(64, 0xffffffff))); + RzILOpEffect *op_ASSIGN_41 = WRITE_REG(bundle, &fp_op, CAST(32, IL_FALSE, CAST(64, MSB(CAST(32, IL_FALSE, op_AND_37)), CAST(32, IL_FALSE, DUP(op_AND_37))))); + + // sp = EA + ((ut32) 0x8); + RzILOpPure *op_ADD_45 = ADD(VARL("EA"), CAST(32, IL_FALSE, SN(32, 8))); + RzILOpEffect *op_ASSIGN_46 = WRITE_REG(bundle, &sp_op, op_ADD_45); + + // jump(((ut32) ((st64) ((st32) ((tmp >> 0x20) & ((ut64) 0xffffffff)))))); + RzILOpPure *op_RSHIFT_50 = SHIFTR0(VARL("tmp"), SN(32, 0x20)); + RzILOpPure *op_AND_53 = LOGAND(op_RSHIFT_50, CAST(64, IL_FALSE, SN(64, 0xffffffff))); + RzILOpEffect 
*jump_cast_ut32_56_57 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", CAST(32, IL_FALSE, CAST(64, MSB(CAST(32, IL_FALSE, op_AND_53)), CAST(32, IL_FALSE, DUP(op_AND_53)))))); + + // nop; + RzILOpEffect *nop_59 = NOP(); + + // seq(tmp = ((ut64) mem_load_64(EA)); tmp = (tmp ^ (((ut64) framek ...; + RzILOpEffect *seq_then_61 = SEQN(6, op_ASSIGN_12, op_ASSIGN_18, op_ASSIGN_30, op_ASSIGN_41, op_ASSIGN_46, jump_cast_ut32_56_57); + + // seq(nop); + RzILOpEffect *seq_else_62 = nop_59; + + // if ((((st32) P0_new) & 0x1)) {seq(tmp = ((ut64) mem_load_64(EA)); tmp = (tmp ^ (((ut64) framek ...} else {seq(nop)}; + RzILOpPure *op_AND_9 = LOGAND(CAST(32, MSB(P0_new), DUP(P0_new)), SN(32, 1)); + RzILOpEffect *branch_63 = BRANCH(NON_ZERO(op_AND_9), seq_then_61, seq_else_62); + + RzILOpEffect *instruction_sequence = SEQN(2, op_ASSIGN_4, branch_63); + return instruction_sequence; +} + +#include \ No newline at end of file diff --git a/librz/arch/isa/hexagon/il_ops/hexagon_il_SS1_ops.c b/librz/arch/isa/hexagon/il_ops/hexagon_il_SS1_ops.c new file mode 100644 index 00000000000..b33a881a4c6 --- /dev/null +++ b/librz/arch/isa/hexagon/il_ops/hexagon_il_SS1_ops.c @@ -0,0 +1,71 @@ +// SPDX-FileCopyrightText: 2021 Rot127 +// SPDX-License-Identifier: LGPL-3.0-only + +// LLVM commit: b6f51787f6c8e77143f0aef6b58ddc7c55741d5c +// LLVM commit date: 2023-11-15 07:10:59 -0800 (ISO 8601 format) +// Date of code generation: 2024-03-16 06:22:39-05:00 +//======================================== +// The following code is generated. +// Do not edit. 
Repository of code generator: +// https://github.com/rizinorg/rz-hexagon + +#include +#include "../hexagon_il.h" +#include +#include + +// memb(Rs+Ii) = Rt +RzILOpEffect *hex_il_op_ss1_storeb_io(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // u = u; + RzILOpEffect *imm_assign_2 = SETL("u", u); + + // EA = ((ut32) Rs) + u; + RzILOpPure *op_ADD_5 = ADD(CAST(32, IL_FALSE, Rs), VARL("u")); + RzILOpEffect *op_ASSIGN_6 = SETL("EA", op_ADD_5); + + // mem_store_ut8(EA, ((ut8) ((st8) ((Rt >> 0x0) & 0xff)))); + RzILOpPure *op_RSHIFT_12 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_14 = LOGAND(op_RSHIFT_12, SN(32, 0xff)); + RzILOpEffect *ms_cast_ut8_16_17 = STOREW(VARL("EA"), CAST(8, IL_FALSE, CAST(8, MSB(op_AND_14), DUP(op_AND_14)))); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_2, op_ASSIGN_6, ms_cast_ut8_16_17); + return instruction_sequence; +} + +// memw(Rs+Ii) = Rt +RzILOpEffect *hex_il_op_ss1_storew_io(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // u = u; + RzILOpEffect *imm_assign_2 = SETL("u", u); + + // EA = ((ut32) Rs) + u; + RzILOpPure *op_ADD_5 = ADD(CAST(32, IL_FALSE, Rs), VARL("u")); + RzILOpEffect *op_ASSIGN_6 = SETL("EA", op_ADD_5); + + // mem_store_ut32(EA, ((ut32) Rt)); + RzILOpEffect *ms_cast_ut32_9_10 = STOREW(VARL("EA"), CAST(32, IL_FALSE, Rt)); + + RzILOpEffect *instruction_sequence = SEQN(3, 
imm_assign_2, op_ASSIGN_6, ms_cast_ut32_9_10); + return instruction_sequence; +} + +#include \ No newline at end of file diff --git a/librz/arch/isa/hexagon/il_ops/hexagon_il_SS2_ops.c b/librz/arch/isa/hexagon/il_ops/hexagon_il_SS2_ops.c new file mode 100644 index 00000000000..2fd7ed572bd --- /dev/null +++ b/librz/arch/isa/hexagon/il_ops/hexagon_il_SS2_ops.c @@ -0,0 +1,232 @@ +// SPDX-FileCopyrightText: 2021 Rot127 +// SPDX-License-Identifier: LGPL-3.0-only + +// LLVM commit: b6f51787f6c8e77143f0aef6b58ddc7c55741d5c +// LLVM commit date: 2023-11-15 07:10:59 -0800 (ISO 8601 format) +// Date of code generation: 2024-03-16 06:22:39-05:00 +//======================================== +// The following code is generated. +// Do not edit. Repository of code generator: +// https://github.com/rizinorg/rz-hexagon + +#include +#include "../hexagon_il.h" +#include +#include + +// allocframe(Ii) +RzILOpEffect *hex_il_op_ss2_allocframe(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp sp_op = ALIAS2OP(HEX_REG_ALIAS_SP, false); + const HexOp lr_op = ALIAS2OP(HEX_REG_ALIAS_LR, false); + RzILOpPure *lr = READ_REG(pkt, &lr_op, false); + const HexOp fp_op = ALIAS2OP(HEX_REG_ALIAS_FP, false); + const HexOp framekey_op = ALIAS2OP(HEX_REG_ALIAS_FRAMEKEY, false); + RzILOpPure *framekey = READ_REG(pkt, &framekey_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + + // EA = sp + ((ut32) -0x8); + RzILOpPure *op_ADD_5 = ADD(READ_REG(pkt, &sp_op, true), CAST(32, IL_FALSE, SN(32, -8))); + RzILOpEffect *op_ASSIGN_6 = SETL("EA", op_ADD_5); + + // mem_store_ut64(EA, ((ut64) (((((ut64) lr) << 0x20) | ((ut64) fp)) ^ (((ut64) framekey) << 0x20)))); + RzILOpPure *op_LSHIFT_11 = SHIFTL0(CAST(64, IL_FALSE, lr), SN(32, 0x20)); + RzILOpPure *op_OR_14 = LOGOR(op_LSHIFT_11, CAST(64, IL_FALSE, READ_REG(pkt, &fp_op, true))); + RzILOpPure *op_LSHIFT_18 = SHIFTL0(CAST(64, IL_FALSE, framekey), SN(32, 0x20)); + 
RzILOpPure *op_XOR_19 = LOGXOR(op_OR_14, op_LSHIFT_18); + RzILOpEffect *ms_cast_ut64_20_21 = STOREW(VARL("EA"), CAST(64, IL_FALSE, op_XOR_19)); + + // fp = EA; + RzILOpEffect *op_ASSIGN_22 = WRITE_REG(bundle, &fp_op, VARL("EA")); + + // u = u; + RzILOpEffect *imm_assign_24 = SETL("u", u); + + // sp = EA - u; + RzILOpPure *op_SUB_26 = SUB(VARL("EA"), VARL("u")); + RzILOpEffect *op_ASSIGN_27 = WRITE_REG(bundle, &sp_op, op_SUB_26); + + RzILOpEffect *instruction_sequence = SEQN(5, imm_assign_24, op_ASSIGN_6, ms_cast_ut64_20_21, op_ASSIGN_22, op_ASSIGN_27); + return instruction_sequence; +} + +// memb(Rs+Ii) = #0 +RzILOpEffect *hex_il_op_ss2_storebi0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + + // u = u; + RzILOpEffect *imm_assign_2 = SETL("u", u); + + // EA = ((ut32) Rs) + u; + RzILOpPure *op_ADD_5 = ADD(CAST(32, IL_FALSE, Rs), VARL("u")); + RzILOpEffect *op_ASSIGN_6 = SETL("EA", op_ADD_5); + + // mem_store_ut8(EA, ((ut8) 0x0)); + RzILOpEffect *ms_cast_ut8_9_10 = STOREW(VARL("EA"), CAST(8, IL_FALSE, SN(32, 0))); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_2, op_ASSIGN_6, ms_cast_ut8_9_10); + return instruction_sequence; +} + +// memb(Rs+Ii) = #1 +RzILOpEffect *hex_il_op_ss2_storebi1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + + // u = u; + RzILOpEffect *imm_assign_2 = SETL("u", u); + + // EA = ((ut32) Rs) + u; + RzILOpPure *op_ADD_5 = ADD(CAST(32, IL_FALSE, Rs), VARL("u")); + RzILOpEffect *op_ASSIGN_6 = SETL("EA", op_ADD_5); + + // mem_store_ut8(EA, ((ut8) 0x1)); + RzILOpEffect *ms_cast_ut8_9_10 = 
STOREW(VARL("EA"), CAST(8, IL_FALSE, SN(32, 1))); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_2, op_ASSIGN_6, ms_cast_ut8_9_10); + return instruction_sequence; +} + +// memd(r29+Ii) = Rtt +RzILOpEffect *hex_il_op_ss2_stored_sp(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp sp_op = ALIAS2OP(HEX_REG_ALIAS_SP, false); + RzILOpPure *sp = READ_REG(pkt, &sp_op, false); + RzILOpPure *s = SN(32, (st32)ISA2IMM(hi, 's')); + const HexOp *Rtt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rtt = READ_REG(pkt, Rtt_op, false); + + // s = s; + RzILOpEffect *imm_assign_2 = SETL("s", s); + + // EA = sp + ((ut32) s); + RzILOpPure *op_ADD_5 = ADD(sp, CAST(32, IL_FALSE, VARL("s"))); + RzILOpEffect *op_ASSIGN_6 = SETL("EA", op_ADD_5); + + // mem_store_ut64(EA, ((ut64) Rtt)); + RzILOpEffect *ms_cast_ut64_9_10 = STOREW(VARL("EA"), CAST(64, IL_FALSE, Rtt)); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_2, op_ASSIGN_6, ms_cast_ut64_9_10); + return instruction_sequence; +} + +// memh(Rs+Ii) = Rt +RzILOpEffect *hex_il_op_ss2_storeh_io(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // u = u; + RzILOpEffect *imm_assign_2 = SETL("u", u); + + // EA = ((ut32) Rs) + u; + RzILOpPure *op_ADD_5 = ADD(CAST(32, IL_FALSE, Rs), VARL("u")); + RzILOpEffect *op_ASSIGN_6 = SETL("EA", op_ADD_5); + + // mem_store_ut16(EA, ((ut16) ((st16) ((Rt >> 0x0) & 0xffff)))); + RzILOpPure *op_RSHIFT_12 = SHIFTRA(Rt, SN(32, 0)); + RzILOpPure *op_AND_14 = LOGAND(op_RSHIFT_12, SN(32, 0xffff)); + RzILOpEffect *ms_cast_ut16_16_17 = STOREW(VARL("EA"), CAST(16, IL_FALSE, CAST(16, 
MSB(op_AND_14), DUP(op_AND_14)))); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_2, op_ASSIGN_6, ms_cast_ut16_16_17); + return instruction_sequence; +} + +// memw(r29+Ii) = Rt +RzILOpEffect *hex_il_op_ss2_storew_sp(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp sp_op = ALIAS2OP(HEX_REG_ALIAS_SP, false); + RzILOpPure *sp = READ_REG(pkt, &sp_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + const HexOp *Rt_op = ISA2REG(hi, 't', false); + RzILOpPure *Rt = READ_REG(pkt, Rt_op, false); + + // u = u; + RzILOpEffect *imm_assign_2 = SETL("u", u); + + // EA = sp + u; + RzILOpPure *op_ADD_4 = ADD(sp, VARL("u")); + RzILOpEffect *op_ASSIGN_5 = SETL("EA", op_ADD_4); + + // mem_store_ut32(EA, ((ut32) Rt)); + RzILOpEffect *ms_cast_ut32_8_9 = STOREW(VARL("EA"), CAST(32, IL_FALSE, Rt)); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_2, op_ASSIGN_5, ms_cast_ut32_8_9); + return instruction_sequence; +} + +// memw(Rs+Ii) = #0 +RzILOpEffect *hex_il_op_ss2_storewi0(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + + // u = u; + RzILOpEffect *imm_assign_2 = SETL("u", u); + + // EA = ((ut32) Rs) + u; + RzILOpPure *op_ADD_5 = ADD(CAST(32, IL_FALSE, Rs), VARL("u")); + RzILOpEffect *op_ASSIGN_6 = SETL("EA", op_ADD_5); + + // mem_store_ut32(EA, ((ut32) 0x0)); + RzILOpEffect *ms_cast_ut32_9_10 = STOREW(VARL("EA"), CAST(32, IL_FALSE, SN(32, 0))); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_2, op_ASSIGN_6, ms_cast_ut32_9_10); + return instruction_sequence; +} + +// memw(Rs+Ii) = #1 +RzILOpEffect *hex_il_op_ss2_storewi1(HexInsnPktBundle *bundle) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + // READ + // 
Declare: ut32 EA; + const HexOp *Rs_op = ISA2REG(hi, 's', false); + RzILOpPure *Rs = READ_REG(pkt, Rs_op, false); + RzILOpPure *u = UN(32, (ut32)ISA2IMM(hi, 'u')); + + // u = u; + RzILOpEffect *imm_assign_2 = SETL("u", u); + + // EA = ((ut32) Rs) + u; + RzILOpPure *op_ADD_5 = ADD(CAST(32, IL_FALSE, Rs), VARL("u")); + RzILOpEffect *op_ASSIGN_6 = SETL("EA", op_ADD_5); + + // mem_store_ut32(EA, ((ut32) 0x1)); + RzILOpEffect *ms_cast_ut32_9_10 = STOREW(VARL("EA"), CAST(32, IL_FALSE, SN(32, 1))); + + RzILOpEffect *instruction_sequence = SEQN(3, imm_assign_2, op_ASSIGN_6, ms_cast_ut32_9_10); + return instruction_sequence; +} + +#include \ No newline at end of file diff --git a/librz/arch/isa/hexagon/il_ops/hexagon_il_UNDOCUMENTED_ops.c b/librz/arch/isa/hexagon/il_ops/hexagon_il_UNDOCUMENTED_ops.c new file mode 100644 index 00000000000..334a9126fe5 --- /dev/null +++ b/librz/arch/isa/hexagon/il_ops/hexagon_il_UNDOCUMENTED_ops.c @@ -0,0 +1,22 @@ +// SPDX-FileCopyrightText: 2021 Rot127 +// SPDX-License-Identifier: LGPL-3.0-only + +// LLVM commit: b6f51787f6c8e77143f0aef6b58ddc7c55741d5c +// LLVM commit date: 2023-11-15 07:10:59 -0800 (ISO 8601 format) +// Date of code generation: 2024-03-16 06:22:39-05:00 +//======================================== +// The following code is generated. +// Do not edit. 
Repository of code generator: +// https://github.com/rizinorg/rz-hexagon + +#include +#include "../hexagon_il.h" +#include +#include + +// Rd = Ii +RzILOpEffect *hex_il_op_undocumented_sa2_tfrsi(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +#include \ No newline at end of file diff --git a/librz/arch/isa/hexagon/il_ops/hexagon_il_V6_ops.c b/librz/arch/isa/hexagon/il_ops/hexagon_il_V6_ops.c new file mode 100644 index 00000000000..45f6f3e98ac --- /dev/null +++ b/librz/arch/isa/hexagon/il_ops/hexagon_il_V6_ops.c @@ -0,0 +1,1077 @@ +// SPDX-FileCopyrightText: 2021 Rot127 +// SPDX-License-Identifier: LGPL-3.0-only + +// LLVM commit: b6f51787f6c8e77143f0aef6b58ddc7c55741d5c +// LLVM commit date: 2023-11-15 07:10:59 -0800 (ISO 8601 format) +// Date of code generation: 2024-03-16 06:22:39-05:00 +//======================================== +// The following code is generated. +// Do not edit. Repository of code generator: +// https://github.com/rizinorg/rz-hexagon + +#include +#include "../hexagon_il.h" +#include +#include + +// Vd = vmemu(Rt+Ii) +RzILOpEffect *hex_il_op_v6_vl32ub_ai(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Vd = vmemu(Rx++Ii) +RzILOpEffect *hex_il_op_v6_vl32ub_pi(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Vd = vmemu(Rx++Mu) +RzILOpEffect *hex_il_op_v6_vl32ub_ppu(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Vd = vmem(Rt+Ii) +RzILOpEffect *hex_il_op_v6_vl32b_ai(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Vd.cur = vmem(Rt+Ii) +RzILOpEffect *hex_il_op_v6_vl32b_cur_ai(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// if (!Pv) Vd.cur = vmem(Rt+Ii) +RzILOpEffect *hex_il_op_v6_vl32b_cur_npred_ai(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// if (!Pv) Vd.cur = vmem(Rx++Ii) +RzILOpEffect *hex_il_op_v6_vl32b_cur_npred_pi(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// if (!Pv) Vd.cur = vmem(Rx++Mu) +RzILOpEffect *hex_il_op_v6_vl32b_cur_npred_ppu(HexInsnPktBundle *bundle) { + 
NOT_IMPLEMENTED; +} + +// Vd.cur = vmem(Rx++Ii) +RzILOpEffect *hex_il_op_v6_vl32b_cur_pi(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Vd.cur = vmem(Rx++Mu) +RzILOpEffect *hex_il_op_v6_vl32b_cur_ppu(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// if (Pv) Vd.cur = vmem(Rt+Ii) +RzILOpEffect *hex_il_op_v6_vl32b_cur_pred_ai(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// if (Pv) Vd.cur = vmem(Rx++Ii) +RzILOpEffect *hex_il_op_v6_vl32b_cur_pred_pi(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// if (Pv) Vd.cur = vmem(Rx++Mu) +RzILOpEffect *hex_il_op_v6_vl32b_cur_pred_ppu(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// if (!Pv) Vd = vmem(Rt+Ii) +RzILOpEffect *hex_il_op_v6_vl32b_npred_ai(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// if (!Pv) Vd = vmem(Rx++Ii) +RzILOpEffect *hex_il_op_v6_vl32b_npred_pi(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// if (!Pv) Vd = vmem(Rx++Mu) +RzILOpEffect *hex_il_op_v6_vl32b_npred_ppu(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Vd = vmem(Rt+Ii):nt +RzILOpEffect *hex_il_op_v6_vl32b_nt_ai(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Vd.cur = vmem(Rt+Ii):nt +RzILOpEffect *hex_il_op_v6_vl32b_nt_cur_ai(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// if (!Pv) Vd.cur = vmem(Rt+Ii):nt +RzILOpEffect *hex_il_op_v6_vl32b_nt_cur_npred_ai(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// if (!Pv) Vd.cur = vmem(Rx++Ii):nt +RzILOpEffect *hex_il_op_v6_vl32b_nt_cur_npred_pi(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// if (!Pv) Vd.cur = vmem(Rx++Mu):nt +RzILOpEffect *hex_il_op_v6_vl32b_nt_cur_npred_ppu(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Vd.cur = vmem(Rx++Ii):nt +RzILOpEffect *hex_il_op_v6_vl32b_nt_cur_pi(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Vd.cur = vmem(Rx++Mu):nt +RzILOpEffect *hex_il_op_v6_vl32b_nt_cur_ppu(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// if (Pv) Vd.cur = vmem(Rt+Ii):nt +RzILOpEffect 
*hex_il_op_v6_vl32b_nt_cur_pred_ai(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// if (Pv) Vd.cur = vmem(Rx++Ii):nt +RzILOpEffect *hex_il_op_v6_vl32b_nt_cur_pred_pi(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// if (Pv) Vd.cur = vmem(Rx++Mu):nt +RzILOpEffect *hex_il_op_v6_vl32b_nt_cur_pred_ppu(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// if (!Pv) Vd = vmem(Rt+Ii):nt +RzILOpEffect *hex_il_op_v6_vl32b_nt_npred_ai(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// if (!Pv) Vd = vmem(Rx++Ii):nt +RzILOpEffect *hex_il_op_v6_vl32b_nt_npred_pi(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// if (!Pv) Vd = vmem(Rx++Mu):nt +RzILOpEffect *hex_il_op_v6_vl32b_nt_npred_ppu(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Vd = vmem(Rx++Ii):nt +RzILOpEffect *hex_il_op_v6_vl32b_nt_pi(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Vd = vmem(Rx++Mu):nt +RzILOpEffect *hex_il_op_v6_vl32b_nt_ppu(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// if (Pv) Vd = vmem(Rt+Ii):nt +RzILOpEffect *hex_il_op_v6_vl32b_nt_pred_ai(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// if (Pv) Vd = vmem(Rx++Ii):nt +RzILOpEffect *hex_il_op_v6_vl32b_nt_pred_pi(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// if (Pv) Vd = vmem(Rx++Mu):nt +RzILOpEffect *hex_il_op_v6_vl32b_nt_pred_ppu(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Vd.tmp = vmem(Rt+Ii):nt +RzILOpEffect *hex_il_op_v6_vl32b_nt_tmp_ai(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// if (!Pv) Vd.tmp = vmem(Rt+Ii):nt +RzILOpEffect *hex_il_op_v6_vl32b_nt_tmp_npred_ai(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// if (!Pv) Vd.tmp = vmem(Rx++Ii):nt +RzILOpEffect *hex_il_op_v6_vl32b_nt_tmp_npred_pi(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// if (!Pv) Vd.tmp = vmem(Rx++Mu):nt +RzILOpEffect *hex_il_op_v6_vl32b_nt_tmp_npred_ppu(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Vd.tmp = vmem(Rx++Ii):nt +RzILOpEffect 
*hex_il_op_v6_vl32b_nt_tmp_pi(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Vd.tmp = vmem(Rx++Mu):nt +RzILOpEffect *hex_il_op_v6_vl32b_nt_tmp_ppu(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// if (Pv) Vd.tmp = vmem(Rt+Ii):nt +RzILOpEffect *hex_il_op_v6_vl32b_nt_tmp_pred_ai(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// if (Pv) Vd.tmp = vmem(Rx++Ii):nt +RzILOpEffect *hex_il_op_v6_vl32b_nt_tmp_pred_pi(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// if (Pv) Vd.tmp = vmem(Rx++Mu):nt +RzILOpEffect *hex_il_op_v6_vl32b_nt_tmp_pred_ppu(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Vd = vmem(Rx++Ii) +RzILOpEffect *hex_il_op_v6_vl32b_pi(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Vd = vmem(Rx++Mu) +RzILOpEffect *hex_il_op_v6_vl32b_ppu(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// if (Pv) Vd = vmem(Rt+Ii) +RzILOpEffect *hex_il_op_v6_vl32b_pred_ai(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// if (Pv) Vd = vmem(Rx++Ii) +RzILOpEffect *hex_il_op_v6_vl32b_pred_pi(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// if (Pv) Vd = vmem(Rx++Mu) +RzILOpEffect *hex_il_op_v6_vl32b_pred_ppu(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Vd.tmp = vmem(Rt+Ii) +RzILOpEffect *hex_il_op_v6_vl32b_tmp_ai(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// if (!Pv) Vd.tmp = vmem(Rt+Ii) +RzILOpEffect *hex_il_op_v6_vl32b_tmp_npred_ai(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// if (!Pv) Vd.tmp = vmem(Rx++Ii) +RzILOpEffect *hex_il_op_v6_vl32b_tmp_npred_pi(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// if (!Pv) Vd.tmp = vmem(Rx++Mu) +RzILOpEffect *hex_il_op_v6_vl32b_tmp_npred_ppu(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Vd.tmp = vmem(Rx++Ii) +RzILOpEffect *hex_il_op_v6_vl32b_tmp_pi(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Vd.tmp = vmem(Rx++Mu) +RzILOpEffect *hex_il_op_v6_vl32b_tmp_ppu(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// if (Pv) Vd.tmp = 
vmem(Rt+Ii) +RzILOpEffect *hex_il_op_v6_vl32b_tmp_pred_ai(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// if (Pv) Vd.tmp = vmem(Rx++Ii) +RzILOpEffect *hex_il_op_v6_vl32b_tmp_pred_pi(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// if (Pv) Vd.tmp = vmem(Rx++Mu) +RzILOpEffect *hex_il_op_v6_vl32b_tmp_pred_ppu(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// vmemu(Rt+Ii) = Vs +RzILOpEffect *hex_il_op_v6_vs32ub_ai(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// if (!Pv) vmemu(Rt+Ii) = Vs +RzILOpEffect *hex_il_op_v6_vs32ub_npred_ai(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// if (!Pv) vmemu(Rx++Ii) = Vs +RzILOpEffect *hex_il_op_v6_vs32ub_npred_pi(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// if (!Pv) vmemu(Rx++Mu) = Vs +RzILOpEffect *hex_il_op_v6_vs32ub_npred_ppu(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// vmemu(Rx++Ii) = Vs +RzILOpEffect *hex_il_op_v6_vs32ub_pi(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// vmemu(Rx++Mu) = Vs +RzILOpEffect *hex_il_op_v6_vs32ub_ppu(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// if (Pv) vmemu(Rt+Ii) = Vs +RzILOpEffect *hex_il_op_v6_vs32ub_pred_ai(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// if (Pv) vmemu(Rx++Ii) = Vs +RzILOpEffect *hex_il_op_v6_vs32ub_pred_pi(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// if (Pv) vmemu(Rx++Mu) = Vs +RzILOpEffect *hex_il_op_v6_vs32ub_pred_ppu(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// vmem(Rt+Ii) = Vs +RzILOpEffect *hex_il_op_v6_vs32b_ai(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// if (!Pv) vmem(Rt+Ii) = Vs +RzILOpEffect *hex_il_op_v6_vs32b_npred_ai(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// if (!Pv) vmem(Rx++Ii) = Vs +RzILOpEffect *hex_il_op_v6_vs32b_npred_pi(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// if (!Pv) vmem(Rx++Mu) = Vs +RzILOpEffect *hex_il_op_v6_vs32b_npred_ppu(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// if (!Qv) vmem(Rt+Ii) = Vs 
+RzILOpEffect *hex_il_op_v6_vs32b_nqpred_ai(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// if (!Qv) vmem(Rx++Ii) = Vs +RzILOpEffect *hex_il_op_v6_vs32b_nqpred_pi(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// if (!Qv) vmem(Rx++Mu) = Vs +RzILOpEffect *hex_il_op_v6_vs32b_nqpred_ppu(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// vmem(Rt+Ii):nt = Vs +RzILOpEffect *hex_il_op_v6_vs32b_nt_ai(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// if (!Pv) vmem(Rt+Ii):nt = Vs +RzILOpEffect *hex_il_op_v6_vs32b_nt_npred_ai(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// if (!Pv) vmem(Rx++Ii):nt = Vs +RzILOpEffect *hex_il_op_v6_vs32b_nt_npred_pi(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// if (!Pv) vmem(Rx++Mu):nt = Vs +RzILOpEffect *hex_il_op_v6_vs32b_nt_npred_ppu(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// if (!Qv) vmem(Rt+Ii):nt = Vs +RzILOpEffect *hex_il_op_v6_vs32b_nt_nqpred_ai(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// if (!Qv) vmem(Rx++Ii):nt = Vs +RzILOpEffect *hex_il_op_v6_vs32b_nt_nqpred_pi(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// if (!Qv) vmem(Rx++Mu):nt = Vs +RzILOpEffect *hex_il_op_v6_vs32b_nt_nqpred_ppu(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// vmem(Rx++Ii):nt = Vs +RzILOpEffect *hex_il_op_v6_vs32b_nt_pi(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// vmem(Rx++Mu):nt = Vs +RzILOpEffect *hex_il_op_v6_vs32b_nt_ppu(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// if (Pv) vmem(Rt+Ii):nt = Vs +RzILOpEffect *hex_il_op_v6_vs32b_nt_pred_ai(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// if (Pv) vmem(Rx++Ii):nt = Vs +RzILOpEffect *hex_il_op_v6_vs32b_nt_pred_pi(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// if (Pv) vmem(Rx++Mu):nt = Vs +RzILOpEffect *hex_il_op_v6_vs32b_nt_pred_ppu(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// if (Qv) vmem(Rt+Ii):nt = Vs +RzILOpEffect *hex_il_op_v6_vs32b_nt_qpred_ai(HexInsnPktBundle *bundle) { + 
NOT_IMPLEMENTED; +} + +// if (Qv) vmem(Rx++Ii):nt = Vs +RzILOpEffect *hex_il_op_v6_vs32b_nt_qpred_pi(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// if (Qv) vmem(Rx++Mu):nt = Vs +RzILOpEffect *hex_il_op_v6_vs32b_nt_qpred_ppu(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// vmem(Rx++Ii) = Vs +RzILOpEffect *hex_il_op_v6_vs32b_pi(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// vmem(Rx++Mu) = Vs +RzILOpEffect *hex_il_op_v6_vs32b_ppu(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// if (Pv) vmem(Rt+Ii) = Vs +RzILOpEffect *hex_il_op_v6_vs32b_pred_ai(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// if (Pv) vmem(Rx++Ii) = Vs +RzILOpEffect *hex_il_op_v6_vs32b_pred_pi(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// if (Pv) vmem(Rx++Mu) = Vs +RzILOpEffect *hex_il_op_v6_vs32b_pred_ppu(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// if (Qv) vmem(Rt+Ii) = Vs +RzILOpEffect *hex_il_op_v6_vs32b_qpred_ai(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// if (Qv) vmem(Rx++Ii) = Vs +RzILOpEffect *hex_il_op_v6_vs32b_qpred_pi(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// if (Qv) vmem(Rx++Mu) = Vs +RzILOpEffect *hex_il_op_v6_vs32b_qpred_ppu(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// vmem(Rt+Ii):scatter_release +RzILOpEffect *hex_il_op_v6_vs32b_srls_ai(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// vmem(Rx++Ii):scatter_release +RzILOpEffect *hex_il_op_v6_vs32b_srls_pi(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// vmem(Rx++Mu):scatter_release +RzILOpEffect *hex_il_op_v6_vs32b_srls_ppu(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Vd.hf = vabs(Vu.hf) +RzILOpEffect *hex_il_op_v6_vabs_hf(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Vd.sf = vabs(Vu.sf) +RzILOpEffect *hex_il_op_v6_vabs_sf(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Vd.qf16 = vadd(Vu.hf,Vv.hf) +RzILOpEffect *hex_il_op_v6_vadd_hf(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Vd.hf = 
vadd(Vu.hf,Vv.hf) +RzILOpEffect *hex_il_op_v6_vadd_hf_hf(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Vd.qf16 = vadd(Vu.qf16,Vv.qf16) +RzILOpEffect *hex_il_op_v6_vadd_qf16(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Vd.qf16 = vadd(Vu.qf16,Vv.hf) +RzILOpEffect *hex_il_op_v6_vadd_qf16_mix(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Vd.qf32 = vadd(Vu.qf32,Vv.qf32) +RzILOpEffect *hex_il_op_v6_vadd_qf32(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Vd.qf32 = vadd(Vu.qf32,Vv.sf) +RzILOpEffect *hex_il_op_v6_vadd_qf32_mix(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Vd.qf32 = vadd(Vu.sf,Vv.sf) +RzILOpEffect *hex_il_op_v6_vadd_sf(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Vdd.sf = vadd(Vu.bf,Vv.bf) +RzILOpEffect *hex_il_op_v6_vadd_sf_bf(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Vdd.sf = vadd(Vu.hf,Vv.hf) +RzILOpEffect *hex_il_op_v6_vadd_sf_hf(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Vd.sf = vadd(Vu.sf,Vv.sf) +RzILOpEffect *hex_il_op_v6_vadd_sf_sf(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Vd.w = vfmv(Vu.w) +RzILOpEffect *hex_il_op_v6_vassign_fp(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Vd.h = Vu.hf +RzILOpEffect *hex_il_op_v6_vconv_h_hf(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Vd.hf = Vu.h +RzILOpEffect *hex_il_op_v6_vconv_hf_h(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Vd.hf = Vu.qf16 +RzILOpEffect *hex_il_op_v6_vconv_hf_qf16(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Vd.hf = Vuu.qf32 +RzILOpEffect *hex_il_op_v6_vconv_hf_qf32(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Vd.sf = Vu.qf32 +RzILOpEffect *hex_il_op_v6_vconv_sf_qf32(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Vd.sf = Vu.w +RzILOpEffect *hex_il_op_v6_vconv_sf_w(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Vd.w = Vu.sf +RzILOpEffect *hex_il_op_v6_vconv_w_sf(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Vd.b 
= vcvt(Vu.hf,Vv.hf) +RzILOpEffect *hex_il_op_v6_vcvt_b_hf(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Vd.bf = vcvt(Vu.sf,Vv.sf) +RzILOpEffect *hex_il_op_v6_vcvt_bf_sf(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Vd.h = vcvt(Vu.hf) +RzILOpEffect *hex_il_op_v6_vcvt_h_hf(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Vdd.hf = vcvt(Vu.b) +RzILOpEffect *hex_il_op_v6_vcvt_hf_b(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Vd.hf = vcvt(Vu.h) +RzILOpEffect *hex_il_op_v6_vcvt_hf_h(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Vd.hf = vcvt(Vu.sf,Vv.sf) +RzILOpEffect *hex_il_op_v6_vcvt_hf_sf(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Vdd.hf = vcvt(Vu.ub) +RzILOpEffect *hex_il_op_v6_vcvt_hf_ub(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Vd.hf = vcvt(Vu.uh) +RzILOpEffect *hex_il_op_v6_vcvt_hf_uh(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Vdd.sf = vcvt(Vu.hf) +RzILOpEffect *hex_il_op_v6_vcvt_sf_hf(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Vd.ub = vcvt(Vu.hf,Vv.hf) +RzILOpEffect *hex_il_op_v6_vcvt_ub_hf(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Vd.uh = vcvt(Vu.hf) +RzILOpEffect *hex_il_op_v6_vcvt_uh_hf(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Vd.sf = vdmpy(Vu.hf,Vv.hf) +RzILOpEffect *hex_il_op_v6_vdmpy_sf_hf(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Vx.sf += vdmpy(Vu.hf,Vv.hf) +RzILOpEffect *hex_il_op_v6_vdmpy_sf_hf_acc(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Vd.hf = vfmax(Vu.hf,Vv.hf) +RzILOpEffect *hex_il_op_v6_vfmax_hf(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Vd.sf = vfmax(Vu.sf,Vv.sf) +RzILOpEffect *hex_il_op_v6_vfmax_sf(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Vd.hf = vfmin(Vu.hf,Vv.hf) +RzILOpEffect *hex_il_op_v6_vfmin_hf(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Vd.sf = vfmin(Vu.sf,Vv.sf) +RzILOpEffect *hex_il_op_v6_vfmin_sf(HexInsnPktBundle *bundle) { + 
NOT_IMPLEMENTED; +} + +// Vd.hf = vfneg(Vu.hf) +RzILOpEffect *hex_il_op_v6_vfneg_hf(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Vd.sf = vfneg(Vu.sf) +RzILOpEffect *hex_il_op_v6_vfneg_sf(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Qd = vcmp.gt(Vu.bf,Vv.bf) +RzILOpEffect *hex_il_op_v6_vgtbf(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Qx &= vcmp.gt(Vu.bf,Vv.bf) +RzILOpEffect *hex_il_op_v6_vgtbf_and(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Qx |= vcmp.gt(Vu.bf,Vv.bf) +RzILOpEffect *hex_il_op_v6_vgtbf_or(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Qx ^= vcmp.gt(Vu.bf,Vv.bf) +RzILOpEffect *hex_il_op_v6_vgtbf_xor(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Qd = vcmp.gt(Vu.hf,Vv.hf) +RzILOpEffect *hex_il_op_v6_vgthf(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Qx &= vcmp.gt(Vu.hf,Vv.hf) +RzILOpEffect *hex_il_op_v6_vgthf_and(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Qx |= vcmp.gt(Vu.hf,Vv.hf) +RzILOpEffect *hex_il_op_v6_vgthf_or(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Qx ^= vcmp.gt(Vu.hf,Vv.hf) +RzILOpEffect *hex_il_op_v6_vgthf_xor(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Qd = vcmp.gt(Vu.sf,Vv.sf) +RzILOpEffect *hex_il_op_v6_vgtsf(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Qx &= vcmp.gt(Vu.sf,Vv.sf) +RzILOpEffect *hex_il_op_v6_vgtsf_and(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Qx |= vcmp.gt(Vu.sf,Vv.sf) +RzILOpEffect *hex_il_op_v6_vgtsf_or(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Qx ^= vcmp.gt(Vu.sf,Vv.sf) +RzILOpEffect *hex_il_op_v6_vgtsf_xor(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Vd.bf = vmax(Vu.bf,Vv.bf) +RzILOpEffect *hex_il_op_v6_vmax_bf(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Vd.hf = vmax(Vu.hf,Vv.hf) +RzILOpEffect *hex_il_op_v6_vmax_hf(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Vd.sf = vmax(Vu.sf,Vv.sf) +RzILOpEffect 
*hex_il_op_v6_vmax_sf(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Vd.bf = vmin(Vu.bf,Vv.bf) +RzILOpEffect *hex_il_op_v6_vmin_bf(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Vd.hf = vmin(Vu.hf,Vv.hf) +RzILOpEffect *hex_il_op_v6_vmin_hf(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Vd.sf = vmin(Vu.sf,Vv.sf) +RzILOpEffect *hex_il_op_v6_vmin_sf(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Vd.hf = vmpy(Vu.hf,Vv.hf) +RzILOpEffect *hex_il_op_v6_vmpy_hf_hf(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Vx.hf += vmpy(Vu.hf,Vv.hf) +RzILOpEffect *hex_il_op_v6_vmpy_hf_hf_acc(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Vd.qf16 = vmpy(Vu.qf16,Vv.qf16) +RzILOpEffect *hex_il_op_v6_vmpy_qf16(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Vd.qf16 = vmpy(Vu.hf,Vv.hf) +RzILOpEffect *hex_il_op_v6_vmpy_qf16_hf(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Vd.qf16 = vmpy(Vu.qf16,Vv.hf) +RzILOpEffect *hex_il_op_v6_vmpy_qf16_mix_hf(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Vd.qf32 = vmpy(Vu.qf32,Vv.qf32) +RzILOpEffect *hex_il_op_v6_vmpy_qf32(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Vdd.qf32 = vmpy(Vu.hf,Vv.hf) +RzILOpEffect *hex_il_op_v6_vmpy_qf32_hf(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Vdd.qf32 = vmpy(Vu.qf16,Vv.hf) +RzILOpEffect *hex_il_op_v6_vmpy_qf32_mix_hf(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Vdd.qf32 = vmpy(Vu.qf16,Vv.qf16) +RzILOpEffect *hex_il_op_v6_vmpy_qf32_qf16(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Vd.qf32 = vmpy(Vu.sf,Vv.sf) +RzILOpEffect *hex_il_op_v6_vmpy_qf32_sf(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Vdd.sf = vmpy(Vu.bf,Vv.bf) +RzILOpEffect *hex_il_op_v6_vmpy_sf_bf(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Vxx.sf += vmpy(Vu.bf,Vv.bf) +RzILOpEffect *hex_il_op_v6_vmpy_sf_bf_acc(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Vdd.sf = vmpy(Vu.hf,Vv.hf) +RzILOpEffect 
*hex_il_op_v6_vmpy_sf_hf(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Vxx.sf += vmpy(Vu.hf,Vv.hf) +RzILOpEffect *hex_il_op_v6_vmpy_sf_hf_acc(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Vd.sf = vmpy(Vu.sf,Vv.sf) +RzILOpEffect *hex_il_op_v6_vmpy_sf_sf(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Vdd.w = vrmpy(Vu.b,Rtt.ub) +RzILOpEffect *hex_il_op_v6_vrmpybub_rtt(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Vxx.w += vrmpy(Vu.b,Rtt.ub) +RzILOpEffect *hex_il_op_v6_vrmpybub_rtt_acc(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Vdd.uw = vrmpy(Vu.ub,Rtt.ub) +RzILOpEffect *hex_il_op_v6_vrmpyub_rtt(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Vxx.uw += vrmpy(Vu.ub,Rtt.ub) +RzILOpEffect *hex_il_op_v6_vrmpyub_rtt_acc(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Vdddd.w = vrmpyz(Vu.b,Rt.b) +RzILOpEffect *hex_il_op_v6_vrmpyzbb_rt(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Vyyyy.w += vrmpyz(Vu.b,Rt.b) +RzILOpEffect *hex_il_op_v6_vrmpyzbb_rt_acc(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Vdddd.w = vrmpyz(Vu.b,Rx.b++) +RzILOpEffect *hex_il_op_v6_vrmpyzbb_rx(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Vyyyy.w += vrmpyz(Vu.b,Rx.b++) +RzILOpEffect *hex_il_op_v6_vrmpyzbb_rx_acc(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Vdddd.w = vrmpyz(Vu.b,Rt.ub) +RzILOpEffect *hex_il_op_v6_vrmpyzbub_rt(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Vyyyy.w += vrmpyz(Vu.b,Rt.ub) +RzILOpEffect *hex_il_op_v6_vrmpyzbub_rt_acc(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Vdddd.w = vrmpyz(Vu.b,Rx.ub++) +RzILOpEffect *hex_il_op_v6_vrmpyzbub_rx(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Vyyyy.w += vrmpyz(Vu.b,Rx.ub++) +RzILOpEffect *hex_il_op_v6_vrmpyzbub_rx_acc(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Vdddd.w = vr16mpyz(Vu.c,Rt.b) +RzILOpEffect *hex_il_op_v6_vrmpyzcb_rt(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Vyyyy.w 
+= vr16mpyz(Vu.c,Rt.b) +RzILOpEffect *hex_il_op_v6_vrmpyzcb_rt_acc(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Vdddd.w = vr16mpyz(Vu.c,Rx.b++) +RzILOpEffect *hex_il_op_v6_vrmpyzcb_rx(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Vyyyy.w += vr16mpyz(Vu.c,Rx.b++) +RzILOpEffect *hex_il_op_v6_vrmpyzcb_rx_acc(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Vdddd.w = vr16mpyzs(Vu.c,Rt.b) +RzILOpEffect *hex_il_op_v6_vrmpyzcbs_rt(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Vyyyy.w += vr16mpyzs(Vu.c,Rt.b) +RzILOpEffect *hex_il_op_v6_vrmpyzcbs_rt_acc(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Vdddd.w = vr16mpyzs(Vu.c,Rx.b++) +RzILOpEffect *hex_il_op_v6_vrmpyzcbs_rx(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Vyyyy.w += vr16mpyzs(Vu.c,Rx.b++) +RzILOpEffect *hex_il_op_v6_vrmpyzcbs_rx_acc(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Vdddd.w = vr8mpyz(Vu.n,Rt.b) +RzILOpEffect *hex_il_op_v6_vrmpyznb_rt(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Vyyyy.w += vr8mpyz(Vu.n,Rt.b) +RzILOpEffect *hex_il_op_v6_vrmpyznb_rt_acc(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Vdddd.w = vr8mpyz(Vu.n,Rx.b++) +RzILOpEffect *hex_il_op_v6_vrmpyznb_rx(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Vyyyy.w += vr8mpyz(Vu.n,Rx.b++) +RzILOpEffect *hex_il_op_v6_vrmpyznb_rx_acc(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Vd.qf16 = vsub(Vu.hf,Vv.hf) +RzILOpEffect *hex_il_op_v6_vsub_hf(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Vd.hf = vsub(Vu.hf,Vv.hf) +RzILOpEffect *hex_il_op_v6_vsub_hf_hf(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Vd.qf16 = vsub(Vu.qf16,Vv.qf16) +RzILOpEffect *hex_il_op_v6_vsub_qf16(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Vd.qf16 = vsub(Vu.qf16,Vv.hf) +RzILOpEffect *hex_il_op_v6_vsub_qf16_mix(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Vd.qf32 = vsub(Vu.qf32,Vv.qf32) +RzILOpEffect 
*hex_il_op_v6_vsub_qf32(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Vd.qf32 = vsub(Vu.qf32,Vv.sf) +RzILOpEffect *hex_il_op_v6_vsub_qf32_mix(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Vd.qf32 = vsub(Vu.sf,Vv.sf) +RzILOpEffect *hex_il_op_v6_vsub_sf(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Vdd.sf = vsub(Vu.bf,Vv.bf) +RzILOpEffect *hex_il_op_v6_vsub_sf_bf(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Vdd.sf = vsub(Vu.hf,Vv.hf) +RzILOpEffect *hex_il_op_v6_vsub_sf_hf(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Vd.sf = vsub(Vu.sf,Vv.sf) +RzILOpEffect *hex_il_op_v6_vsub_sf_sf(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// z = vmem(Rt+Ii) +RzILOpEffect *hex_il_op_v6_zld_ai(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// z = vmem(Rx++Ii) +RzILOpEffect *hex_il_op_v6_zld_pi(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// z = vmem(Rx++Mu) +RzILOpEffect *hex_il_op_v6_zld_ppu(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// if (Pv) z = vmem(Rt+Ii) +RzILOpEffect *hex_il_op_v6_zld_pred_ai(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// if (Pv) z = vmem(Rx++Ii) +RzILOpEffect *hex_il_op_v6_zld_pred_pi(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// if (Pv) z = vmem(Rx++Mu) +RzILOpEffect *hex_il_op_v6_zld_pred_ppu(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Vd = zextract(Rt) +RzILOpEffect *hex_il_op_v6_zextract(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +#include \ No newline at end of file diff --git a/librz/arch/isa/hexagon/il_ops/hexagon_il_Y2_ops.c b/librz/arch/isa/hexagon/il_ops/hexagon_il_Y2_ops.c new file mode 100644 index 00000000000..5f50c9a7db5 --- /dev/null +++ b/librz/arch/isa/hexagon/il_ops/hexagon_il_Y2_ops.c @@ -0,0 +1,247 @@ +// SPDX-FileCopyrightText: 2021 Rot127 +// SPDX-License-Identifier: LGPL-3.0-only + +// LLVM commit: b6f51787f6c8e77143f0aef6b58ddc7c55741d5c +// LLVM commit date: 2023-11-15 07:10:59 -0800 (ISO 8601 format) +// Date 
of code generation: 2024-03-16 06:22:39-05:00 +//======================================== +// The following code is generated. +// Do not edit. Repository of code generator: +// https://github.com/rizinorg/rz-hexagon + +#include +#include "../hexagon_il.h" +#include +#include + +// barrier +RzILOpEffect *hex_il_op_y2_barrier(HexInsnPktBundle *bundle) { + return NOP(); +} + +// brkpt +RzILOpEffect *hex_il_op_y2_break(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// ciad(Rs) +RzILOpEffect *hex_il_op_y2_ciad(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// crswap(Rx,sgp0) +RzILOpEffect *hex_il_op_y2_crswap0(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// cswi(Rs) +RzILOpEffect *hex_il_op_y2_cswi(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// dccleana(Rs) +RzILOpEffect *hex_il_op_y2_dccleana(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// dccleanidx(Rs) +RzILOpEffect *hex_il_op_y2_dccleanidx(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// dccleaninva(Rs) +RzILOpEffect *hex_il_op_y2_dccleaninva(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// dccleaninvidx(Rs) +RzILOpEffect *hex_il_op_y2_dccleaninvidx(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// dcfetch(Rs+Ii) +RzILOpEffect *hex_il_op_y2_dcfetchbo(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// dcinva(Rs) +RzILOpEffect *hex_il_op_y2_dcinva(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// dcinvidx(Rs) +RzILOpEffect *hex_il_op_y2_dcinvidx(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// dckill +RzILOpEffect *hex_il_op_y2_dckill(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Rd = dctagr(Rs) +RzILOpEffect *hex_il_op_y2_dctagr(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// dctagw(Rs,Rt) +RzILOpEffect *hex_il_op_y2_dctagw(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// dczeroa(Rs) +RzILOpEffect *hex_il_op_y2_dczeroa(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Rd = getimask(Rs) +RzILOpEffect 
*hex_il_op_y2_getimask(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Rd = iassignr(Rs) +RzILOpEffect *hex_il_op_y2_iassignr(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// iassignw(Rs) +RzILOpEffect *hex_il_op_y2_iassignw(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Rd = icdatar(Rs) +RzILOpEffect *hex_il_op_y2_icdatar(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// icdataw(Rs,Rt) +RzILOpEffect *hex_il_op_y2_icdataw(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// icinva(Rs) +RzILOpEffect *hex_il_op_y2_icinva(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// icinvidx(Rs) +RzILOpEffect *hex_il_op_y2_icinvidx(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// ickill +RzILOpEffect *hex_il_op_y2_ickill(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Rd = ictagr(Rs) +RzILOpEffect *hex_il_op_y2_ictagr(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// ictagw(Rs,Rt) +RzILOpEffect *hex_il_op_y2_ictagw(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// isync +RzILOpEffect *hex_il_op_y2_isync(HexInsnPktBundle *bundle) { + return NOP(); +} + +// k0lock +RzILOpEffect *hex_il_op_y2_k0lock(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// k0unlock +RzILOpEffect *hex_il_op_y2_k0unlock(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// l2cleaninvidx(Rs) +RzILOpEffect *hex_il_op_y2_l2cleaninvidx(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// l2kill +RzILOpEffect *hex_il_op_y2_l2kill(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// resume(Rs) +RzILOpEffect *hex_il_op_y2_resume(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// setimask(Pt,Rs) +RzILOpEffect *hex_il_op_y2_setimask(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// setprio(Pt,Rs) +RzILOpEffect *hex_il_op_y2_setprio(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// start(Rs) +RzILOpEffect *hex_il_op_y2_start(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// stop(Rs) +RzILOpEffect 
*hex_il_op_y2_stop(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// swi(Rs) +RzILOpEffect *hex_il_op_y2_swi(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// syncht +RzILOpEffect *hex_il_op_y2_syncht(HexInsnPktBundle *bundle) { + return NOP(); +} + +// Rd = Ss +RzILOpEffect *hex_il_op_y2_tfrscrr(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Sd = Rs +RzILOpEffect *hex_il_op_y2_tfrsrcr(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// tlblock +RzILOpEffect *hex_il_op_y2_tlblock(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Rd = tlbp(Rs) +RzILOpEffect *hex_il_op_y2_tlbp(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Rdd = tlbr(Rs) +RzILOpEffect *hex_il_op_y2_tlbr(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// tlbunlock +RzILOpEffect *hex_il_op_y2_tlbunlock(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// tlbw(Rss,Rt) +RzILOpEffect *hex_il_op_y2_tlbw(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// wait(Rs) +RzILOpEffect *hex_il_op_y2_wait(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +#include \ No newline at end of file diff --git a/librz/arch/isa/hexagon/il_ops/hexagon_il_Y4_ops.c b/librz/arch/isa/hexagon/il_ops/hexagon_il_Y4_ops.c new file mode 100644 index 00000000000..a1232b24475 --- /dev/null +++ b/librz/arch/isa/hexagon/il_ops/hexagon_il_Y4_ops.c @@ -0,0 +1,67 @@ +// SPDX-FileCopyrightText: 2021 Rot127 +// SPDX-License-Identifier: LGPL-3.0-only + +// LLVM commit: b6f51787f6c8e77143f0aef6b58ddc7c55741d5c +// LLVM commit date: 2023-11-15 07:10:59 -0800 (ISO 8601 format) +// Date of code generation: 2024-03-16 06:22:39-05:00 +//======================================== +// The following code is generated. +// Do not edit. 
Repository of code generator: +// https://github.com/rizinorg/rz-hexagon + +#include +#include "../hexagon_il.h" +#include +#include + +// crswap(Rx,sgp1) +RzILOpEffect *hex_il_op_y4_crswap1(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// crswap(Rxx,sgp1:0) +RzILOpEffect *hex_il_op_y4_crswap10(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// l2fetch(Rs,Rt) +RzILOpEffect *hex_il_op_y4_l2fetch(HexInsnPktBundle *bundle) { + return NOP(); +} + +// Rd = l2tagr(Rs) +RzILOpEffect *hex_il_op_y4_l2tagr(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// l2tagw(Rs,Rt) +RzILOpEffect *hex_il_op_y4_l2tagw(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// nmi(Rs) +RzILOpEffect *hex_il_op_y4_nmi(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// siad(Rs) +RzILOpEffect *hex_il_op_y4_siad(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Rdd = Sss +RzILOpEffect *hex_il_op_y4_tfrscpp(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Sdd = Rss +RzILOpEffect *hex_il_op_y4_tfrspcp(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// trace(Rs) +RzILOpEffect *hex_il_op_y4_trace(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +#include \ No newline at end of file diff --git a/librz/arch/isa/hexagon/il_ops/hexagon_il_Y5_ops.c b/librz/arch/isa/hexagon/il_ops/hexagon_il_Y5_ops.c new file mode 100644 index 00000000000..b86c13d0a47 --- /dev/null +++ b/librz/arch/isa/hexagon/il_ops/hexagon_il_Y5_ops.c @@ -0,0 +1,72 @@ +// SPDX-FileCopyrightText: 2021 Rot127 +// SPDX-License-Identifier: LGPL-3.0-only + +// LLVM commit: b6f51787f6c8e77143f0aef6b58ddc7c55741d5c +// LLVM commit date: 2023-11-15 07:10:59 -0800 (ISO 8601 format) +// Date of code generation: 2024-03-16 06:22:39-05:00 +//======================================== +// The following code is generated. +// Do not edit. 
Repository of code generator: +// https://github.com/rizinorg/rz-hexagon + +#include +#include "../hexagon_il.h" +#include +#include + +// Rd = ctlbw(Rss,Rt) +RzILOpEffect *hex_il_op_y5_ctlbw(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// l2cleanidx(Rs) +RzILOpEffect *hex_il_op_y5_l2cleanidx(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// l2fetch(Rs,Rtt) +RzILOpEffect *hex_il_op_y5_l2fetch(HexInsnPktBundle *bundle) { + return NOP(); +} + +// l2gclean +RzILOpEffect *hex_il_op_y5_l2gclean(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// l2gcleaninv +RzILOpEffect *hex_il_op_y5_l2gcleaninv(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// l2gunlock +RzILOpEffect *hex_il_op_y5_l2gunlock(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// l2invidx(Rs) +RzILOpEffect *hex_il_op_y5_l2invidx(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Pd = l2locka(Rs) +RzILOpEffect *hex_il_op_y5_l2locka(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// l2unlocka(Rs) +RzILOpEffect *hex_il_op_y5_l2unlocka(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// tlbinvasid(Rs) +RzILOpEffect *hex_il_op_y5_tlbasidi(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Rd = tlboc(Rss) +RzILOpEffect *hex_il_op_y5_tlboc(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +#include \ No newline at end of file diff --git a/librz/arch/isa/hexagon/il_ops/hexagon_il_Y6_ops.c b/librz/arch/isa/hexagon/il_ops/hexagon_il_Y6_ops.c new file mode 100644 index 00000000000..4992754d359 --- /dev/null +++ b/librz/arch/isa/hexagon/il_ops/hexagon_il_Y6_ops.c @@ -0,0 +1,62 @@ +// SPDX-FileCopyrightText: 2021 Rot127 +// SPDX-License-Identifier: LGPL-3.0-only + +// LLVM commit: b6f51787f6c8e77143f0aef6b58ddc7c55741d5c +// LLVM commit date: 2023-11-15 07:10:59 -0800 (ISO 8601 format) +// Date of code generation: 2024-03-16 06:22:39-05:00 +//======================================== +// The following code is generated. +// Do not edit. 
Repository of code generator: +// https://github.com/rizinorg/rz-hexagon + +#include +#include "../hexagon_il.h" +#include +#include + +// diag(Rs) +RzILOpEffect *hex_il_op_y6_diag(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// diag0(Rss,Rtt) +RzILOpEffect *hex_il_op_y6_diag0(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// diag1(Rss,Rtt) +RzILOpEffect *hex_il_op_y6_diag1(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// dmlink(Rs,Rt) +RzILOpEffect *hex_il_op_y6_dmlink(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Rd = dmpause +RzILOpEffect *hex_il_op_y6_dmpause(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Rd = dmpoll +RzILOpEffect *hex_il_op_y6_dmpoll(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// dmresume(Rs) +RzILOpEffect *hex_il_op_y6_dmresume(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// dmstart(Rs) +RzILOpEffect *hex_il_op_y6_dmstart(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Rd = dmwait +RzILOpEffect *hex_il_op_y6_dmwait(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +#include \ No newline at end of file diff --git a/librz/arch/isa/hexagon/il_ops/hexagon_il_dep_ops.c b/librz/arch/isa/hexagon/il_ops/hexagon_il_dep_ops.c new file mode 100644 index 00000000000..3ff0cf18125 --- /dev/null +++ b/librz/arch/isa/hexagon/il_ops/hexagon_il_dep_ops.c @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2021 Rot127 +// SPDX-License-Identifier: LGPL-3.0-only + +// LLVM commit: b6f51787f6c8e77143f0aef6b58ddc7c55741d5c +// LLVM commit date: 2023-11-15 07:10:59 -0800 (ISO 8601 format) +// Date of code generation: 2024-03-16 06:22:39-05:00 +//======================================== +// The following code is generated. +// Do not edit. 
Repository of code generator: +// https://github.com/rizinorg/rz-hexagon + +#include +#include "../hexagon_il.h" +#include +#include + +// Rd = add(Rs,Rt):sat:deprecated +RzILOpEffect *hex_il_op_dep_a2_addsat(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Rd = sub(Rt,Rs):sat:deprecated +RzILOpEffect *hex_il_op_dep_a2_subsat(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +// Rdd = packhl(Rs,Rt):deprecated +RzILOpEffect *hex_il_op_dep_s2_packhl(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +#include \ No newline at end of file diff --git a/librz/arch/isa/hexagon/il_ops/hexagon_il_invalid_ops.c b/librz/arch/isa/hexagon/il_ops/hexagon_il_invalid_ops.c new file mode 100644 index 00000000000..a0941dfb43e --- /dev/null +++ b/librz/arch/isa/hexagon/il_ops/hexagon_il_invalid_ops.c @@ -0,0 +1,22 @@ +// SPDX-FileCopyrightText: 2021 Rot127 +// SPDX-License-Identifier: LGPL-3.0-only + +// LLVM commit: b6f51787f6c8e77143f0aef6b58ddc7c55741d5c +// LLVM commit date: 2023-11-15 07:10:59 -0800 (ISO 8601 format) +// Date of code generation: 2024-03-16 06:22:39-05:00 +//======================================== +// The following code is generated. +// Do not edit. 
Repository of code generator: +// https://github.com/rizinorg/rz-hexagon + +#include +#include "../hexagon_il.h" +#include +#include + +// +RzILOpEffect *hex_il_op_invalid_decode(HexInsnPktBundle *bundle) { + NOT_IMPLEMENTED; +} + +#include \ No newline at end of file diff --git a/librz/arch/isa/hexagon/il_ops/hexagon_il_non_insn_ops.c b/librz/arch/isa/hexagon/il_ops/hexagon_il_non_insn_ops.c new file mode 100644 index 00000000000..919cc790200 --- /dev/null +++ b/librz/arch/isa/hexagon/il_ops/hexagon_il_non_insn_ops.c @@ -0,0 +1,960 @@ +// SPDX-FileCopyrightText: 2021 Rot127 +// SPDX-License-Identifier: LGPL-3.0-only + +// LLVM commit: b6f51787f6c8e77143f0aef6b58ddc7c55741d5c +// LLVM commit date: 2023-11-15 07:10:59 -0800 (ISO 8601 format) +// Date of code generation: 2024-03-16 06:22:39-05:00 +//======================================== +// The following code is generated. +// Do not edit. Repository of code generator: +// https://github.com/rizinorg/rz-hexagon + +#include +#include "../hexagon_il.h" +#include +#include + +// No syntax +RzILOpEffect *hex_il_op_j2_endloop01(HexInsnPktBundle *bundle) { + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P3_op = EXPLICIT2OP(3, HEX_REG_CLASS_PRED_REGS, false); + const HexOp lc0_op = ALIAS2OP(HEX_REG_ALIAS_LC0, false); + const HexOp sa0_op = ALIAS2OP(HEX_REG_ALIAS_SA0, false); + RzILOpPure *sa0 = READ_REG(pkt, &sa0_op, false); + const HexOp lc1_op = ALIAS2OP(HEX_REG_ALIAS_LC1, false); + const HexOp sa1_op = ALIAS2OP(HEX_REG_ALIAS_SA1, false); + RzILOpPure *sa1 = READ_REG(pkt, &sa1_op, false); + + // get_usr_field(bundle, HEX_REG_FIELD_USR_LPCFG); + RzILOpEffect *get_usr_field_call_0 = hex_get_usr_field(bundle, HEX_REG_FIELD_USR_LPCFG); + + // h_tmp620 = get_usr_field(bundle, HEX_REG_FIELD_USR_LPCFG); + RzILOpEffect *op_ASSIGN_hybrid_tmp_2 = SETL("h_tmp620", UNSIGNED(32, VARL("ret_val"))); + + // seq(get_usr_field(bundle, HEX_REG_FIELD_USR_LPCFG); h_tmp620 = g ...; + RzILOpEffect *seq_3 = SEQN(2, get_usr_field_call_0, 
op_ASSIGN_hybrid_tmp_2); + + // get_usr_field(bundle, HEX_REG_FIELD_USR_LPCFG); + RzILOpEffect *get_usr_field_call_4 = hex_get_usr_field(bundle, HEX_REG_FIELD_USR_LPCFG); + + // h_tmp621 = get_usr_field(bundle, HEX_REG_FIELD_USR_LPCFG); + RzILOpEffect *op_ASSIGN_hybrid_tmp_6 = SETL("h_tmp621", UNSIGNED(32, VARL("ret_val"))); + + // seq(get_usr_field(bundle, HEX_REG_FIELD_USR_LPCFG); h_tmp621 = g ...; + RzILOpEffect *seq_7 = SEQN(2, get_usr_field_call_4, op_ASSIGN_hybrid_tmp_6); + + // get_usr_field(bundle, HEX_REG_FIELD_USR_LPCFG); + RzILOpEffect *get_usr_field_call_12 = hex_get_usr_field(bundle, HEX_REG_FIELD_USR_LPCFG); + + // h_tmp622 = get_usr_field(bundle, HEX_REG_FIELD_USR_LPCFG); + RzILOpEffect *op_ASSIGN_hybrid_tmp_14 = SETL("h_tmp622", UNSIGNED(32, VARL("ret_val"))); + + // seq(get_usr_field(bundle, HEX_REG_FIELD_USR_LPCFG); h_tmp622 = g ...; + RzILOpEffect *seq_15 = SEQN(2, get_usr_field_call_12, op_ASSIGN_hybrid_tmp_14); + + // P3 = ((st8) 0xff); + RzILOpEffect *op_ASSIGN_22 = WRITE_REG(bundle, &P3_op, CAST(8, MSB(SN(32, 0xff)), SN(32, 0xff))); + + // seq(P3 = ((st8) 0xff)); + RzILOpEffect *seq_then_24 = op_ASSIGN_22; + + // if ((h_tmp622 == ((ut32) 0x1))) {seq(P3 = ((st8) 0xff))} else {{}}; + RzILOpPure *op_EQ_18 = EQ(VARL("h_tmp622"), CAST(32, IL_FALSE, SN(32, 1))); + RzILOpEffect *branch_25 = BRANCH(op_EQ_18, seq_then_24, EMPTY()); + + // seq(seq(get_usr_field(bundle, HEX_REG_FIELD_USR_LPCFG); h_tmp622 ...; + RzILOpEffect *seq_26 = SEQN(2, seq_15, branch_25); + + // seq({}); + RzILOpEffect *seq_then_27 = EMPTY(); + + // seq(seq(seq(get_usr_field(bundle, HEX_REG_FIELD_USR_LPCFG); h_tm ...; + RzILOpEffect *seq_else_28 = seq_26; + + // if ((h_tmp621 >= ((ut32) 0x2))) {seq({})} else {seq(seq(seq(get_usr_field(bundle, HEX_REG_FIELD_USR_LPCFG); h_tm ...}; + RzILOpPure *op_GE_10 = UGE(VARL("h_tmp621"), CAST(32, IL_FALSE, SN(32, 2))); + RzILOpEffect *branch_29 = BRANCH(op_GE_10, seq_then_27, seq_else_28); + + // seq(seq(get_usr_field(bundle, 
HEX_REG_FIELD_USR_LPCFG); h_tmp621 ...; + RzILOpEffect *seq_30 = SEQN(2, seq_7, branch_29); + + // get_usr_field(bundle, HEX_REG_FIELD_USR_LPCFG); + RzILOpEffect *get_usr_field_call_31 = hex_get_usr_field(bundle, HEX_REG_FIELD_USR_LPCFG); + + // h_tmp623 = get_usr_field(bundle, HEX_REG_FIELD_USR_LPCFG); + RzILOpEffect *op_ASSIGN_hybrid_tmp_33 = SETL("h_tmp623", UNSIGNED(32, VARL("ret_val"))); + + // seq(get_usr_field(bundle, HEX_REG_FIELD_USR_LPCFG); h_tmp623 = g ...; + RzILOpEffect *seq_34 = SEQN(2, get_usr_field_call_31, op_ASSIGN_hybrid_tmp_33); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_LPCFG, h_tmp623 - ((ut32) 0x1)); + RzILOpPure *op_SUB_37 = SUB(VARL("h_tmp623"), CAST(32, IL_FALSE, SN(32, 1))); + RzILOpEffect *set_usr_field_call_38 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_LPCFG, op_SUB_37); + + // seq(seq(seq(get_usr_field(bundle, HEX_REG_FIELD_USR_LPCFG); h_tm ...; + RzILOpEffect *seq_then_39 = SEQN(2, seq_30, set_usr_field_call_38); + + // seq(seq(get_usr_field(bundle, HEX_REG_FIELD_USR_LPCFG); h_tmp623 ...; + RzILOpEffect *seq_40 = SEQN(2, seq_34, seq_then_39); + + // if (h_tmp620) {seq(seq(get_usr_field(bundle, HEX_REG_FIELD_USR_LPCFG); h_tmp623 ...} else {{}}; + RzILOpEffect *branch_41 = BRANCH(NON_ZERO(VARL("h_tmp620")), seq_40, EMPTY()); + + // seq(seq(get_usr_field(bundle, HEX_REG_FIELD_USR_LPCFG); h_tmp620 ...; + RzILOpEffect *seq_42 = SEQN(2, seq_3, branch_41); + + // jump(sa0); + RzILOpEffect *jump_sa0_48 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", sa0)); + + // lc0 = lc0 - ((ut32) 0x1); + RzILOpPure *op_SUB_52 = SUB(READ_REG(pkt, &lc0_op, true), CAST(32, IL_FALSE, SN(32, 1))); + RzILOpEffect *op_ASSIGN_53 = WRITE_REG(bundle, &lc0_op, op_SUB_52); + + // jump(sa1); + RzILOpEffect *jump_sa1_59 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", sa1)); + + // lc1 = lc1 - ((ut32) 0x1); + RzILOpPure *op_SUB_63 = SUB(READ_REG(pkt, &lc1_op, true), CAST(32, IL_FALSE, SN(32, 1))); + RzILOpEffect *op_ASSIGN_64 = WRITE_REG(bundle, 
&lc1_op, op_SUB_63); + + // seq(jump(sa1); lc1 = lc1 - ((ut32) 0x1)); + RzILOpEffect *seq_then_65 = SEQN(2, jump_sa1_59, op_ASSIGN_64); + + // if ((lc1 > ((ut32) 0x1))) {seq(jump(sa1); lc1 = lc1 - ((ut32) 0x1))} else {{}}; + RzILOpPure *op_GT_57 = UGT(READ_REG(pkt, &lc1_op, true), CAST(32, IL_FALSE, SN(32, 1))); + RzILOpEffect *branch_66 = BRANCH(op_GT_57, seq_then_65, EMPTY()); + + // seq(jump(sa0); lc0 = lc0 - ((ut32) 0x1)); + RzILOpEffect *seq_then_67 = SEQN(2, jump_sa0_48, op_ASSIGN_53); + + // seq(if ((lc1 > ((ut32) 0x1))) {seq(jump(sa1); lc1 = lc1 - ((ut32 ...; + RzILOpEffect *seq_else_68 = branch_66; + + // if ((lc0 > ((ut32) 0x1))) {seq(jump(sa0); lc0 = lc0 - ((ut32) 0x1))} else {seq(if ((lc1 > ((ut32) 0x1))) {seq(jump(sa1); lc1 = lc1 - ((ut32 ...}; + RzILOpPure *op_GT_46 = UGT(READ_REG(pkt, &lc0_op, true), CAST(32, IL_FALSE, SN(32, 1))); + RzILOpEffect *branch_69 = BRANCH(op_GT_46, seq_then_67, seq_else_68); + + RzILOpEffect *instruction_sequence = SEQN(2, seq_42, branch_69); + return instruction_sequence; +} + +// No syntax +RzILOpEffect *hex_il_op_j2_endloop1(HexInsnPktBundle *bundle) { + HexPkt *pkt = bundle->pkt; + // READ + const HexOp lc1_op = ALIAS2OP(HEX_REG_ALIAS_LC1, false); + const HexOp sa1_op = ALIAS2OP(HEX_REG_ALIAS_SA1, false); + RzILOpPure *sa1 = READ_REG(pkt, &sa1_op, false); + + // jump(sa1); + RzILOpEffect *jump_sa1_5 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", sa1)); + + // lc1 = lc1 - ((ut32) 0x1); + RzILOpPure *op_SUB_9 = SUB(READ_REG(pkt, &lc1_op, true), CAST(32, IL_FALSE, SN(32, 1))); + RzILOpEffect *op_ASSIGN_10 = WRITE_REG(bundle, &lc1_op, op_SUB_9); + + // seq(jump(sa1); lc1 = lc1 - ((ut32) 0x1)); + RzILOpEffect *seq_then_11 = SEQN(2, jump_sa1_5, op_ASSIGN_10); + + // if ((lc1 > ((ut32) 0x1))) {seq(jump(sa1); lc1 = lc1 - ((ut32) 0x1))} else {{}}; + RzILOpPure *op_GT_3 = UGT(READ_REG(pkt, &lc1_op, true), CAST(32, IL_FALSE, SN(32, 1))); + RzILOpEffect *branch_12 = BRANCH(op_GT_3, seq_then_11, EMPTY()); + + RzILOpEffect 
*instruction_sequence = branch_12; + return instruction_sequence; +} + +// No syntax +RzILOpEffect *hex_il_op_j2_endloop0(HexInsnPktBundle *bundle) { + HexPkt *pkt = bundle->pkt; + // READ + const HexOp P3_op = EXPLICIT2OP(3, HEX_REG_CLASS_PRED_REGS, false); + const HexOp lc0_op = ALIAS2OP(HEX_REG_ALIAS_LC0, false); + const HexOp sa0_op = ALIAS2OP(HEX_REG_ALIAS_SA0, false); + RzILOpPure *sa0 = READ_REG(pkt, &sa0_op, false); + + // get_usr_field(bundle, HEX_REG_FIELD_USR_LPCFG); + RzILOpEffect *get_usr_field_call_0 = hex_get_usr_field(bundle, HEX_REG_FIELD_USR_LPCFG); + + // h_tmp624 = get_usr_field(bundle, HEX_REG_FIELD_USR_LPCFG); + RzILOpEffect *op_ASSIGN_hybrid_tmp_2 = SETL("h_tmp624", UNSIGNED(32, VARL("ret_val"))); + + // seq(get_usr_field(bundle, HEX_REG_FIELD_USR_LPCFG); h_tmp624 = g ...; + RzILOpEffect *seq_3 = SEQN(2, get_usr_field_call_0, op_ASSIGN_hybrid_tmp_2); + + // get_usr_field(bundle, HEX_REG_FIELD_USR_LPCFG); + RzILOpEffect *get_usr_field_call_4 = hex_get_usr_field(bundle, HEX_REG_FIELD_USR_LPCFG); + + // h_tmp625 = get_usr_field(bundle, HEX_REG_FIELD_USR_LPCFG); + RzILOpEffect *op_ASSIGN_hybrid_tmp_6 = SETL("h_tmp625", UNSIGNED(32, VARL("ret_val"))); + + // seq(get_usr_field(bundle, HEX_REG_FIELD_USR_LPCFG); h_tmp625 = g ...; + RzILOpEffect *seq_7 = SEQN(2, get_usr_field_call_4, op_ASSIGN_hybrid_tmp_6); + + // get_usr_field(bundle, HEX_REG_FIELD_USR_LPCFG); + RzILOpEffect *get_usr_field_call_12 = hex_get_usr_field(bundle, HEX_REG_FIELD_USR_LPCFG); + + // h_tmp626 = get_usr_field(bundle, HEX_REG_FIELD_USR_LPCFG); + RzILOpEffect *op_ASSIGN_hybrid_tmp_14 = SETL("h_tmp626", UNSIGNED(32, VARL("ret_val"))); + + // seq(get_usr_field(bundle, HEX_REG_FIELD_USR_LPCFG); h_tmp626 = g ...; + RzILOpEffect *seq_15 = SEQN(2, get_usr_field_call_12, op_ASSIGN_hybrid_tmp_14); + + // P3 = ((st8) 0xff); + RzILOpEffect *op_ASSIGN_22 = WRITE_REG(bundle, &P3_op, CAST(8, MSB(SN(32, 0xff)), SN(32, 0xff))); + + // seq(P3 = ((st8) 0xff)); + RzILOpEffect *seq_then_24 = 
op_ASSIGN_22; + + // if ((h_tmp626 == ((ut32) 0x1))) {seq(P3 = ((st8) 0xff))} else {{}}; + RzILOpPure *op_EQ_18 = EQ(VARL("h_tmp626"), CAST(32, IL_FALSE, SN(32, 1))); + RzILOpEffect *branch_25 = BRANCH(op_EQ_18, seq_then_24, EMPTY()); + + // seq(seq(get_usr_field(bundle, HEX_REG_FIELD_USR_LPCFG); h_tmp626 ...; + RzILOpEffect *seq_26 = SEQN(2, seq_15, branch_25); + + // seq({}); + RzILOpEffect *seq_then_27 = EMPTY(); + + // seq(seq(seq(get_usr_field(bundle, HEX_REG_FIELD_USR_LPCFG); h_tm ...; + RzILOpEffect *seq_else_28 = seq_26; + + // if ((h_tmp625 >= ((ut32) 0x2))) {seq({})} else {seq(seq(seq(get_usr_field(bundle, HEX_REG_FIELD_USR_LPCFG); h_tm ...}; + RzILOpPure *op_GE_10 = UGE(VARL("h_tmp625"), CAST(32, IL_FALSE, SN(32, 2))); + RzILOpEffect *branch_29 = BRANCH(op_GE_10, seq_then_27, seq_else_28); + + // seq(seq(get_usr_field(bundle, HEX_REG_FIELD_USR_LPCFG); h_tmp625 ...; + RzILOpEffect *seq_30 = SEQN(2, seq_7, branch_29); + + // get_usr_field(bundle, HEX_REG_FIELD_USR_LPCFG); + RzILOpEffect *get_usr_field_call_31 = hex_get_usr_field(bundle, HEX_REG_FIELD_USR_LPCFG); + + // h_tmp627 = get_usr_field(bundle, HEX_REG_FIELD_USR_LPCFG); + RzILOpEffect *op_ASSIGN_hybrid_tmp_33 = SETL("h_tmp627", UNSIGNED(32, VARL("ret_val"))); + + // seq(get_usr_field(bundle, HEX_REG_FIELD_USR_LPCFG); h_tmp627 = g ...; + RzILOpEffect *seq_34 = SEQN(2, get_usr_field_call_31, op_ASSIGN_hybrid_tmp_33); + + // set_usr_field(bundle, HEX_REG_FIELD_USR_LPCFG, h_tmp627 - ((ut32) 0x1)); + RzILOpPure *op_SUB_37 = SUB(VARL("h_tmp627"), CAST(32, IL_FALSE, SN(32, 1))); + RzILOpEffect *set_usr_field_call_38 = hex_set_usr_field(bundle, HEX_REG_FIELD_USR_LPCFG, op_SUB_37); + + // seq(seq(seq(get_usr_field(bundle, HEX_REG_FIELD_USR_LPCFG); h_tm ...; + RzILOpEffect *seq_then_39 = SEQN(2, seq_30, set_usr_field_call_38); + + // seq(seq(get_usr_field(bundle, HEX_REG_FIELD_USR_LPCFG); h_tmp627 ...; + RzILOpEffect *seq_40 = SEQN(2, seq_34, seq_then_39); + + // if (h_tmp624) {seq(seq(get_usr_field(bundle, 
HEX_REG_FIELD_USR_LPCFG); h_tmp627 ...} else {{}}; + RzILOpEffect *branch_41 = BRANCH(NON_ZERO(VARL("h_tmp624")), seq_40, EMPTY()); + + // seq(seq(get_usr_field(bundle, HEX_REG_FIELD_USR_LPCFG); h_tmp624 ...; + RzILOpEffect *seq_42 = SEQN(2, seq_3, branch_41); + + // jump(sa0); + RzILOpEffect *jump_sa0_48 = SEQ2(SETL("jump_flag", IL_TRUE), SETL("jump_target", sa0)); + + // lc0 = lc0 - ((ut32) 0x1); + RzILOpPure *op_SUB_52 = SUB(READ_REG(pkt, &lc0_op, true), CAST(32, IL_FALSE, SN(32, 1))); + RzILOpEffect *op_ASSIGN_53 = WRITE_REG(bundle, &lc0_op, op_SUB_52); + + // seq(jump(sa0); lc0 = lc0 - ((ut32) 0x1)); + RzILOpEffect *seq_then_54 = SEQN(2, jump_sa0_48, op_ASSIGN_53); + + // if ((lc0 > ((ut32) 0x1))) {seq(jump(sa0); lc0 = lc0 - ((ut32) 0x1))} else {{}}; + RzILOpPure *op_GT_46 = UGT(READ_REG(pkt, &lc0_op, true), CAST(32, IL_FALSE, SN(32, 1))); + RzILOpEffect *branch_55 = BRANCH(op_GT_46, seq_then_54, EMPTY()); + + RzILOpEffect *instruction_sequence = SEQN(2, seq_42, branch_55); + return instruction_sequence; +} + +RZ_OWN RzILOpEffect *hex_fcirc_add(HexInsnPktBundle *bundle, const HexOp *RxV, RZ_BORROW RzILOpPure *offset, RZ_BORROW RzILOpPure *M, RZ_BORROW RzILOpPure *CS) { + const HexInsn *hi = bundle->insn; + HexPkt *pkt = bundle->pkt; + + // READ + // Declare: ut32 K_const; + // Declare: ut32 length; + const HexOp *Rx_op = ISA2REG(hi, 'x', false); + + // Declare: ut32 new_ptr; + // Declare: ut32 start_addr; + // Declare: ut32 end_addr; + // Declare: st32 mask; + + // K_const = extract32(((ut32) M), 0x18, 0x4); + RzILOpEffect *op_ASSIGN_5 = SETL("K_const", EXTRACT32(CAST(32, IL_FALSE, M), SN(32, 24), SN(32, 4))); + + // length = extract32(((ut32) M), 0x0, 0x11); + RzILOpEffect *op_ASSIGN_11 = SETL("length", EXTRACT32(CAST(32, IL_FALSE, DUP(M)), SN(32, 0), SN(32, 17))); + + // new_ptr = ((ut32) Rx + offset); + RzILOpPure *op_ADD_13 = ADD(READ_REG(pkt, Rx_op, false), offset); + RzILOpEffect *op_ASSIGN_15 = SETL("new_ptr", CAST(32, IL_FALSE, op_ADD_13)); + + // 
start_addr = ((ut32) CS); + RzILOpEffect *op_ASSIGN_27 = SETL("start_addr", CAST(32, IL_FALSE, CS)); + + // end_addr = start_addr + length; + RzILOpPure *op_ADD_28 = ADD(VARL("start_addr"), VARL("length")); + RzILOpEffect *op_ASSIGN_29 = SETL("end_addr", op_ADD_28); + + // mask = (0x1 << K_const + ((ut32) 0x2)) - 0x1; + RzILOpPure *op_ADD_33 = ADD(VARL("K_const"), CAST(32, IL_FALSE, SN(32, 2))); + RzILOpPure *op_LSHIFT_34 = SHIFTL0(SN(32, 1), op_ADD_33); + RzILOpPure *op_SUB_36 = SUB(op_LSHIFT_34, SN(32, 1)); + RzILOpEffect *op_ASSIGN_38 = SETL("mask", op_SUB_36); + + // start_addr = ((ut32) (Rx & (~mask))); + RzILOpPure *op_NOT_39 = LOGNOT(VARL("mask")); + RzILOpPure *op_AND_40 = LOGAND(READ_REG(pkt, Rx_op, false), op_NOT_39); + RzILOpEffect *op_ASSIGN_42 = SETL("start_addr", CAST(32, IL_FALSE, op_AND_40)); + + // end_addr = (start_addr | length); + RzILOpPure *op_OR_43 = LOGOR(VARL("start_addr"), VARL("length")); + RzILOpEffect *op_ASSIGN_44 = SETL("end_addr", op_OR_43); + + // seq(start_addr = ((ut32) CS); end_addr = start_addr + length); + RzILOpEffect *seq_then_45 = SEQN(2, op_ASSIGN_27, op_ASSIGN_29); + + // seq(mask = (0x1 << K_const + ((ut32) 0x2)) - 0x1; start_addr = ( ...; + RzILOpEffect *seq_else_46 = SEQN(3, op_ASSIGN_38, op_ASSIGN_42, op_ASSIGN_44); + + // if (((K_const == ((ut32) 0x0)) && (length >= ((ut32) 0x4)))) {seq(start_addr = ((ut32) CS); end_addr = start_addr + length)} else {seq(mask = (0x1 << K_const + ((ut32) 0x2)) - 0x1; start_addr = ( ...}; + RzILOpPure *op_EQ_21 = EQ(VARL("K_const"), CAST(32, IL_FALSE, SN(32, 0))); + RzILOpPure *op_GE_24 = UGE(VARL("length"), CAST(32, IL_FALSE, SN(32, 4))); + RzILOpPure *op_AND_25 = AND(op_EQ_21, op_GE_24); + RzILOpEffect *branch_47 = BRANCH(op_AND_25, seq_then_45, seq_else_46); + + // new_ptr = new_ptr - length; + RzILOpPure *op_SUB_49 = SUB(VARL("new_ptr"), VARL("length")); + RzILOpEffect *op_ASSIGN_SUB_50 = SETL("new_ptr", op_SUB_49); + + // new_ptr = new_ptr + length; + RzILOpPure *op_ADD_52 = 
ADD(VARL("new_ptr"), VARL("length")); + RzILOpEffect *op_ASSIGN_ADD_53 = SETL("new_ptr", op_ADD_52); + + // seq(new_ptr = new_ptr + length); + RzILOpEffect *seq_then_54 = op_ASSIGN_ADD_53; + + // if ((new_ptr < start_addr)) {seq(new_ptr = new_ptr + length)} else {{}}; + RzILOpPure *op_LT_51 = ULT(VARL("new_ptr"), VARL("start_addr")); + RzILOpEffect *branch_55 = BRANCH(op_LT_51, seq_then_54, EMPTY()); + + // seq(new_ptr = new_ptr - length); + RzILOpEffect *seq_then_56 = op_ASSIGN_SUB_50; + + // seq(if ((new_ptr < start_addr)) {seq(new_ptr = new_ptr + length) ...; + RzILOpEffect *seq_else_57 = branch_55; + + // if ((new_ptr >= end_addr)) {seq(new_ptr = new_ptr - length)} else {seq(if ((new_ptr < start_addr)) {seq(new_ptr = new_ptr + length) ...}; + RzILOpPure *op_GE_48 = UGE(VARL("new_ptr"), VARL("end_addr")); + RzILOpEffect *branch_58 = BRANCH(op_GE_48, seq_then_56, seq_else_57); + + // Rx = ((st32) new_ptr); + RzILOpEffect *op_ASSIGN_60 = WRITE_REG(bundle, Rx_op, CAST(32, IL_FALSE, VARL("new_ptr"))); + + // ret_val_st32 = ((ut64) new_ptr); + RzILOpEffect *set_return_val_63 = SETL("ret_val", CAST(64, IL_FALSE, VARL("new_ptr"))); + + RzILOpEffect *instruction_sequence = SEQN(7, op_ASSIGN_5, op_ASSIGN_11, op_ASSIGN_15, branch_47, branch_58, op_ASSIGN_60, set_return_val_63); + return instruction_sequence; +} + +RZ_OWN RzILOpEffect *hex_trap(RZ_BORROW RzILOpPure *trap_type, RZ_BORROW RzILOpPure *imm) { + + // READ + // Declare: ut32 dummy; + + // dummy = ((ut32) trap_type) + imm; + RzILOpPure *op_ADD_1 = ADD(CAST(32, IL_FALSE, trap_type), imm); + RzILOpEffect *op_ASSIGN_3 = SETL("dummy", op_ADD_1); + + RzILOpEffect *instruction_sequence = op_ASSIGN_3; + return instruction_sequence; +} + +RZ_OWN RzILOpEffect *hex_clz32(RZ_BORROW RzILOpPure *t) { + + // READ + // Declare: ut32 clz32_x; + // Declare: ut32 clz32_n; + + // clz32_x = t; + RzILOpEffect *op_ASSIGN_1 = SETL("clz32_x", t); + + // ret_val_ut32 = ((ut64) 0x20); + RzILOpEffect *set_return_val_8 = SETL("ret_val", 
CAST(64, IL_FALSE, SN(32, 0x20))); + + // clz32_n = ((ut32) 0x0); + RzILOpEffect *op_ASSIGN_11 = SETL("clz32_n", CAST(32, IL_FALSE, SN(32, 0))); + + // clz32_n = clz32_n + ((ut32) 0x10); + RzILOpPure *op_ADD_17 = ADD(VARL("clz32_n"), CAST(32, IL_FALSE, SN(32, 16))); + RzILOpEffect *op_ASSIGN_ADD_18 = SETL("clz32_n", op_ADD_17); + + // clz32_x = (clz32_x << 0x10); + RzILOpPure *op_SHIFTL_20 = SHIFTL0(VARL("clz32_x"), SN(32, 16)); + RzILOpEffect *op_ASSIGN_LEFT_21 = SETL("clz32_x", op_SHIFTL_20); + + // seq(clz32_n = clz32_n + ((ut32) 0x10); clz32_x = (clz32_x << 0x1 ...; + RzILOpEffect *seq_then_22 = SEQN(2, op_ASSIGN_ADD_18, op_ASSIGN_LEFT_21); + + // if ((clz32_x <= 0xffff)) {seq(clz32_n = clz32_n + ((ut32) 0x10); clz32_x = (clz32_x << 0x1 ...} else {{}}; + RzILOpPure *op_LE_14 = ULE(VARL("clz32_x"), UN(32, 0xffff)); + RzILOpEffect *branch_23 = BRANCH(op_LE_14, seq_then_22, EMPTY()); + + // clz32_n = clz32_n + ((ut32) 0x8); + RzILOpPure *op_ADD_28 = ADD(VARL("clz32_n"), CAST(32, IL_FALSE, SN(32, 8))); + RzILOpEffect *op_ASSIGN_ADD_29 = SETL("clz32_n", op_ADD_28); + + // clz32_x = (clz32_x << 0x8); + RzILOpPure *op_SHIFTL_31 = SHIFTL0(VARL("clz32_x"), SN(32, 8)); + RzILOpEffect *op_ASSIGN_LEFT_32 = SETL("clz32_x", op_SHIFTL_31); + + // seq(clz32_n = clz32_n + ((ut32) 0x8); clz32_x = (clz32_x << 0x8)); + RzILOpEffect *seq_then_33 = SEQN(2, op_ASSIGN_ADD_29, op_ASSIGN_LEFT_32); + + // if ((clz32_x <= 0xffffff)) {seq(clz32_n = clz32_n + ((ut32) 0x8); clz32_x = (clz32_x << 0x8))} else {{}}; + RzILOpPure *op_LE_25 = ULE(VARL("clz32_x"), UN(32, 0xffffff)); + RzILOpEffect *branch_34 = BRANCH(op_LE_25, seq_then_33, EMPTY()); + + // clz32_n = clz32_n + ((ut32) 0x4); + RzILOpPure *op_ADD_39 = ADD(VARL("clz32_n"), CAST(32, IL_FALSE, SN(32, 4))); + RzILOpEffect *op_ASSIGN_ADD_40 = SETL("clz32_n", op_ADD_39); + + // clz32_x = (clz32_x << 0x4); + RzILOpPure *op_SHIFTL_42 = SHIFTL0(VARL("clz32_x"), SN(32, 4)); + RzILOpEffect *op_ASSIGN_LEFT_43 = SETL("clz32_x", op_SHIFTL_42); + + 
// seq(clz32_n = clz32_n + ((ut32) 0x4); clz32_x = (clz32_x << 0x4)); + RzILOpEffect *seq_then_44 = SEQN(2, op_ASSIGN_ADD_40, op_ASSIGN_LEFT_43); + + // if ((clz32_x <= 0xfffffff)) {seq(clz32_n = clz32_n + ((ut32) 0x4); clz32_x = (clz32_x << 0x4))} else {{}}; + RzILOpPure *op_LE_36 = ULE(VARL("clz32_x"), UN(32, 0xfffffff)); + RzILOpEffect *branch_45 = BRANCH(op_LE_36, seq_then_44, EMPTY()); + + // clz32_n = clz32_n + ((ut32) 0x2); + RzILOpPure *op_ADD_50 = ADD(VARL("clz32_n"), CAST(32, IL_FALSE, SN(32, 2))); + RzILOpEffect *op_ASSIGN_ADD_51 = SETL("clz32_n", op_ADD_50); + + // clz32_x = (clz32_x << 0x2); + RzILOpPure *op_SHIFTL_53 = SHIFTL0(VARL("clz32_x"), SN(32, 2)); + RzILOpEffect *op_ASSIGN_LEFT_54 = SETL("clz32_x", op_SHIFTL_53); + + // seq(clz32_n = clz32_n + ((ut32) 0x2); clz32_x = (clz32_x << 0x2)); + RzILOpEffect *seq_then_55 = SEQN(2, op_ASSIGN_ADD_51, op_ASSIGN_LEFT_54); + + // if ((clz32_x <= 0x3fffffff)) {seq(clz32_n = clz32_n + ((ut32) 0x2); clz32_x = (clz32_x << 0x2))} else {{}}; + RzILOpPure *op_LE_47 = ULE(VARL("clz32_x"), UN(32, 0x3fffffff)); + RzILOpEffect *branch_56 = BRANCH(op_LE_47, seq_then_55, EMPTY()); + + // HYB(++clz32_n); + RzILOpEffect *op_INC_59 = SETL("clz32_n", INC(VARL("clz32_n"), 32)); + + // h_tmp0 = HYB(++clz32_n); + RzILOpEffect *op_ASSIGN_hybrid_tmp_61 = SETL("h_tmp0", VARL("clz32_n")); + + // seq(h_tmp0 = HYB(++clz32_n); HYB(++clz32_n)); + RzILOpEffect *seq_62 = SEQN(2, op_ASSIGN_hybrid_tmp_61, op_INC_59); + + // seq(h_tmp0; {}); + RzILOpEffect *seq_then_63 = EMPTY(); + + // seq(seq(h_tmp0 = HYB(++clz32_n); HYB(++clz32_n)); seq(h_tmp0; {} ...; + RzILOpEffect *seq_64 = SEQN(2, seq_62, seq_then_63); + + // if ((clz32_x <= 0x7fffffff)) {seq(seq(h_tmp0 = HYB(++clz32_n); HYB(++clz32_n)); seq(h_tmp0; {} ...} else {{}}; + RzILOpPure *op_LE_58 = ULE(VARL("clz32_x"), UN(32, 0x7fffffff)); + RzILOpEffect *branch_65 = BRANCH(op_LE_58, seq_64, EMPTY()); + + // ret_val_ut32 = ((ut64) clz32_n); + RzILOpEffect *set_return_val_67 = 
SETL("ret_val", CAST(64, IL_FALSE, VARL("clz32_n"))); + + // seq(ret_val_ut32 = ((ut64) 0x20)); + RzILOpEffect *seq_then_68 = set_return_val_8; + + // seq(clz32_n = ((ut32) 0x0); if ((clz32_x <= 0xffff)) {seq(clz32_ ...; + RzILOpEffect *seq_else_69 = SEQN(7, op_ASSIGN_11, branch_23, branch_34, branch_45, branch_56, branch_65, set_return_val_67); + + // if ((clz32_x == ((ut32) 0x0))) {seq(ret_val_ut32 = ((ut64) 0x20))} else {seq(clz32_n = ((ut32) 0x0); if ((clz32_x <= 0xffff)) {seq(clz32_ ...}; + RzILOpPure *op_EQ_4 = EQ(VARL("clz32_x"), CAST(32, IL_FALSE, SN(32, 0))); + RzILOpEffect *branch_70 = BRANCH(op_EQ_4, seq_then_68, seq_else_69); + + RzILOpEffect *instruction_sequence = SEQN(2, op_ASSIGN_1, branch_70); + return instruction_sequence; +} + +RZ_OWN RzILOpEffect *hex_clz64(RZ_BORROW RzILOpPure *t) { + + // READ + // Declare: ut64 clz64_x; + // Declare: ut64 clz64_n; + + // clz64_x = t; + RzILOpEffect *op_ASSIGN_1 = SETL("clz64_x", t); + + // ret_val_ut64 = ((ut64) 0x40); + RzILOpEffect *set_return_val_8 = SETL("ret_val", CAST(64, IL_FALSE, SN(32, 0x40))); + + // clz64_n = ((ut64) 0x0); + RzILOpEffect *op_ASSIGN_11 = SETL("clz64_n", CAST(64, IL_FALSE, SN(32, 0))); + + // clz64_n = clz64_n + ((ut64) 0x20); + RzILOpPure *op_ADD_17 = ADD(VARL("clz64_n"), CAST(64, IL_FALSE, SN(32, 0x20))); + RzILOpEffect *op_ASSIGN_ADD_18 = SETL("clz64_n", op_ADD_17); + + // clz64_x = (clz64_x << 0x20); + RzILOpPure *op_SHIFTL_20 = SHIFTL0(VARL("clz64_x"), SN(32, 0x20)); + RzILOpEffect *op_ASSIGN_LEFT_21 = SETL("clz64_x", op_SHIFTL_20); + + // seq(clz64_n = clz64_n + ((ut64) 0x20); clz64_x = (clz64_x << 0x2 ...; + RzILOpEffect *seq_then_22 = SEQN(2, op_ASSIGN_ADD_18, op_ASSIGN_LEFT_21); + + // if ((clz64_x <= 0xffffffff)) {seq(clz64_n = clz64_n + ((ut64) 0x20); clz64_x = (clz64_x << 0x2 ...} else {{}}; + RzILOpPure *op_LE_14 = ULE(VARL("clz64_x"), UN(64, 0xffffffff)); + RzILOpEffect *branch_23 = BRANCH(op_LE_14, seq_then_22, EMPTY()); + + // clz64_n = clz64_n + ((ut64) 0x10); + 
RzILOpPure *op_ADD_28 = ADD(VARL("clz64_n"), CAST(64, IL_FALSE, SN(32, 16))); + RzILOpEffect *op_ASSIGN_ADD_29 = SETL("clz64_n", op_ADD_28); + + // clz64_x = (clz64_x << 0x10); + RzILOpPure *op_SHIFTL_31 = SHIFTL0(VARL("clz64_x"), SN(32, 16)); + RzILOpEffect *op_ASSIGN_LEFT_32 = SETL("clz64_x", op_SHIFTL_31); + + // seq(clz64_n = clz64_n + ((ut64) 0x10); clz64_x = (clz64_x << 0x1 ...; + RzILOpEffect *seq_then_33 = SEQN(2, op_ASSIGN_ADD_29, op_ASSIGN_LEFT_32); + + // if ((clz64_x <= 0xffffffffffff)) {seq(clz64_n = clz64_n + ((ut64) 0x10); clz64_x = (clz64_x << 0x1 ...} else {{}}; + RzILOpPure *op_LE_25 = ULE(VARL("clz64_x"), UN(64, 0xffffffffffff)); + RzILOpEffect *branch_34 = BRANCH(op_LE_25, seq_then_33, EMPTY()); + + // clz64_n = clz64_n + ((ut64) 0x8); + RzILOpPure *op_ADD_39 = ADD(VARL("clz64_n"), CAST(64, IL_FALSE, SN(32, 8))); + RzILOpEffect *op_ASSIGN_ADD_40 = SETL("clz64_n", op_ADD_39); + + // clz64_x = (clz64_x << 0x8); + RzILOpPure *op_SHIFTL_42 = SHIFTL0(VARL("clz64_x"), SN(32, 8)); + RzILOpEffect *op_ASSIGN_LEFT_43 = SETL("clz64_x", op_SHIFTL_42); + + // seq(clz64_n = clz64_n + ((ut64) 0x8); clz64_x = (clz64_x << 0x8)); + RzILOpEffect *seq_then_44 = SEQN(2, op_ASSIGN_ADD_40, op_ASSIGN_LEFT_43); + + // if ((clz64_x <= 0xffffffffffffff)) {seq(clz64_n = clz64_n + ((ut64) 0x8); clz64_x = (clz64_x << 0x8))} else {{}}; + RzILOpPure *op_LE_36 = ULE(VARL("clz64_x"), UN(64, 0xffffffffffffff)); + RzILOpEffect *branch_45 = BRANCH(op_LE_36, seq_then_44, EMPTY()); + + // clz64_n = clz64_n + ((ut64) 0x4); + RzILOpPure *op_ADD_50 = ADD(VARL("clz64_n"), CAST(64, IL_FALSE, SN(32, 4))); + RzILOpEffect *op_ASSIGN_ADD_51 = SETL("clz64_n", op_ADD_50); + + // clz64_x = (clz64_x << 0x4); + RzILOpPure *op_SHIFTL_53 = SHIFTL0(VARL("clz64_x"), SN(32, 4)); + RzILOpEffect *op_ASSIGN_LEFT_54 = SETL("clz64_x", op_SHIFTL_53); + + // seq(clz64_n = clz64_n + ((ut64) 0x4); clz64_x = (clz64_x << 0x4)); + RzILOpEffect *seq_then_55 = SEQN(2, op_ASSIGN_ADD_51, op_ASSIGN_LEFT_54); + + // if 
((clz64_x <= 0xfffffffffffffff)) {seq(clz64_n = clz64_n + ((ut64) 0x4); clz64_x = (clz64_x << 0x4))} else {{}}; + RzILOpPure *op_LE_47 = ULE(VARL("clz64_x"), UN(64, 0xfffffffffffffff)); + RzILOpEffect *branch_56 = BRANCH(op_LE_47, seq_then_55, EMPTY()); + + // clz64_n = clz64_n + ((ut64) 0x2); + RzILOpPure *op_ADD_61 = ADD(VARL("clz64_n"), CAST(64, IL_FALSE, SN(32, 2))); + RzILOpEffect *op_ASSIGN_ADD_62 = SETL("clz64_n", op_ADD_61); + + // clz64_x = (clz64_x << 0x2); + RzILOpPure *op_SHIFTL_64 = SHIFTL0(VARL("clz64_x"), SN(32, 2)); + RzILOpEffect *op_ASSIGN_LEFT_65 = SETL("clz64_x", op_SHIFTL_64); + + // seq(clz64_n = clz64_n + ((ut64) 0x2); clz64_x = (clz64_x << 0x2)); + RzILOpEffect *seq_then_66 = SEQN(2, op_ASSIGN_ADD_62, op_ASSIGN_LEFT_65); + + // if ((clz64_x <= 0x3fffffffffffffff)) {seq(clz64_n = clz64_n + ((ut64) 0x2); clz64_x = (clz64_x << 0x2))} else {{}}; + RzILOpPure *op_LE_58 = ULE(VARL("clz64_x"), UN(64, 0x3fffffffffffffff)); + RzILOpEffect *branch_67 = BRANCH(op_LE_58, seq_then_66, EMPTY()); + + // HYB(++clz64_n); + RzILOpEffect *op_INC_70 = SETL("clz64_n", INC(VARL("clz64_n"), 64)); + + // h_tmp0 = HYB(++clz64_n); + RzILOpEffect *op_ASSIGN_hybrid_tmp_72 = SETL("h_tmp0", VARL("clz64_n")); + + // seq(h_tmp0 = HYB(++clz64_n); HYB(++clz64_n)); + RzILOpEffect *seq_73 = SEQN(2, op_ASSIGN_hybrid_tmp_72, op_INC_70); + + // seq(h_tmp0; {}); + RzILOpEffect *seq_then_74 = EMPTY(); + + // seq(seq(h_tmp0 = HYB(++clz64_n); HYB(++clz64_n)); seq(h_tmp0; {} ...; + RzILOpEffect *seq_75 = SEQN(2, seq_73, seq_then_74); + + // if ((clz64_x <= 0x7fffffffffffffff)) {seq(seq(h_tmp0 = HYB(++clz64_n); HYB(++clz64_n)); seq(h_tmp0; {} ...} else {{}}; + RzILOpPure *op_LE_69 = ULE(VARL("clz64_x"), UN(64, 0x7fffffffffffffff)); + RzILOpEffect *branch_76 = BRANCH(op_LE_69, seq_75, EMPTY()); + + // ret_val_ut64 = clz64_n; + RzILOpEffect *set_return_val_77 = SETL("ret_val", VARL("clz64_n")); + + // seq(ret_val_ut64 = ((ut64) 0x40)); + RzILOpEffect *seq_then_78 = set_return_val_8; + + 
// seq(clz64_n = ((ut64) 0x0); if ((clz64_x <= 0xffffffff)) {seq(cl ...; + RzILOpEffect *seq_else_79 = SEQN(8, op_ASSIGN_11, branch_23, branch_34, branch_45, branch_56, branch_67, branch_76, set_return_val_77); + + // if ((clz64_x == ((ut64) 0x0))) {seq(ret_val_ut64 = ((ut64) 0x40))} else {seq(clz64_n = ((ut64) 0x0); if ((clz64_x <= 0xffffffff)) {seq(cl ...}; + RzILOpPure *op_EQ_4 = EQ(VARL("clz64_x"), CAST(64, IL_FALSE, SN(32, 0))); + RzILOpEffect *branch_80 = BRANCH(op_EQ_4, seq_then_78, seq_else_79); + + RzILOpEffect *instruction_sequence = SEQN(2, op_ASSIGN_1, branch_80); + return instruction_sequence; +} + +RZ_OWN RzILOpEffect *hex_clo32(RZ_BORROW RzILOpPure *x) { + + // READ + + // clz32((~x)); + RzILOpPure *op_NOT_0 = LOGNOT(x); + RzILOpEffect *clz32_call_1 = hex_clz32(op_NOT_0); + + // h_tmp0 = clz32((~x)); + RzILOpEffect *op_ASSIGN_hybrid_tmp_3 = SETL("h_tmp0", UNSIGNED(32, VARL("ret_val"))); + + // seq(clz32((~x)); h_tmp0 = clz32((~x))); + RzILOpEffect *seq_4 = SEQN(2, clz32_call_1, op_ASSIGN_hybrid_tmp_3); + + // ret_val_ut32 = ((ut64) h_tmp0); + RzILOpEffect *set_return_val_7 = SETL("ret_val", CAST(64, IL_FALSE, VARL("h_tmp0"))); + + RzILOpEffect *instruction_sequence = SEQN(2, seq_4, set_return_val_7); + return instruction_sequence; +} + +RZ_OWN RzILOpEffect *hex_clo64(RZ_BORROW RzILOpPure *x) { + + // READ + + // clz64((~x)); + RzILOpPure *op_NOT_0 = LOGNOT(x); + RzILOpEffect *clz64_call_1 = hex_clz64(op_NOT_0); + + // h_tmp0 = clz64((~x)); + RzILOpEffect *op_ASSIGN_hybrid_tmp_3 = SETL("h_tmp0", UNSIGNED(64, VARL("ret_val"))); + + // seq(clz64((~x)); h_tmp0 = clz64((~x))); + RzILOpEffect *seq_4 = SEQN(2, clz64_call_1, op_ASSIGN_hybrid_tmp_3); + + // ret_val_ut64 = h_tmp0; + RzILOpEffect *set_return_val_6 = SETL("ret_val", VARL("h_tmp0")); + + RzILOpEffect *instruction_sequence = SEQN(2, seq_4, set_return_val_6); + return instruction_sequence; +} + +RZ_OWN RzILOpEffect *hex_revbit16(RZ_BORROW RzILOpPure *t) { + + // READ + // Declare: ut16 revbit16_x; 
+ + // revbit16_x = bswap16(t); + RzILOpEffect *op_ASSIGN_2 = SETL("revbit16_x", BSWAP16(t)); + + // revbit16_x = ((ut16) (((st32) ((ut16) ((((st32) revbit16_x) & 0xf0f0) >> 0x4))) | ((st32) ((ut16) ((((st32) revbit16_x) & 0xf0f) << 0x4))))); + RzILOpPure *op_AND_5 = LOGAND(CAST(32, IL_FALSE, VARL("revbit16_x")), SN(32, 0xf0f0)); + RzILOpPure *op_RSHIFT_7 = SHIFTRA(op_AND_5, SN(32, 4)); + RzILOpPure *op_AND_11 = LOGAND(CAST(32, IL_FALSE, VARL("revbit16_x")), SN(32, 0xf0f)); + RzILOpPure *op_LSHIFT_13 = SHIFTL0(op_AND_11, SN(32, 4)); + RzILOpPure *op_OR_17 = LOGOR(CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_RSHIFT_7)), CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_LSHIFT_13))); + RzILOpEffect *op_ASSIGN_19 = SETL("revbit16_x", CAST(16, IL_FALSE, op_OR_17)); + + // revbit16_x = ((ut16) (((((st32) ((ut16) ((((st32) revbit16_x) & 0x8888) >> 0x3))) | ((st32) ((ut16) ((((st32) revbit16_x) & 0x4444) >> 0x1)))) | ((st32) ((ut16) ((((st32) revbit16_x) & 0x2222) << 0x1)))) | ((st32) ((ut16) ((((st32) revbit16_x) & 0x1111) << 0x3))))); + RzILOpPure *op_AND_22 = LOGAND(CAST(32, IL_FALSE, VARL("revbit16_x")), SN(32, 0x8888)); + RzILOpPure *op_RSHIFT_24 = SHIFTRA(op_AND_22, SN(32, 3)); + RzILOpPure *op_AND_28 = LOGAND(CAST(32, IL_FALSE, VARL("revbit16_x")), SN(32, 0x4444)); + RzILOpPure *op_RSHIFT_30 = SHIFTRA(op_AND_28, SN(32, 1)); + RzILOpPure *op_OR_34 = LOGOR(CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_RSHIFT_24)), CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_RSHIFT_30))); + RzILOpPure *op_AND_37 = LOGAND(CAST(32, IL_FALSE, VARL("revbit16_x")), SN(32, 0x2222)); + RzILOpPure *op_LSHIFT_39 = SHIFTL0(op_AND_37, SN(32, 1)); + RzILOpPure *op_OR_42 = LOGOR(op_OR_34, CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_LSHIFT_39))); + RzILOpPure *op_AND_45 = LOGAND(CAST(32, IL_FALSE, VARL("revbit16_x")), SN(32, 0x1111)); + RzILOpPure *op_LSHIFT_47 = SHIFTL0(op_AND_45, SN(32, 3)); + RzILOpPure *op_OR_50 = LOGOR(op_OR_42, CAST(32, IL_FALSE, CAST(16, IL_FALSE, op_LSHIFT_47))); + RzILOpEffect *op_ASSIGN_52 = 
SETL("revbit16_x", CAST(16, IL_FALSE, op_OR_50)); + + // ret_val_ut16 = ((ut64) revbit16_x); + RzILOpEffect *set_return_val_55 = SETL("ret_val", CAST(64, IL_FALSE, VARL("revbit16_x"))); + + RzILOpEffect *instruction_sequence = SEQN(4, op_ASSIGN_2, op_ASSIGN_19, op_ASSIGN_52, set_return_val_55); + return instruction_sequence; +} + +RZ_OWN RzILOpEffect *hex_revbit32(RZ_BORROW RzILOpPure *t) { + + // READ + // Declare: ut32 revbit32_x; + + // revbit32_x = bswap32(t); + RzILOpEffect *op_ASSIGN_2 = SETL("revbit32_x", BSWAP32(t)); + + // revbit32_x = (((revbit32_x & 0xf0f0f0f0) >> 0x4) | ((revbit32_x & 0xf0f0f0f) << 0x4)); + RzILOpPure *op_AND_4 = LOGAND(VARL("revbit32_x"), UN(32, 0xf0f0f0f0)); + RzILOpPure *op_RSHIFT_6 = SHIFTR0(op_AND_4, SN(32, 4)); + RzILOpPure *op_AND_8 = LOGAND(VARL("revbit32_x"), UN(32, 0xf0f0f0f)); + RzILOpPure *op_LSHIFT_10 = SHIFTL0(op_AND_8, SN(32, 4)); + RzILOpPure *op_OR_11 = LOGOR(op_RSHIFT_6, op_LSHIFT_10); + RzILOpEffect *op_ASSIGN_12 = SETL("revbit32_x", op_OR_11); + + // revbit32_x = (((((revbit32_x & 0x88888888) >> 0x3) | ((revbit32_x & 0x44444444) >> 0x1)) | ((revbit32_x & 0x22222222) << 0x1)) | ((revbit32_x & 0x11111111) << 0x3)); + RzILOpPure *op_AND_14 = LOGAND(VARL("revbit32_x"), UN(32, 0x88888888)); + RzILOpPure *op_RSHIFT_16 = SHIFTR0(op_AND_14, SN(32, 3)); + RzILOpPure *op_AND_18 = LOGAND(VARL("revbit32_x"), UN(32, 0x44444444)); + RzILOpPure *op_RSHIFT_20 = SHIFTR0(op_AND_18, SN(32, 1)); + RzILOpPure *op_OR_21 = LOGOR(op_RSHIFT_16, op_RSHIFT_20); + RzILOpPure *op_AND_23 = LOGAND(VARL("revbit32_x"), UN(32, 0x22222222)); + RzILOpPure *op_LSHIFT_25 = SHIFTL0(op_AND_23, SN(32, 1)); + RzILOpPure *op_OR_26 = LOGOR(op_OR_21, op_LSHIFT_25); + RzILOpPure *op_AND_28 = LOGAND(VARL("revbit32_x"), UN(32, 0x11111111)); + RzILOpPure *op_LSHIFT_30 = SHIFTL0(op_AND_28, SN(32, 3)); + RzILOpPure *op_OR_31 = LOGOR(op_OR_26, op_LSHIFT_30); + RzILOpEffect *op_ASSIGN_32 = SETL("revbit32_x", op_OR_31); + + // ret_val_ut32 = ((ut64) revbit32_x); + 
RzILOpEffect *set_return_val_35 = SETL("ret_val", CAST(64, IL_FALSE, VARL("revbit32_x"))); + + RzILOpEffect *instruction_sequence = SEQN(4, op_ASSIGN_2, op_ASSIGN_12, op_ASSIGN_32, set_return_val_35); + return instruction_sequence; +} + +RZ_OWN RzILOpEffect *hex_revbit64(RZ_BORROW RzILOpPure *t) { + + // READ + // Declare: ut64 revbit64_x; + + // revbit64_x = bswap64(t); + RzILOpEffect *op_ASSIGN_2 = SETL("revbit64_x", BSWAP64(t)); + + // revbit64_x = (((revbit64_x & 0xf0f0f0f0f0f0f0f0) >> 0x4) | ((revbit64_x & 0xf0f0f0f0f0f0f0f) << 0x4)); + RzILOpPure *op_AND_4 = LOGAND(VARL("revbit64_x"), UN(64, 0xf0f0f0f0f0f0f0f0)); + RzILOpPure *op_RSHIFT_6 = SHIFTR0(op_AND_4, SN(32, 4)); + RzILOpPure *op_AND_8 = LOGAND(VARL("revbit64_x"), UN(64, 0xf0f0f0f0f0f0f0f)); + RzILOpPure *op_LSHIFT_10 = SHIFTL0(op_AND_8, SN(32, 4)); + RzILOpPure *op_OR_11 = LOGOR(op_RSHIFT_6, op_LSHIFT_10); + RzILOpEffect *op_ASSIGN_12 = SETL("revbit64_x", op_OR_11); + + // revbit64_x = (((((revbit64_x & 0x8888888888888888) >> 0x3) | ((revbit64_x & 0x4444444444444444) >> 0x1)) | ((revbit64_x & 0x2222222222222222) << 0x1)) | ((revbit64_x & 0x1111111111111111) << 0x3)); + RzILOpPure *op_AND_14 = LOGAND(VARL("revbit64_x"), UN(64, 0x8888888888888888)); + RzILOpPure *op_RSHIFT_16 = SHIFTR0(op_AND_14, SN(32, 3)); + RzILOpPure *op_AND_18 = LOGAND(VARL("revbit64_x"), UN(64, 0x4444444444444444)); + RzILOpPure *op_RSHIFT_20 = SHIFTR0(op_AND_18, SN(32, 1)); + RzILOpPure *op_OR_21 = LOGOR(op_RSHIFT_16, op_RSHIFT_20); + RzILOpPure *op_AND_23 = LOGAND(VARL("revbit64_x"), UN(64, 0x2222222222222222)); + RzILOpPure *op_LSHIFT_25 = SHIFTL0(op_AND_23, SN(32, 1)); + RzILOpPure *op_OR_26 = LOGOR(op_OR_21, op_LSHIFT_25); + RzILOpPure *op_AND_28 = LOGAND(VARL("revbit64_x"), UN(64, 0x1111111111111111)); + RzILOpPure *op_LSHIFT_30 = SHIFTL0(op_AND_28, SN(32, 3)); + RzILOpPure *op_OR_31 = LOGOR(op_OR_26, op_LSHIFT_30); + RzILOpEffect *op_ASSIGN_32 = SETL("revbit64_x", op_OR_31); + + // ret_val_ut64 = revbit64_x; + RzILOpEffect 
*set_return_val_34 = SETL("ret_val", VARL("revbit64_x")); + + RzILOpEffect *instruction_sequence = SEQN(4, op_ASSIGN_2, op_ASSIGN_12, op_ASSIGN_32, set_return_val_34); + return instruction_sequence; +} + +RZ_OWN RzILOpEffect *hex_fbrev(RZ_BORROW RzILOpPure *addr) { + + // READ + + // revbit16(((ut16) addr)); + RzILOpEffect *revbit16_call_3 = hex_revbit16(CAST(16, IL_FALSE, addr)); + + // h_tmp0 = revbit16(((ut16) addr)); + RzILOpEffect *op_ASSIGN_hybrid_tmp_5 = SETL("h_tmp0", UNSIGNED(16, VARL("ret_val"))); + + // seq(revbit16(((ut16) addr)); h_tmp0 = revbit16(((ut16) addr))); + RzILOpEffect *seq_6 = SEQN(2, revbit16_call_3, op_ASSIGN_hybrid_tmp_5); + + // ret_val_ut32 = ((ut64) deposit32(addr, 0x0, 0x10, ((ut32) h_tmp0))); + RzILOpEffect *set_return_val_11 = SETL("ret_val", CAST(64, IL_FALSE, DEPOSIT32(DUP(addr), SN(32, 0), SN(32, 16), CAST(32, IL_FALSE, VARL("h_tmp0"))))); + + RzILOpEffect *instruction_sequence = SEQN(2, seq_6, set_return_val_11); + return instruction_sequence; +} + +RZ_OWN RzILOpEffect *hex_conv_round(RZ_BORROW RzILOpPure *a, RZ_BORROW RzILOpPure *n) { + + // READ + // Declare: st64 conv_val; + + // conv_val = ((st64) a); + RzILOpEffect *op_ASSIGN_4 = SETL("conv_val", CAST(64, MSB(a), DUP(a))); + + // conv_val = ((st64) a) + ((st64) (((ut32) ((0x1 << n) & a)) >> 0x1)); + RzILOpPure *op_LSHIFT_16 = SHIFTL0(SN(32, 1), n); + RzILOpPure *op_AND_17 = LOGAND(op_LSHIFT_16, DUP(a)); + RzILOpPure *op_RSHIFT_20 = SHIFTR0(CAST(32, IL_FALSE, op_AND_17), SN(32, 1)); + RzILOpPure *op_ADD_22 = ADD(CAST(64, MSB(DUP(a)), DUP(a)), CAST(64, IL_FALSE, op_RSHIFT_20)); + RzILOpEffect *op_ASSIGN_23 = SETL("conv_val", op_ADD_22); + + // conv_val = ((st64) a) + ((st64) (0x1 << n - 0x1)); + RzILOpPure *op_SUB_27 = SUB(DUP(n), SN(32, 1)); + RzILOpPure *op_LSHIFT_28 = SHIFTL0(SN(32, 1), op_SUB_27); + RzILOpPure *op_ADD_30 = ADD(CAST(64, MSB(DUP(a)), DUP(a)), CAST(64, MSB(op_LSHIFT_28), DUP(op_LSHIFT_28))); + RzILOpEffect *op_ASSIGN_31 = SETL("conv_val", op_ADD_30); + + // 
seq(conv_val = ((st64) a) + ((st64) (((ut32) ((0x1 << n) & a)) > ...; + RzILOpEffect *seq_then_32 = op_ASSIGN_23; + + // seq(conv_val = ((st64) a) + ((st64) (0x1 << n - 0x1))); + RzILOpEffect *seq_else_33 = op_ASSIGN_31; + + // if (((a & (0x1 << n - 0x1) - 0x1) == 0x0)) {seq(conv_val = ((st64) a) + ((st64) (((ut32) ((0x1 << n) & a)) > ...} else {seq(conv_val = ((st64) a) + ((st64) (0x1 << n - 0x1)))}; + RzILOpPure *op_SUB_7 = SUB(DUP(n), SN(32, 1)); + RzILOpPure *op_LSHIFT_8 = SHIFTL0(SN(32, 1), op_SUB_7); + RzILOpPure *op_SUB_10 = SUB(op_LSHIFT_8, SN(32, 1)); + RzILOpPure *op_AND_11 = LOGAND(DUP(a), op_SUB_10); + RzILOpPure *op_EQ_13 = EQ(op_AND_11, SN(32, 0)); + RzILOpEffect *branch_34 = BRANCH(op_EQ_13, seq_then_32, seq_else_33); + + // seq(conv_val = ((st64) a)); + RzILOpEffect *seq_then_35 = op_ASSIGN_4; + + // seq(if (((a & (0x1 << n - 0x1) - 0x1) == 0x0)) {seq(conv_val = ( ...; + RzILOpEffect *seq_else_36 = branch_34; + + // if ((n == 0x0)) {seq(conv_val = ((st64) a))} else {seq(if (((a & (0x1 << n - 0x1) - 0x1) == 0x0)) {seq(conv_val = ( ...}; + RzILOpPure *op_EQ_2 = EQ(DUP(n), SN(32, 0)); + RzILOpEffect *branch_37 = BRANCH(op_EQ_2, seq_then_35, seq_else_36); + + // conv_val = (conv_val >> n); + RzILOpPure *op_RSHIFT_38 = SHIFTRA(VARL("conv_val"), DUP(n)); + RzILOpEffect *op_ASSIGN_39 = SETL("conv_val", op_RSHIFT_38); + + // ret_val_st32 = ((ut64) ((st32) conv_val)); + RzILOpEffect *set_return_val_43 = SETL("ret_val", CAST(64, IL_FALSE, CAST(32, MSB(VARL("conv_val")), VARL("conv_val")))); + + RzILOpEffect *instruction_sequence = SEQN(3, branch_37, op_ASSIGN_39, set_return_val_43); + return instruction_sequence; +} + +RZ_OWN RzILOpEffect *hex_set_usr_field(HexInsnPktBundle *bundle, HexRegField field, RZ_BORROW RzILOpPure *val) { + HexPkt *pkt = bundle->pkt; + + // READ + const HexOp usr_op = ALIAS2OP(HEX_REG_ALIAS_USR, false); + + // usr = ((ut32) (REGFIELD(HEX_RF_WIDTH, field) ? 
deposit64(((ut64) usr), ((st32) REGFIELD(HEX_RF_OFFSET, field)), ((st32) REGFIELD(HEX_RF_WIDTH, field)), ((ut64) val)) : ((ut64) usr))); + RzILOpPure *cond_10 = ITE(NON_ZERO(HEX_REGFIELD(HEX_RF_WIDTH, field)), DEPOSIT64(CAST(64, IL_FALSE, READ_REG(pkt, &usr_op, true)), CAST(32, IL_FALSE, HEX_REGFIELD(HEX_RF_OFFSET, field)), CAST(32, IL_FALSE, HEX_REGFIELD(HEX_RF_WIDTH, field)), CAST(64, IL_FALSE, val)), CAST(64, IL_FALSE, READ_REG(pkt, &usr_op, true))); + RzILOpEffect *op_ASSIGN_12 = WRITE_REG(bundle, &usr_op, CAST(32, IL_FALSE, cond_10)); + + RzILOpEffect *instruction_sequence = op_ASSIGN_12; + return instruction_sequence; +} + +RZ_OWN RzILOpEffect *hex_get_usr_field(HexInsnPktBundle *bundle, HexRegField field) { + HexPkt *pkt = bundle->pkt; + + // READ + const HexOp usr_op = ALIAS2OP(HEX_REG_ALIAS_USR, false); + RzILOpPure *usr = READ_REG(pkt, &usr_op, false); + + // ret_val_ut32 = (REGFIELD(HEX_RF_WIDTH, field) ? extract64(((ut64) usr), ((st32) REGFIELD(HEX_RF_OFFSET, field)), ((st32) REGFIELD(HEX_RF_WIDTH, field))) : ((ut64) 0x0)); + RzILOpPure *cond_10 = ITE(NON_ZERO(HEX_REGFIELD(HEX_RF_WIDTH, field)), EXTRACT64(CAST(64, IL_FALSE, usr), CAST(32, IL_FALSE, HEX_REGFIELD(HEX_RF_OFFSET, field)), CAST(32, IL_FALSE, HEX_REGFIELD(HEX_RF_WIDTH, field))), CAST(64, IL_FALSE, SN(64, 0))); + RzILOpEffect *set_return_val_12 = SETL("ret_val", cond_10); + + RzILOpEffect *instruction_sequence = set_return_val_12; + return instruction_sequence; +} + +/** + * \brief Returns the value of an register field property. + * + * \param property The property to get the value for. + * \param field The register field. + * \return RzILOpPure The value as integer as U32 or U32_MAX on failure. 
+ */ +RZ_IPI RZ_OWN RzILOpPure *hex_get_rf_property_val(const HexRegFieldProperty property, const HexRegField field) { + RzILOpPure *r = NULL; + switch (field) { + default: + RZ_LOG_WARN("Register field not implemented.\n"); + break; + case HEX_REG_FIELD_USR_LPCFG: + if (property == HEX_RF_WIDTH) { + r = U32(2); + } else if (property == HEX_RF_OFFSET) { + r = U32(8); + } + break; + case HEX_REG_FIELD_USR_OVF: + if (property == HEX_RF_WIDTH) { + r = U32(1); + } else if (property == HEX_RF_OFFSET) { + r = U32(0); + } + break; + } + return r; +} + +/** + * \brief Returns the next PC as pure. + * + * \param pkt The instruction packet. + * \return RzILOpPure* The next PC as pure. + */ +RZ_IPI RZ_OWN RzILOpEffect *hex_get_npc(const HexPkt *pkt) { + rz_return_val_if_fail(pkt, NULL); + RzILOpPure *r; + r = U64(pkt->pkt_addr + (rz_list_length(pkt->bin) * HEX_INSN_SIZE)); + return SETL("ret_val", r); +} + +RZ_IPI RZ_OWN RzILOpEffect *hex_commit_packet(HexInsnPktBundle *bundle) { + HexILExecData *stats = &bundle->pkt->il_op_stats; + RzILOpEffect *commit_seq = EMPTY(); + for (ut8 i = 0; i <= HEX_REG_CTR_REGS_C31; ++i) { + if (!(rz_bv_get(stats->ctr_written, i))) { + continue; + } + const char *dest_reg = hex_get_reg_in_class(HEX_REG_CLASS_CTR_REGS, i, false, false, false); + const char *src_reg = hex_get_reg_in_class(HEX_REG_CLASS_CTR_REGS, i, false, true, false); + commit_seq = SEQ2(commit_seq, SETG(dest_reg, VARG(src_reg))); + } + + for (ut8 i = 0; i <= HEX_REG_INT_REGS_R31; ++i) { + if (!(rz_bv_get(stats->gpr_written, i))) { + continue; + } + const char *dest_reg = hex_get_reg_in_class(HEX_REG_CLASS_INT_REGS, i, false, false, false); + const char *src_reg = hex_get_reg_in_class(HEX_REG_CLASS_INT_REGS, i, false, true, false); + commit_seq = SEQ2(commit_seq, SETG(dest_reg, VARG(src_reg))); + } + + for (ut8 i = 0; i <= HEX_REG_PRED_REGS_P3; ++i) { + if (!(rz_bv_get(stats->pred_written, i))) { + continue; + } + const char *dest_reg = 
hex_get_reg_in_class(HEX_REG_CLASS_PRED_REGS, i, false, false, false); + const char *src_reg = hex_get_reg_in_class(HEX_REG_CLASS_PRED_REGS, i, false, true, false); + commit_seq = SEQ2(commit_seq, SETG(dest_reg, VARG(src_reg))); + } + + hex_reset_il_pkt_stats(stats); + return commit_seq; +} + +RZ_IPI RZ_OWN RzILOpEffect *hex_il_op_jump_flag_init(HexInsnPktBundle *bundle) { + return SEQ2(SETL("jump_flag", IL_FALSE), SETL("jump_target", U32(0xffffffff))); +} + +RZ_IPI RZ_OWN RzILOpEffect *hex_il_op_next_pkt_jmp(HexInsnPktBundle *bundle) { + return BRANCH(VARL("jump_flag"), JMP(VARL("jump_target")), JMP(U32(bundle->pkt->pkt_addr + (HEX_INSN_SIZE * rz_list_length(bundle->pkt->bin))))); +} + +#include diff --git a/librz/arch/meson.build b/librz/arch/meson.build index f22e20b39a9..cb8d698d823 100644 --- a/librz/arch/meson.build +++ b/librz/arch/meson.build @@ -160,6 +160,48 @@ arch_isa_sources = [ 'isa/hexagon/hexagon.c', 'isa/hexagon/hexagon_arch.c', 'isa/hexagon/hexagon_disas.c', + 'isa/hexagon/hexagon_il.c', + 'isa/hexagon/hexagon_il_getter_table.h', + 'isa/hexagon/il_ops/hexagon_il_A2_ops.c', + 'isa/hexagon/il_ops/hexagon_il_A4_ops.c', + 'isa/hexagon/il_ops/hexagon_il_A5_ops.c', + 'isa/hexagon/il_ops/hexagon_il_A6_ops.c', + 'isa/hexagon/il_ops/hexagon_il_A7_ops.c', + 'isa/hexagon/il_ops/hexagon_il_C2_ops.c', + 'isa/hexagon/il_ops/hexagon_il_C4_ops.c', + 'isa/hexagon/il_ops/hexagon_il_F2_ops.c', + 'isa/hexagon/il_ops/hexagon_il_G4_ops.c', + 'isa/hexagon/il_ops/hexagon_il_IMPORTED_ops.c', + 'isa/hexagon/il_ops/hexagon_il_J2_ops.c', + 'isa/hexagon/il_ops/hexagon_il_J4_ops.c', + 'isa/hexagon/il_ops/hexagon_il_L2_ops.c', + 'isa/hexagon/il_ops/hexagon_il_L4_ops.c', + 'isa/hexagon/il_ops/hexagon_il_L6_ops.c', + 'isa/hexagon/il_ops/hexagon_il_M2_ops.c', + 'isa/hexagon/il_ops/hexagon_il_M4_ops.c', + 'isa/hexagon/il_ops/hexagon_il_M5_ops.c', + 'isa/hexagon/il_ops/hexagon_il_M6_ops.c', + 'isa/hexagon/il_ops/hexagon_il_M7_ops.c', + 'isa/hexagon/il_ops/hexagon_il_PS_ops.c', + 
'isa/hexagon/il_ops/hexagon_il_R6_ops.c', + 'isa/hexagon/il_ops/hexagon_il_S2_ops.c', + 'isa/hexagon/il_ops/hexagon_il_S4_ops.c', + 'isa/hexagon/il_ops/hexagon_il_S5_ops.c', + 'isa/hexagon/il_ops/hexagon_il_S6_ops.c', + 'isa/hexagon/il_ops/hexagon_il_SA1_ops.c', + 'isa/hexagon/il_ops/hexagon_il_SL1_ops.c', + 'isa/hexagon/il_ops/hexagon_il_SL2_ops.c', + 'isa/hexagon/il_ops/hexagon_il_SS1_ops.c', + 'isa/hexagon/il_ops/hexagon_il_SS2_ops.c', + 'isa/hexagon/il_ops/hexagon_il_UNDOCUMENTED_ops.c', + 'isa/hexagon/il_ops/hexagon_il_V6_ops.c', + 'isa/hexagon/il_ops/hexagon_il_Y2_ops.c', + 'isa/hexagon/il_ops/hexagon_il_Y4_ops.c', + 'isa/hexagon/il_ops/hexagon_il_Y5_ops.c', + 'isa/hexagon/il_ops/hexagon_il_Y6_ops.c', + 'isa/hexagon/il_ops/hexagon_il_dep_ops.c', + 'isa/hexagon/il_ops/hexagon_il_invalid_ops.c', + 'isa/hexagon/il_ops/hexagon_il_non_insn_ops.c', 'isa/i4004/i4004dis.c', 'isa/i8080/i8080dis.c', 'isa/java/assembler.c', diff --git a/librz/arch/p/analysis/analysis_hexagon.c b/librz/arch/p/analysis/analysis_hexagon.c index ba527ddee89..6746b69afd1 100644 --- a/librz/arch/p/analysis/analysis_hexagon.c +++ b/librz/arch/p/analysis/analysis_hexagon.c @@ -1,9 +1,9 @@ // SPDX-FileCopyrightText: 2021 Rot127 // SPDX-License-Identifier: LGPL-3.0-only -// LLVM commit: 96e220e6886868d6663d966ecc396befffc355e7 -// LLVM commit date: 2022-01-05 11:01:52 +0000 (ISO 8601 format) -// Date of code generation: 2022-04-17 16:44:52+02:00 +// LLVM commit: b6f51787f6c8e77143f0aef6b58ddc7c55741d5c +// LLVM commit date: 2023-11-15 07:10:59 -0800 (ISO 8601 format) +// Date of code generation: 2024-03-16 06:22:39-05:00 //======================================== // The following code is generated. // Do not edit. 
Repository of code generator: @@ -17,26 +17,50 @@ #include #include #include +#include RZ_API int hexagon_v6_op(RzAnalysis *analysis, RzAnalysisOp *op, ut64 addr, const ut8 *buf, int len, RzAnalysisOpMask mask) { rz_return_val_if_fail(analysis && op && buf, -1); - if (len < 4) { + if (len < HEX_INSN_SIZE) { return -1; } if (analysis->pcalign == 0) { - analysis->pcalign = 0x4; + analysis->pcalign = HEX_PC_ALIGNMENT; } + // Disassemble as many instructions as possible from the buffer. + ut32 buf_offset = 0; + while (buf_offset + HEX_INSN_SIZE <= len && buf_offset <= HEX_INSN_SIZE * HEX_MAX_INSN_PER_PKT) { + const ut32 buf_ptr = rz_read_at_le32(buf, buf_offset); + if (buf_offset > 0 && (buf_ptr == HEX_INVALID_INSN_0 || buf_ptr == HEX_INVALID_INSN_F)) { + // Do not disassemble invalid instructions, if we already have a valid one. + break; + } + + HexReversedOpcode rev = { .action = HEXAGON_ANALYSIS, .ana_op = op, .asm_op = NULL }; + hexagon_reverse_opcode(NULL, &rev, buf + buf_offset, addr + buf_offset, false); + buf_offset += HEX_INSN_SIZE; + } + // Copy operation actually requested. 
HexReversedOpcode rev = { .action = HEXAGON_ANALYSIS, .ana_op = op, .asm_op = NULL }; + hexagon_reverse_opcode(NULL, &rev, buf, addr, true); + bool decoded_packet = len > HEX_INSN_SIZE; + if (mask & RZ_ANALYSIS_OP_MASK_IL) { + op->il_op = hex_get_il_op(addr, decoded_packet); + } - hexagon_reverse_opcode(NULL, &rev, buf, addr); + return HEX_INSN_SIZE; +} - return op->size; +static RzAnalysisILConfig *rz_hexagon_il_config(RzAnalysis *a) { + HexState *state = hexagon_state(false); + state->just_init = true; + return rz_analysis_il_config_new(32, a->big_endian, 32); } RZ_API char *get_reg_profile(RzAnalysis *analysis) { const char *p = - "=PC pc\n" + "=PC C9\n" "=SP R29\n" "=BP R30\n" "=LR R31\n" @@ -51,648 +75,679 @@ RZ_API char *get_reg_profile(RzAnalysis *analysis) { "=R0 R0\n" "=R1 R1\n" - "ctr C0 .32 1024 0\n" - "ctr C1 .32 1056 0\n" - "ctr C2 .32 1088 0\n" - "ctr C3 .32 1120 0\n" - "ctr C4 .32 1152 0\n" - "ctr C5 .32 1184 0\n" - "ctr C6 .32 1216 0\n" - "ctr C7 .32 1248 0\n" - "ctr C8 .32 1280 0\n" - "ctr C9 .32 1312 0\n" - "ctr C10 .32 1344 0\n" - "ctr C11 .32 1376 0\n" - "ctr C12 .32 1408 0\n" - "ctr C13 .32 1440 0\n" - "ctr C14 .32 1472 0\n" - "ctr C15 .32 1504 0\n" - "ctr C16 .32 1536 0\n" - "ctr C17 .32 1568 0\n" - "ctr C18 .32 1600 0\n" - "ctr C19 .32 1632 0\n" - "ctr C30 .32 1664 0\n" - "ctr C31 .32 1696 0\n" - - "ctr C1:0 .64 1024 0\n" - "ctr C3:2 .64 1088 0\n" - "ctr C5:4 .64 1152 0\n" - "ctr C7:6 .64 1216 0\n" - "ctr C9:8 .64 1280 0\n" - "ctr C11:10 .64 1344 0\n" - "ctr C13:12 .64 1408 0\n" - "ctr C15:14 .64 1472 0\n" - "ctr C17:16 .64 1536 0\n" - "ctr C19:18 .64 1600 0\n" - "ctr C31:30 .64 1664 0\n" + "ctr C0 .32 0 0\n" + "ctr C0_tmp .32 32 0\n" + "ctr C1 .32 64 0\n" + "ctr C1_tmp .32 96 0\n" + "ctr C2 .32 128 0\n" + "ctr C2_tmp .32 160 0\n" + "ctr C3 .32 192 0\n" + "ctr C3_tmp .32 224 0\n" + "ctr C4 .32 256 0\n" + "ctr C4_tmp .32 288 0\n" + "ctr C5 .32 320 0\n" + "ctr C5_tmp .32 352 0\n" + "ctr C6 .32 384 0\n" + "ctr C6_tmp .32 416 0\n" + "ctr C7 .32 
448 0\n" + "ctr C7_tmp .32 480 0\n" + "ctr C8 .32 512 0\n" + "ctr C8_tmp .32 544 0\n" + "ctr C9 .32 576 0\n" + "ctr C9_tmp .32 608 0\n" + "ctr C10 .32 640 0\n" + "ctr C10_tmp .32 672 0\n" + "ctr C11 .32 704 0\n" + "ctr C11_tmp .32 736 0\n" + "ctr C12 .32 768 0\n" + "ctr C12_tmp .32 800 0\n" + "ctr C13 .32 832 0\n" + "ctr C13_tmp .32 864 0\n" + "ctr C14 .32 896 0\n" + "ctr C14_tmp .32 928 0\n" + "ctr C15 .32 960 0\n" + "ctr C15_tmp .32 992 0\n" + "ctr C16 .32 1024 0\n" + "ctr C16_tmp .32 1056 0\n" + "ctr C17 .32 1088 0\n" + "ctr C17_tmp .32 1120 0\n" + "ctr C18 .32 1152 0\n" + "ctr C18_tmp .32 1184 0\n" + "ctr C19 .32 1216 0\n" + "ctr C19_tmp .32 1248 0\n" + "ctr C20 .32 1280 0\n" + "ctr C20_tmp .32 1312 0\n" + "ctr C21 .32 1344 0\n" + "ctr C21_tmp .32 1376 0\n" + "ctr C22 .32 1408 0\n" + "ctr C22_tmp .32 1440 0\n" + "ctr C23 .32 1472 0\n" + "ctr C23_tmp .32 1504 0\n" + "ctr C24 .32 1536 0\n" + "ctr C24_tmp .32 1568 0\n" + "ctr C25 .32 1600 0\n" + "ctr C25_tmp .32 1632 0\n" + "ctr C26 .32 1664 0\n" + "ctr C26_tmp .32 1696 0\n" + "ctr C27 .32 1728 0\n" + "ctr C27_tmp .32 1760 0\n" + "ctr C28 .32 1792 0\n" + "ctr C28_tmp .32 1824 0\n" + "ctr C29 .32 1856 0\n" + "ctr C29_tmp .32 1888 0\n" + "ctr C30 .32 1920 0\n" + "ctr C30_tmp .32 1952 0\n" + "ctr C31 .32 1984 0\n" + "ctr C31_tmp .32 2016 0\n" - "gpr R1:0 .64 0 0\n" - "gpr R3:2 .64 64 0\n" - "gpr R5:4 .64 128 0\n" - "gpr R7:6 .64 192 0\n" - "gpr R9:8 .64 256 0\n" - "gpr R11:10 .64 320 0\n" - "gpr R13:12 .64 384 0\n" - "gpr R15:14 .64 448 0\n" - "gpr R17:16 .64 512 0\n" - "gpr R19:18 .64 576 0\n" - "gpr R21:20 .64 640 0\n" - "gpr R23:22 .64 704 0\n" - "gpr R25:24 .64 768 0\n" - "gpr R27:26 .64 832 0\n" - "gpr R29:28 .64 896 0\n" - "gpr R31:30 .64 960 0\n" + "ctr C1:0 .64 2048 0\n" + "ctr C1:0_tmp .64 2112 0\n" + "ctr C3:2 .64 2176 0\n" + "ctr C3:2_tmp .64 2240 0\n" + "ctr C5:4 .64 2304 0\n" + "ctr C5:4_tmp .64 2368 0\n" + "ctr C7:6 .64 2432 0\n" + "ctr C7:6_tmp .64 2496 0\n" + "ctr C9:8 .64 2560 0\n" + "ctr C9:8_tmp 
.64 2624 0\n" + "ctr C11:10 .64 2688 0\n" + "ctr C11:10_tmp .64 2752 0\n" + "ctr C13:12 .64 2816 0\n" + "ctr C13:12_tmp .64 2880 0\n" + "ctr C15:14 .64 2944 0\n" + "ctr C15:14_tmp .64 3008 0\n" + "ctr C17:16 .64 3072 0\n" + "ctr C17:16_tmp .64 3136 0\n" + "ctr C19:18 .64 3200 0\n" + "ctr C19:18_tmp .64 3264 0\n" + "ctr C21:20 .64 3328 0\n" + "ctr C21:20_tmp .64 3392 0\n" + "ctr C23:22 .64 3456 0\n" + "ctr C23:22_tmp .64 3520 0\n" + "ctr C25:24 .64 3584 0\n" + "ctr C25:24_tmp .64 3648 0\n" + "ctr C27:26 .64 3712 0\n" + "ctr C27:26_tmp .64 3776 0\n" + "ctr C29:28 .64 3840 0\n" + "ctr C29:28_tmp .64 3904 0\n" + "ctr C31:30 .64 3968 0\n" + "ctr C31:30_tmp .64 4032 0\n" - "gpr G0 .32 1728 0\n" - "gpr G1 .32 1760 0\n" - "gpr G2 .32 1792 0\n" - "gpr G3 .32 1824 0\n" - "gpr G4 .32 1856 0\n" - "gpr G5 .32 1888 0\n" - "gpr G6 .32 1920 0\n" - "gpr G7 .32 1952 0\n" - "gpr G8 .32 1984 0\n" - "gpr G9 .32 2016 0\n" - "gpr G10 .32 2048 0\n" - "gpr G11 .32 2080 0\n" - "gpr G12 .32 2112 0\n" - "gpr G13 .32 2144 0\n" - "gpr G14 .32 2176 0\n" - "gpr G15 .32 2208 0\n" - "gpr G16 .32 2240 0\n" - "gpr G17 .32 2272 0\n" - "gpr G18 .32 2304 0\n" - "gpr G19 .32 2336 0\n" - "gpr G20 .32 2368 0\n" - "gpr G21 .32 2400 0\n" - "gpr G22 .32 2432 0\n" - "gpr G23 .32 2464 0\n" - "gpr G24 .32 2496 0\n" - "gpr G25 .32 2528 0\n" - "gpr G26 .32 2560 0\n" - "gpr G27 .32 2592 0\n" - "gpr G28 .32 2624 0\n" - "gpr G29 .32 2656 0\n" - "gpr G30 .32 2688 0\n" - "gpr G31 .32 2720 0\n" + "gpr R1:0 .64 4096 0\n" + "gpr R1:0_tmp .64 4160 0\n" + "gpr R3:2 .64 4224 0\n" + "gpr R3:2_tmp .64 4288 0\n" + "gpr R5:4 .64 4352 0\n" + "gpr R5:4_tmp .64 4416 0\n" + "gpr R7:6 .64 4480 0\n" + "gpr R7:6_tmp .64 4544 0\n" + "gpr R9:8 .64 4608 0\n" + "gpr R9:8_tmp .64 4672 0\n" + "gpr R11:10 .64 4736 0\n" + "gpr R11:10_tmp .64 4800 0\n" + "gpr R13:12 .64 4864 0\n" + "gpr R13:12_tmp .64 4928 0\n" + "gpr R15:14 .64 4992 0\n" + "gpr R15:14_tmp .64 5056 0\n" + "gpr R17:16 .64 5120 0\n" + "gpr R17:16_tmp .64 5184 0\n" + "gpr R19:18 
.64 5248 0\n" + "gpr R19:18_tmp .64 5312 0\n" + "gpr R21:20 .64 5376 0\n" + "gpr R21:20_tmp .64 5440 0\n" + "gpr R23:22 .64 5504 0\n" + "gpr R23:22_tmp .64 5568 0\n" + "gpr R25:24 .64 5632 0\n" + "gpr R25:24_tmp .64 5696 0\n" + "gpr R27:26 .64 5760 0\n" + "gpr R27:26_tmp .64 5824 0\n" + "gpr R29:28 .64 5888 0\n" + "gpr R29:28_tmp .64 5952 0\n" + "gpr R31:30 .64 6016 0\n" + "gpr R31:30_tmp .64 6080 0\n" - "gpr G1:0 .64 1728 0\n" - "gpr G3:2 .64 1792 0\n" - "gpr G5:4 .64 1856 0\n" - "gpr G7:6 .64 1920 0\n" - "gpr G9:8 .64 1984 0\n" - "gpr G11:10 .64 2048 0\n" - "gpr G13:12 .64 2112 0\n" - "gpr G15:14 .64 2176 0\n" - "gpr G17:16 .64 2240 0\n" - "gpr G19:18 .64 2304 0\n" - "gpr G21:20 .64 2368 0\n" - "gpr G23:22 .64 2432 0\n" - "gpr G25:24 .64 2496 0\n" - "gpr G27:26 .64 2560 0\n" - "gpr G29:28 .64 2624 0\n" - "gpr G31:30 .64 2688 0\n" + "gpr G0 .32 6144 0\n" + "gpr G0_tmp .32 6176 0\n" + "gpr G1 .32 6208 0\n" + "gpr G1_tmp .32 6240 0\n" + "gpr G2 .32 6272 0\n" + "gpr G2_tmp .32 6304 0\n" + "gpr G3 .32 6336 0\n" + "gpr G3_tmp .32 6368 0\n" + "gpr G4 .32 6400 0\n" + "gpr G4_tmp .32 6432 0\n" + "gpr G5 .32 6464 0\n" + "gpr G5_tmp .32 6496 0\n" + "gpr G6 .32 6528 0\n" + "gpr G6_tmp .32 6560 0\n" + "gpr G7 .32 6592 0\n" + "gpr G7_tmp .32 6624 0\n" + "gpr G8 .32 6656 0\n" + "gpr G8_tmp .32 6688 0\n" + "gpr G9 .32 6720 0\n" + "gpr G9_tmp .32 6752 0\n" + "gpr G10 .32 6784 0\n" + "gpr G10_tmp .32 6816 0\n" + "gpr G11 .32 6848 0\n" + "gpr G11_tmp .32 6880 0\n" + "gpr G12 .32 6912 0\n" + "gpr G12_tmp .32 6944 0\n" + "gpr G13 .32 6976 0\n" + "gpr G13_tmp .32 7008 0\n" + "gpr G14 .32 7040 0\n" + "gpr G14_tmp .32 7072 0\n" + "gpr G15 .32 7104 0\n" + "gpr G15_tmp .32 7136 0\n" + "gpr G16 .32 7168 0\n" + "gpr G16_tmp .32 7200 0\n" + "gpr G17 .32 7232 0\n" + "gpr G17_tmp .32 7264 0\n" + "gpr G18 .32 7296 0\n" + "gpr G18_tmp .32 7328 0\n" + "gpr G19 .32 7360 0\n" + "gpr G19_tmp .32 7392 0\n" + "gpr G20 .32 7424 0\n" + "gpr G20_tmp .32 7456 0\n" + "gpr G21 .32 7488 0\n" + "gpr G21_tmp 
.32 7520 0\n" + "gpr G22 .32 7552 0\n" + "gpr G22_tmp .32 7584 0\n" + "gpr G23 .32 7616 0\n" + "gpr G23_tmp .32 7648 0\n" + "gpr G24 .32 7680 0\n" + "gpr G24_tmp .32 7712 0\n" + "gpr G25 .32 7744 0\n" + "gpr G25_tmp .32 7776 0\n" + "gpr G26 .32 7808 0\n" + "gpr G26_tmp .32 7840 0\n" + "gpr G27 .32 7872 0\n" + "gpr G27_tmp .32 7904 0\n" + "gpr G28 .32 7936 0\n" + "gpr G28_tmp .32 7968 0\n" + "gpr G29 .32 8000 0\n" + "gpr G29_tmp .32 8032 0\n" + "gpr G30 .32 8064 0\n" + "gpr G30_tmp .32 8096 0\n" + "gpr G31 .32 8128 0\n" + "gpr G31_tmp .32 8160 0\n" - "vcc Q0 .128 2752 0\n" - "vcc Q1 .128 2880 0\n" - "vcc Q2 .128 3008 0\n" - "vcc Q3 .128 3136 0\n" + "gpr G1:0 .64 8192 0\n" + "gpr G1:0_tmp .64 8256 0\n" + "gpr G3:2 .64 8320 0\n" + "gpr G3:2_tmp .64 8384 0\n" + "gpr G5:4 .64 8448 0\n" + "gpr G5:4_tmp .64 8512 0\n" + "gpr G7:6 .64 8576 0\n" + "gpr G7:6_tmp .64 8640 0\n" + "gpr G9:8 .64 8704 0\n" + "gpr G9:8_tmp .64 8768 0\n" + "gpr G11:10 .64 8832 0\n" + "gpr G11:10_tmp .64 8896 0\n" + "gpr G13:12 .64 8960 0\n" + "gpr G13:12_tmp .64 9024 0\n" + "gpr G15:14 .64 9088 0\n" + "gpr G15:14_tmp .64 9152 0\n" + "gpr G17:16 .64 9216 0\n" + "gpr G17:16_tmp .64 9280 0\n" + "gpr G19:18 .64 9344 0\n" + "gpr G19:18_tmp .64 9408 0\n" + "gpr G21:20 .64 9472 0\n" + "gpr G21:20_tmp .64 9536 0\n" + "gpr G23:22 .64 9600 0\n" + "gpr G23:22_tmp .64 9664 0\n" + "gpr G25:24 .64 9728 0\n" + "gpr G25:24_tmp .64 9792 0\n" + "gpr G27:26 .64 9856 0\n" + "gpr G27:26_tmp .64 9920 0\n" + "gpr G29:28 .64 9984 0\n" + "gpr G29:28_tmp .64 10048 0\n" + "gpr G31:30 .64 10112 0\n" + "gpr G31:30_tmp .64 10176 0\n" - "vc V3:0 .4096 3264 0\n" - "vc V7:4 .4096 7360 0\n" - "vc V11:8 .4096 11456 0\n" - "vc V15:12 .4096 15552 0\n" - "vc V19:16 .4096 19648 0\n" - "vc V23:20 .4096 23744 0\n" - "vc V27:24 .4096 27840 0\n" - "vc V31:28 .4096 31936 0\n" + "vcc Q0 .128 10240 0\n" + "vcc Q0_tmp .128 10368 0\n" + "vcc Q1 .128 10496 0\n" + "vcc Q1_tmp .128 10624 0\n" + "vcc Q2 .128 10752 0\n" + "vcc Q2_tmp .128 10880 0\n" + 
"vcc Q3 .128 11008 0\n" + "vcc Q3_tmp .128 11136 0\n" - "vc V0 .1024 3264 0\n" - "vc V1 .1024 4288 0\n" - "vc V2 .1024 5312 0\n" - "vc V3 .1024 6336 0\n" - "vc V4 .1024 7360 0\n" - "vc V5 .1024 8384 0\n" - "vc V6 .1024 9408 0\n" - "vc V7 .1024 10432 0\n" - "vc V8 .1024 11456 0\n" - "vc V9 .1024 12480 0\n" - "vc V10 .1024 13504 0\n" - "vc V11 .1024 14528 0\n" - "vc V12 .1024 15552 0\n" - "vc V13 .1024 16576 0\n" - "vc V14 .1024 17600 0\n" - "vc V15 .1024 18624 0\n" - "vc V16 .1024 19648 0\n" - "vc V17 .1024 20672 0\n" - "vc V18 .1024 21696 0\n" - "vc V19 .1024 22720 0\n" - "vc V20 .1024 23744 0\n" - "vc V21 .1024 24768 0\n" - "vc V22 .1024 25792 0\n" - "vc V23 .1024 26816 0\n" - "vc V24 .1024 27840 0\n" - "vc V25 .1024 28864 0\n" - "vc V26 .1024 29888 0\n" - "vc V27 .1024 30912 0\n" - "vc V28 .1024 31936 0\n" - "vc V29 .1024 32960 0\n" - "vc V30 .1024 33984 0\n" - "vc V31 .1024 35008 0\n" + "vc V3:0 .4096 11264 0\n" + "vc V3:0_tmp .4096 15360 0\n" + "vc V7:4 .4096 19456 0\n" + "vc V7:4_tmp .4096 23552 0\n" + "vc V11:8 .4096 27648 0\n" + "vc V11:8_tmp .4096 31744 0\n" + "vc V15:12 .4096 35840 0\n" + "vc V15:12_tmp .4096 39936 0\n" + "vc V19:16 .4096 44032 0\n" + "vc V19:16_tmp .4096 48128 0\n" + "vc V23:20 .4096 52224 0\n" + "vc V23:20_tmp .4096 56320 0\n" + "vc V27:24 .4096 60416 0\n" + "vc V27:24_tmp .4096 64512 0\n" + "vc V31:28 .4096 68608 0\n" + "vc V31:28_tmp .4096 72704 0\n" - "vc V1:0 .2048 3264 0\n" - "vc V3:2 .2048 5312 0\n" - "vc V5:4 .2048 7360 0\n" - "vc V7:6 .2048 9408 0\n" - "vc V9:8 .2048 11456 0\n" - "vc V11:10 .2048 13504 0\n" - "vc V13:12 .2048 15552 0\n" - "vc V15:14 .2048 17600 0\n" - "vc V17:16 .2048 19648 0\n" - "vc V19:18 .2048 21696 0\n" - "vc V21:20 .2048 23744 0\n" - "vc V23:22 .2048 25792 0\n" - "vc V25:24 .2048 27840 0\n" - "vc V27:26 .2048 29888 0\n" - "vc V29:28 .2048 31936 0\n" - "vc V31:30 .2048 33984 0\n" + "vc V0 .1024 76800 0\n" + "vc V0_tmp .1024 77824 0\n" + "vc V1 .1024 78848 0\n" + "vc V1_tmp .1024 79872 0\n" + "vc V2 .1024 
80896 0\n" + "vc V2_tmp .1024 81920 0\n" + "vc V3 .1024 82944 0\n" + "vc V3_tmp .1024 83968 0\n" + "vc V4 .1024 84992 0\n" + "vc V4_tmp .1024 86016 0\n" + "vc V5 .1024 87040 0\n" + "vc V5_tmp .1024 88064 0\n" + "vc V6 .1024 89088 0\n" + "vc V6_tmp .1024 90112 0\n" + "vc V7 .1024 91136 0\n" + "vc V7_tmp .1024 92160 0\n" + "vc V8 .1024 93184 0\n" + "vc V8_tmp .1024 94208 0\n" + "vc V9 .1024 95232 0\n" + "vc V9_tmp .1024 96256 0\n" + "vc V10 .1024 97280 0\n" + "vc V10_tmp .1024 98304 0\n" + "vc V11 .1024 99328 0\n" + "vc V11_tmp .1024 100352 0\n" + "vc V12 .1024 101376 0\n" + "vc V12_tmp .1024 102400 0\n" + "vc V13 .1024 103424 0\n" + "vc V13_tmp .1024 104448 0\n" + "vc V14 .1024 105472 0\n" + "vc V14_tmp .1024 106496 0\n" + "vc V15 .1024 107520 0\n" + "vc V15_tmp .1024 108544 0\n" + "vc V16 .1024 109568 0\n" + "vc V16_tmp .1024 110592 0\n" + "vc V17 .1024 111616 0\n" + "vc V17_tmp .1024 112640 0\n" + "vc V18 .1024 113664 0\n" + "vc V18_tmp .1024 114688 0\n" + "vc V19 .1024 115712 0\n" + "vc V19_tmp .1024 116736 0\n" + "vc V20 .1024 117760 0\n" + "vc V20_tmp .1024 118784 0\n" + "vc V21 .1024 119808 0\n" + "vc V21_tmp .1024 120832 0\n" + "vc V22 .1024 121856 0\n" + "vc V22_tmp .1024 122880 0\n" + "vc V23 .1024 123904 0\n" + "vc V23_tmp .1024 124928 0\n" + "vc V24 .1024 125952 0\n" + "vc V24_tmp .1024 126976 0\n" + "vc V25 .1024 128000 0\n" + "vc V25_tmp .1024 129024 0\n" + "vc V26 .1024 130048 0\n" + "vc V26_tmp .1024 131072 0\n" + "vc V27 .1024 132096 0\n" + "vc V27_tmp .1024 133120 0\n" + "vc V28 .1024 134144 0\n" + "vc V28_tmp .1024 135168 0\n" + "vc V29 .1024 136192 0\n" + "vc V29_tmp .1024 137216 0\n" + "vc V30 .1024 138240 0\n" + "vc V30_tmp .1024 139264 0\n" + "vc V31 .1024 140288 0\n" + "vc V31_tmp .1024 141312 0\n" - "gpr R0 .32 0 0\n" - "gpr R1 .32 32 0\n" - "gpr R2 .32 64 0\n" - "gpr R3 .32 96 0\n" - "gpr R4 .32 128 0\n" - "gpr R5 .32 160 0\n" - "gpr R6 .32 192 0\n" - "gpr R7 .32 224 0\n" - "gpr R8 .32 256 0\n" - "gpr R9 .32 288 0\n" - "gpr R10 .32 320 0\n" 
- "gpr R11 .32 352 0\n" - "gpr R12 .32 384 0\n" - "gpr R13 .32 416 0\n" - "gpr R14 .32 448 0\n" - "gpr R15 .32 480 0\n" - "gpr R16 .32 512 0\n" - "gpr R17 .32 544 0\n" - "gpr R18 .32 576 0\n" - "gpr R19 .32 608 0\n" - "gpr R20 .32 640 0\n" - "gpr R21 .32 672 0\n" - "gpr R22 .32 704 0\n" - "gpr R23 .32 736 0\n" - "gpr R24 .32 768 0\n" - "gpr R25 .32 800 0\n" - "gpr R26 .32 832 0\n" - "gpr R27 .32 864 0\n" - "gpr R28 .32 896 0\n" - "gpr R29 .32 928 0\n" - "gpr R30 .32 960 0\n" - "gpr R31 .32 992 0\n" + "vc V1:0 .2048 142336 0\n" + "vc V1:0_tmp .2048 144384 0\n" + "vc V3:2 .2048 146432 0\n" + "vc V3:2_tmp .2048 148480 0\n" + "vc V5:4 .2048 150528 0\n" + "vc V5:4_tmp .2048 152576 0\n" + "vc V7:6 .2048 154624 0\n" + "vc V7:6_tmp .2048 156672 0\n" + "vc V9:8 .2048 158720 0\n" + "vc V9:8_tmp .2048 160768 0\n" + "vc V11:10 .2048 162816 0\n" + "vc V11:10_tmp .2048 164864 0\n" + "vc V13:12 .2048 166912 0\n" + "vc V13:12_tmp .2048 168960 0\n" + "vc V15:14 .2048 171008 0\n" + "vc V15:14_tmp .2048 173056 0\n" + "vc V17:16 .2048 175104 0\n" + "vc V17:16_tmp .2048 177152 0\n" + "vc V19:18 .2048 179200 0\n" + "vc V19:18_tmp .2048 181248 0\n" + "vc V21:20 .2048 183296 0\n" + "vc V21:20_tmp .2048 185344 0\n" + "vc V23:22 .2048 187392 0\n" + "vc V23:22_tmp .2048 189440 0\n" + "vc V25:24 .2048 191488 0\n" + "vc V25:24_tmp .2048 193536 0\n" + "vc V27:26 .2048 195584 0\n" + "vc V27:26_tmp .2048 197632 0\n" + "vc V29:28 .2048 199680 0\n" + "vc V29:28_tmp .2048 201728 0\n" + "vc V31:30 .2048 203776 0\n" + "vc V31:30_tmp .2048 205824 0\n" - "ctr P0 .8 1152 0\n" - "ctr P1 .8 1160 0\n" - "ctr P2 .8 1168 0\n" - "ctr P3 .8 1176 0\n" + "gpr R0 .32 207872 0\n" + "gpr R0_tmp .32 207904 0\n" + "gpr R1 .32 207936 0\n" + "gpr R1_tmp .32 207968 0\n" + "gpr R2 .32 208000 0\n" + "gpr R2_tmp .32 208032 0\n" + "gpr R3 .32 208064 0\n" + "gpr R3_tmp .32 208096 0\n" + "gpr R4 .32 208128 0\n" + "gpr R4_tmp .32 208160 0\n" + "gpr R5 .32 208192 0\n" + "gpr R5_tmp .32 208224 0\n" + "gpr R6 .32 208256 0\n" + 
"gpr R6_tmp .32 208288 0\n" + "gpr R7 .32 208320 0\n" + "gpr R7_tmp .32 208352 0\n" + "gpr R8 .32 208384 0\n" + "gpr R8_tmp .32 208416 0\n" + "gpr R9 .32 208448 0\n" + "gpr R9_tmp .32 208480 0\n" + "gpr R10 .32 208512 0\n" + "gpr R10_tmp .32 208544 0\n" + "gpr R11 .32 208576 0\n" + "gpr R11_tmp .32 208608 0\n" + "gpr R12 .32 208640 0\n" + "gpr R12_tmp .32 208672 0\n" + "gpr R13 .32 208704 0\n" + "gpr R13_tmp .32 208736 0\n" + "gpr R14 .32 208768 0\n" + "gpr R14_tmp .32 208800 0\n" + "gpr R15 .32 208832 0\n" + "gpr R15_tmp .32 208864 0\n" + "gpr R16 .32 208896 0\n" + "gpr R16_tmp .32 208928 0\n" + "gpr R17 .32 208960 0\n" + "gpr R17_tmp .32 208992 0\n" + "gpr R18 .32 209024 0\n" + "gpr R18_tmp .32 209056 0\n" + "gpr R19 .32 209088 0\n" + "gpr R19_tmp .32 209120 0\n" + "gpr R20 .32 209152 0\n" + "gpr R20_tmp .32 209184 0\n" + "gpr R21 .32 209216 0\n" + "gpr R21_tmp .32 209248 0\n" + "gpr R22 .32 209280 0\n" + "gpr R22_tmp .32 209312 0\n" + "gpr R23 .32 209344 0\n" + "gpr R23_tmp .32 209376 0\n" + "gpr R24 .32 209408 0\n" + "gpr R24_tmp .32 209440 0\n" + "gpr R25 .32 209472 0\n" + "gpr R25_tmp .32 209504 0\n" + "gpr R26 .32 209536 0\n" + "gpr R26_tmp .32 209568 0\n" + "gpr R27 .32 209600 0\n" + "gpr R27_tmp .32 209632 0\n" + "gpr R28 .32 209664 0\n" + "gpr R28_tmp .32 209696 0\n" + "gpr R29 .32 209728 0\n" + "gpr R29_tmp .32 209760 0\n" + "gpr R30 .32 209792 0\n" + "gpr R30_tmp .32 209824 0\n" + "gpr R31 .32 209856 0\n" + "gpr R31_tmp .32 209888 0\n" - "sys S0 .32 36032 0\n" - "sys S1 .32 36064 0\n" - "sys S2 .32 36096 0\n" - "sys S3 .32 36128 0\n" - "sys S4 .32 36160 0\n" - "sys S5 .32 36192 0\n" - "sys S6 .32 36224 0\n" - "sys S7 .32 36256 0\n" - "sys S8 .32 36288 0\n" - "sys S9 .32 36320 0\n" - "sys S10 .32 36352 0\n" - "sys S11 .32 36384 0\n" - "sys S12 .32 36416 0\n" - "sys S13 .32 36448 0\n" - "sys S14 .32 36480 0\n" - "sys S15 .32 36512 0\n" - "sys S16 .32 36544 0\n" - "sys S17 .32 36576 0\n" - "sys S18 .32 36608 0\n" - "sys S19 .32 36640 0\n" - "sys S20 .32 
36672 0\n" - "sys S21 .32 36704 0\n" - "sys S22 .32 36736 0\n" - "sys S23 .32 36768 0\n" - "sys S24 .32 36800 0\n" - "sys S25 .32 36832 0\n" - "sys S26 .32 36864 0\n" - "sys S27 .32 36896 0\n" - "sys S28 .32 36928 0\n" - "sys S29 .32 36960 0\n" - "sys S30 .32 36992 0\n" - "sys S31 .32 37024 0\n" - "sys S32 .32 37056 0\n" - "sys S33 .32 37088 0\n" - "sys S34 .32 37120 0\n" - "sys S35 .32 37152 0\n" - "sys S36 .32 37184 0\n" - "sys S37 .32 37216 0\n" - "sys S38 .32 37248 0\n" - "sys S39 .32 37280 0\n" - "sys S40 .32 37312 0\n" - "sys S41 .32 37344 0\n" - "sys S42 .32 37376 0\n" - "sys S43 .32 37408 0\n" - "sys S44 .32 37440 0\n" - "sys S45 .32 37472 0\n" - "sys S46 .32 37504 0\n" - "sys S47 .32 37536 0\n" - "sys S48 .32 37568 0\n" - "sys S49 .32 37600 0\n" - "sys S50 .32 37632 0\n" - "sys S51 .32 37664 0\n" - "sys S52 .32 37696 0\n" - "sys S53 .32 37728 0\n" - "sys S54 .32 37760 0\n" - "sys S55 .32 37792 0\n" - "sys S56 .32 37824 0\n" - "sys S57 .32 37856 0\n" - "sys S58 .32 37888 0\n" - "sys S59 .32 37920 0\n" - "sys S60 .32 37952 0\n" - "sys S61 .32 37984 0\n" - "sys S62 .32 38016 0\n" - "sys S63 .32 38048 0\n" - "sys S64 .32 38080 0\n" - "sys S65 .32 38112 0\n" - "sys S66 .32 38144 0\n" - "sys S67 .32 38176 0\n" - "sys S68 .32 38208 0\n" - "sys S69 .32 38240 0\n" - "sys S70 .32 38272 0\n" - "sys S71 .32 38304 0\n" - "sys S72 .32 38336 0\n" - "sys S73 .32 38368 0\n" - "sys S74 .32 38400 0\n" - "sys S75 .32 38432 0\n" - "sys S76 .32 38464 0\n" - "sys S77 .32 38496 0\n" - "sys S78 .32 38528 0\n" - "sys S79 .32 38560 0\n" - "sys S80 .32 38592 0\n" + "ctr P0 .8 209920 0\n" + "ctr P0_tmp .8 209928 0\n" + "ctr P1 .8 209936 0\n" + "ctr P1_tmp .8 209944 0\n" + "ctr P2 .8 209952 0\n" + "ctr P2_tmp .8 209960 0\n" + "ctr P3 .8 209968 0\n" + "ctr P3_tmp .8 209976 0\n" - "sys S1:0 .64 36032 0\n" - "sys S3:2 .64 36096 0\n" - "sys S5:4 .64 36160 0\n" - "sys S7:6 .64 36224 0\n" - "sys S9:8 .64 36288 0\n" - "sys S11:10 .64 36352 0\n" - "sys S13:12 .64 36416 0\n" - "sys S15:14 .64 
36480 0\n" - "sys S17:16 .64 36544 0\n" - "sys S19:18 .64 36608 0\n" - "sys S21:20 .64 36672 0\n" - "sys S23:22 .64 36736 0\n" - "sys S25:24 .64 36800 0\n" - "sys S27:26 .64 36864 0\n" - "sys S29:28 .64 36928 0\n" - "sys S31:30 .64 36992 0\n" - "sys S33:32 .64 37056 0\n" - "sys S35:34 .64 37120 0\n" - "sys S37:36 .64 37184 0\n" - "sys S39:38 .64 37248 0\n" - "sys S41:40 .64 37312 0\n" - "sys S43:42 .64 37376 0\n" - "sys S45:44 .64 37440 0\n" - "sys S47:46 .64 37504 0\n" - "sys S49:48 .64 37568 0\n" - "sys S51:50 .64 37632 0\n" - "sys S53:52 .64 37696 0\n" - "sys S55:54 .64 37760 0\n" - "sys S57:56 .64 37824 0\n" - "sys S59:58 .64 37888 0\n" - "sys S61:60 .64 37952 0\n" - "sys S63:62 .64 38016 0\n" - "sys S65:64 .64 38080 0\n" - "sys S67:66 .64 38144 0\n" - "sys S69:68 .64 38208 0\n" - "sys S71:70 .64 38272 0\n" - "sys S73:72 .64 38336 0\n" - "sys S75:74 .64 38400 0\n" - "sys S77:76 .64 38464 0\n" - "sys S79:78 .64 38528 0\n" + "sys S0 .32 209984 0\n" + "sys S0_tmp .32 210016 0\n" + "sys S1 .32 210048 0\n" + "sys S1_tmp .32 210080 0\n" + "sys S2 .32 210112 0\n" + "sys S2_tmp .32 210144 0\n" + "sys S3 .32 210176 0\n" + "sys S3_tmp .32 210208 0\n" + "sys S4 .32 210240 0\n" + "sys S4_tmp .32 210272 0\n" + "sys S5 .32 210304 0\n" + "sys S5_tmp .32 210336 0\n" + "sys S6 .32 210368 0\n" + "sys S6_tmp .32 210400 0\n" + "sys S7 .32 210432 0\n" + "sys S7_tmp .32 210464 0\n" + "sys S8 .32 210496 0\n" + "sys S8_tmp .32 210528 0\n" + "sys S9 .32 210560 0\n" + "sys S9_tmp .32 210592 0\n" + "sys S10 .32 210624 0\n" + "sys S10_tmp .32 210656 0\n" + "sys S11 .32 210688 0\n" + "sys S11_tmp .32 210720 0\n" + "sys S12 .32 210752 0\n" + "sys S12_tmp .32 210784 0\n" + "sys S13 .32 210816 0\n" + "sys S13_tmp .32 210848 0\n" + "sys S14 .32 210880 0\n" + "sys S14_tmp .32 210912 0\n" + "sys S15 .32 210944 0\n" + "sys S15_tmp .32 210976 0\n" + "sys S16 .32 211008 0\n" + "sys S16_tmp .32 211040 0\n" + "sys S17 .32 211072 0\n" + "sys S17_tmp .32 211104 0\n" + "sys S18 .32 211136 0\n" + "sys 
S18_tmp .32 211168 0\n" + "sys S19 .32 211200 0\n" + "sys S19_tmp .32 211232 0\n" + "sys S20 .32 211264 0\n" + "sys S20_tmp .32 211296 0\n" + "sys S21 .32 211328 0\n" + "sys S21_tmp .32 211360 0\n" + "sys S22 .32 211392 0\n" + "sys S22_tmp .32 211424 0\n" + "sys S23 .32 211456 0\n" + "sys S23_tmp .32 211488 0\n" + "sys S24 .32 211520 0\n" + "sys S24_tmp .32 211552 0\n" + "sys S25 .32 211584 0\n" + "sys S25_tmp .32 211616 0\n" + "sys S26 .32 211648 0\n" + "sys S26_tmp .32 211680 0\n" + "sys S27 .32 211712 0\n" + "sys S27_tmp .32 211744 0\n" + "sys S28 .32 211776 0\n" + "sys S28_tmp .32 211808 0\n" + "sys S29 .32 211840 0\n" + "sys S29_tmp .32 211872 0\n" + "sys S30 .32 211904 0\n" + "sys S30_tmp .32 211936 0\n" + "sys S31 .32 211968 0\n" + "sys S31_tmp .32 212000 0\n" + "sys S32 .32 212032 0\n" + "sys S32_tmp .32 212064 0\n" + "sys S33 .32 212096 0\n" + "sys S33_tmp .32 212128 0\n" + "sys S34 .32 212160 0\n" + "sys S34_tmp .32 212192 0\n" + "sys S35 .32 212224 0\n" + "sys S35_tmp .32 212256 0\n" + "sys S36 .32 212288 0\n" + "sys S36_tmp .32 212320 0\n" + "sys S37 .32 212352 0\n" + "sys S37_tmp .32 212384 0\n" + "sys S38 .32 212416 0\n" + "sys S38_tmp .32 212448 0\n" + "sys S39 .32 212480 0\n" + "sys S39_tmp .32 212512 0\n" + "sys S40 .32 212544 0\n" + "sys S40_tmp .32 212576 0\n" + "sys S41 .32 212608 0\n" + "sys S41_tmp .32 212640 0\n" + "sys S42 .32 212672 0\n" + "sys S42_tmp .32 212704 0\n" + "sys S43 .32 212736 0\n" + "sys S43_tmp .32 212768 0\n" + "sys S44 .32 212800 0\n" + "sys S44_tmp .32 212832 0\n" + "sys S45 .32 212864 0\n" + "sys S45_tmp .32 212896 0\n" + "sys S46 .32 212928 0\n" + "sys S46_tmp .32 212960 0\n" + "sys S47 .32 212992 0\n" + "sys S47_tmp .32 213024 0\n" + "sys S48 .32 213056 0\n" + "sys S48_tmp .32 213088 0\n" + "sys S49 .32 213120 0\n" + "sys S49_tmp .32 213152 0\n" + "sys S50 .32 213184 0\n" + "sys S50_tmp .32 213216 0\n" + "sys S51 .32 213248 0\n" + "sys S51_tmp .32 213280 0\n" + "sys S52 .32 213312 0\n" + "sys S52_tmp .32 213344 0\n" + 
"sys S53 .32 213376 0\n" + "sys S53_tmp .32 213408 0\n" + "sys S54 .32 213440 0\n" + "sys S54_tmp .32 213472 0\n" + "sys S55 .32 213504 0\n" + "sys S55_tmp .32 213536 0\n" + "sys S56 .32 213568 0\n" + "sys S56_tmp .32 213600 0\n" + "sys S57 .32 213632 0\n" + "sys S57_tmp .32 213664 0\n" + "sys S58 .32 213696 0\n" + "sys S58_tmp .32 213728 0\n" + "sys S59 .32 213760 0\n" + "sys S59_tmp .32 213792 0\n" + "sys S60 .32 213824 0\n" + "sys S60_tmp .32 213856 0\n" + "sys S61 .32 213888 0\n" + "sys S61_tmp .32 213920 0\n" + "sys S62 .32 213952 0\n" + "sys S62_tmp .32 213984 0\n" + "sys S63 .32 214016 0\n" + "sys S63_tmp .32 214048 0\n" + "sys S64 .32 214080 0\n" + "sys S64_tmp .32 214112 0\n" + "sys S65 .32 214144 0\n" + "sys S65_tmp .32 214176 0\n" + "sys S66 .32 214208 0\n" + "sys S66_tmp .32 214240 0\n" + "sys S67 .32 214272 0\n" + "sys S67_tmp .32 214304 0\n" + "sys S68 .32 214336 0\n" + "sys S68_tmp .32 214368 0\n" + "sys S69 .32 214400 0\n" + "sys S69_tmp .32 214432 0\n" + "sys S70 .32 214464 0\n" + "sys S70_tmp .32 214496 0\n" + "sys S71 .32 214528 0\n" + "sys S71_tmp .32 214560 0\n" + "sys S72 .32 214592 0\n" + "sys S72_tmp .32 214624 0\n" + "sys S73 .32 214656 0\n" + "sys S73_tmp .32 214688 0\n" + "sys S74 .32 214720 0\n" + "sys S74_tmp .32 214752 0\n" + "sys S75 .32 214784 0\n" + "sys S75_tmp .32 214816 0\n" + "sys S76 .32 214848 0\n" + "sys S76_tmp .32 214880 0\n" + "sys S77 .32 214912 0\n" + "sys S77_tmp .32 214944 0\n" + "sys S78 .32 214976 0\n" + "sys S78_tmp .32 215008 0\n" + "sys S79 .32 215040 0\n" + "sys S79_tmp .32 215072 0\n" + "sys S80 .32 215104 0\n" + "sys S80_tmp .32 215136 0\n" - "ctr C0_tmp .32 39648 0\n" - "ctr C1_tmp .32 39680 0\n" - "ctr C2_tmp .32 39712 0\n" - "ctr C3_tmp .32 39744 0\n" - "ctr C4_tmp .32 39776 0\n" - "ctr C5_tmp .32 39808 0\n" - "ctr C6_tmp .32 39840 0\n" - "ctr C7_tmp .32 39872 0\n" - "ctr C8_tmp .32 39904 0\n" - "ctr C9_tmp .32 39936 0\n" - "ctr C10_tmp .32 39968 0\n" - "ctr C11_tmp .32 40000 0\n" - "ctr C12_tmp .32 40032 
0\n" - "ctr C13_tmp .32 40064 0\n" - "ctr C14_tmp .32 40096 0\n" - "ctr C15_tmp .32 40128 0\n" - "ctr C16_tmp .32 40160 0\n" - "ctr C17_tmp .32 40192 0\n" - "ctr C18_tmp .32 40224 0\n" - "ctr C19_tmp .32 40256 0\n" - "ctr C30_tmp .32 40288 0\n" - "ctr C31_tmp .32 40320 0\n" - "ctr C1:0_tmp .64 39648 0\n" - "ctr C3:2_tmp .64 39712 0\n" - "ctr C5:4_tmp .64 39776 0\n" - "ctr C7:6_tmp .64 39840 0\n" - "ctr C9:8_tmp .64 39904 0\n" - "ctr C11:10_tmp .64 39968 0\n" - "ctr C13:12_tmp .64 40032 0\n" - "ctr C15:14_tmp .64 40096 0\n" - "ctr C17:16_tmp .64 40160 0\n" - "ctr C19:18_tmp .64 40224 0\n" - "ctr C31:30_tmp .64 40288 0\n" - "gpr R1:0_tmp .64 38624 0\n" - "gpr R3:2_tmp .64 38688 0\n" - "gpr R5:4_tmp .64 38752 0\n" - "gpr R7:6_tmp .64 38816 0\n" - "gpr R9:8_tmp .64 38880 0\n" - "gpr R11:10_tmp .64 38944 0\n" - "gpr R13:12_tmp .64 39008 0\n" - "gpr R15:14_tmp .64 39072 0\n" - "gpr R17:16_tmp .64 39136 0\n" - "gpr R19:18_tmp .64 39200 0\n" - "gpr R21:20_tmp .64 39264 0\n" - "gpr R23:22_tmp .64 39328 0\n" - "gpr R25:24_tmp .64 39392 0\n" - "gpr R27:26_tmp .64 39456 0\n" - "gpr R29:28_tmp .64 39520 0\n" - "gpr R31:30_tmp .64 39584 0\n" - "gpr G0_tmp .32 40352 0\n" - "gpr G1_tmp .32 40384 0\n" - "gpr G2_tmp .32 40416 0\n" - "gpr G3_tmp .32 40448 0\n" - "gpr G4_tmp .32 40480 0\n" - "gpr G5_tmp .32 40512 0\n" - "gpr G6_tmp .32 40544 0\n" - "gpr G7_tmp .32 40576 0\n" - "gpr G8_tmp .32 40608 0\n" - "gpr G9_tmp .32 40640 0\n" - "gpr G10_tmp .32 40672 0\n" - "gpr G11_tmp .32 40704 0\n" - "gpr G12_tmp .32 40736 0\n" - "gpr G13_tmp .32 40768 0\n" - "gpr G14_tmp .32 40800 0\n" - "gpr G15_tmp .32 40832 0\n" - "gpr G16_tmp .32 40864 0\n" - "gpr G17_tmp .32 40896 0\n" - "gpr G18_tmp .32 40928 0\n" - "gpr G19_tmp .32 40960 0\n" - "gpr G20_tmp .32 40992 0\n" - "gpr G21_tmp .32 41024 0\n" - "gpr G22_tmp .32 41056 0\n" - "gpr G23_tmp .32 41088 0\n" - "gpr G24_tmp .32 41120 0\n" - "gpr G25_tmp .32 41152 0\n" - "gpr G26_tmp .32 41184 0\n" - "gpr G27_tmp .32 41216 0\n" - "gpr G28_tmp .32 
41248 0\n" - "gpr G29_tmp .32 41280 0\n" - "gpr G30_tmp .32 41312 0\n" - "gpr G31_tmp .32 41344 0\n" - "gpr G1:0_tmp .64 40352 0\n" - "gpr G3:2_tmp .64 40416 0\n" - "gpr G5:4_tmp .64 40480 0\n" - "gpr G7:6_tmp .64 40544 0\n" - "gpr G9:8_tmp .64 40608 0\n" - "gpr G11:10_tmp .64 40672 0\n" - "gpr G13:12_tmp .64 40736 0\n" - "gpr G15:14_tmp .64 40800 0\n" - "gpr G17:16_tmp .64 40864 0\n" - "gpr G19:18_tmp .64 40928 0\n" - "gpr G21:20_tmp .64 40992 0\n" - "gpr G23:22_tmp .64 41056 0\n" - "gpr G25:24_tmp .64 41120 0\n" - "gpr G27:26_tmp .64 41184 0\n" - "gpr G29:28_tmp .64 41248 0\n" - "gpr G31:30_tmp .64 41312 0\n" - "vcc Q0_tmp .128 41376 0\n" - "vcc Q1_tmp .128 41504 0\n" - "vcc Q2_tmp .128 41632 0\n" - "vcc Q3_tmp .128 41760 0\n" - "vc V3:0_tmp .4096 41888 0\n" - "vc V7:4_tmp .4096 45984 0\n" - "vc V11:8_tmp .4096 50080 0\n" - "vc V15:12_tmp .4096 54176 0\n" - "vc V19:16_tmp .4096 58272 0\n" - "vc V23:20_tmp .4096 62368 0\n" - "vc V27:24_tmp .4096 66464 0\n" - "vc V31:28_tmp .4096 70560 0\n" - "vc V0_tmp .1024 41888 0\n" - "vc V1_tmp .1024 42912 0\n" - "vc V2_tmp .1024 43936 0\n" - "vc V3_tmp .1024 44960 0\n" - "vc V4_tmp .1024 45984 0\n" - "vc V5_tmp .1024 47008 0\n" - "vc V6_tmp .1024 48032 0\n" - "vc V7_tmp .1024 49056 0\n" - "vc V8_tmp .1024 50080 0\n" - "vc V9_tmp .1024 51104 0\n" - "vc V10_tmp .1024 52128 0\n" - "vc V11_tmp .1024 53152 0\n" - "vc V12_tmp .1024 54176 0\n" - "vc V13_tmp .1024 55200 0\n" - "vc V14_tmp .1024 56224 0\n" - "vc V15_tmp .1024 57248 0\n" - "vc V16_tmp .1024 58272 0\n" - "vc V17_tmp .1024 59296 0\n" - "vc V18_tmp .1024 60320 0\n" - "vc V19_tmp .1024 61344 0\n" - "vc V20_tmp .1024 62368 0\n" - "vc V21_tmp .1024 63392 0\n" - "vc V22_tmp .1024 64416 0\n" - "vc V23_tmp .1024 65440 0\n" - "vc V24_tmp .1024 66464 0\n" - "vc V25_tmp .1024 67488 0\n" - "vc V26_tmp .1024 68512 0\n" - "vc V27_tmp .1024 69536 0\n" - "vc V28_tmp .1024 70560 0\n" - "vc V29_tmp .1024 71584 0\n" - "vc V30_tmp .1024 72608 0\n" - "vc V31_tmp .1024 73632 0\n" - "vc 
V1:0_tmp .2048 41888 0\n" - "vc V3:2_tmp .2048 43936 0\n" - "vc V5:4_tmp .2048 45984 0\n" - "vc V7:6_tmp .2048 48032 0\n" - "vc V9:8_tmp .2048 50080 0\n" - "vc V11:10_tmp .2048 52128 0\n" - "vc V13:12_tmp .2048 54176 0\n" - "vc V15:14_tmp .2048 56224 0\n" - "vc V17:16_tmp .2048 58272 0\n" - "vc V19:18_tmp .2048 60320 0\n" - "vc V21:20_tmp .2048 62368 0\n" - "vc V23:22_tmp .2048 64416 0\n" - "vc V25:24_tmp .2048 66464 0\n" - "vc V27:26_tmp .2048 68512 0\n" - "vc V29:28_tmp .2048 70560 0\n" - "vc V31:30_tmp .2048 72608 0\n" - "gpr R0_tmp .32 38624 0\n" - "gpr R1_tmp .32 38656 0\n" - "gpr R2_tmp .32 38688 0\n" - "gpr R3_tmp .32 38720 0\n" - "gpr R4_tmp .32 38752 0\n" - "gpr R5_tmp .32 38784 0\n" - "gpr R6_tmp .32 38816 0\n" - "gpr R7_tmp .32 38848 0\n" - "gpr R8_tmp .32 38880 0\n" - "gpr R9_tmp .32 38912 0\n" - "gpr R10_tmp .32 38944 0\n" - "gpr R11_tmp .32 38976 0\n" - "gpr R12_tmp .32 39008 0\n" - "gpr R13_tmp .32 39040 0\n" - "gpr R14_tmp .32 39072 0\n" - "gpr R15_tmp .32 39104 0\n" - "gpr R16_tmp .32 39136 0\n" - "gpr R17_tmp .32 39168 0\n" - "gpr R18_tmp .32 39200 0\n" - "gpr R19_tmp .32 39232 0\n" - "gpr R20_tmp .32 39264 0\n" - "gpr R21_tmp .32 39296 0\n" - "gpr R22_tmp .32 39328 0\n" - "gpr R23_tmp .32 39360 0\n" - "gpr R24_tmp .32 39392 0\n" - "gpr R25_tmp .32 39424 0\n" - "gpr R26_tmp .32 39456 0\n" - "gpr R27_tmp .32 39488 0\n" - "gpr R28_tmp .32 39520 0\n" - "gpr R29_tmp .32 39552 0\n" - "gpr R30_tmp .32 39584 0\n" - "gpr R31_tmp .32 39616 0\n" - "ctr P0_tmp .8 39776 0\n" - "ctr P1_tmp .8 39784 0\n" - "ctr P2_tmp .8 39792 0\n" - "ctr P3_tmp .8 39800 0\n" - "sys S0_tmp .32 74656 0\n" - "sys S1_tmp .32 74688 0\n" - "sys S2_tmp .32 74720 0\n" - "sys S3_tmp .32 74752 0\n" - "sys S4_tmp .32 74784 0\n" - "sys S5_tmp .32 74816 0\n" - "sys S6_tmp .32 74848 0\n" - "sys S7_tmp .32 74880 0\n" - "sys S8_tmp .32 74912 0\n" - "sys S9_tmp .32 74944 0\n" - "sys S10_tmp .32 74976 0\n" - "sys S11_tmp .32 75008 0\n" - "sys S12_tmp .32 75040 0\n" - "sys S13_tmp .32 75072 0\n" 
- "sys S14_tmp .32 75104 0\n" - "sys S15_tmp .32 75136 0\n" - "sys S16_tmp .32 75168 0\n" - "sys S17_tmp .32 75200 0\n" - "sys S18_tmp .32 75232 0\n" - "sys S19_tmp .32 75264 0\n" - "sys S20_tmp .32 75296 0\n" - "sys S21_tmp .32 75328 0\n" - "sys S22_tmp .32 75360 0\n" - "sys S23_tmp .32 75392 0\n" - "sys S24_tmp .32 75424 0\n" - "sys S25_tmp .32 75456 0\n" - "sys S26_tmp .32 75488 0\n" - "sys S27_tmp .32 75520 0\n" - "sys S28_tmp .32 75552 0\n" - "sys S29_tmp .32 75584 0\n" - "sys S30_tmp .32 75616 0\n" - "sys S31_tmp .32 75648 0\n" - "sys S32_tmp .32 75680 0\n" - "sys S33_tmp .32 75712 0\n" - "sys S34_tmp .32 75744 0\n" - "sys S35_tmp .32 75776 0\n" - "sys S36_tmp .32 75808 0\n" - "sys S37_tmp .32 75840 0\n" - "sys S38_tmp .32 75872 0\n" - "sys S39_tmp .32 75904 0\n" - "sys S40_tmp .32 75936 0\n" - "sys S41_tmp .32 75968 0\n" - "sys S42_tmp .32 76000 0\n" - "sys S43_tmp .32 76032 0\n" - "sys S44_tmp .32 76064 0\n" - "sys S45_tmp .32 76096 0\n" - "sys S46_tmp .32 76128 0\n" - "sys S47_tmp .32 76160 0\n" - "sys S48_tmp .32 76192 0\n" - "sys S49_tmp .32 76224 0\n" - "sys S50_tmp .32 76256 0\n" - "sys S51_tmp .32 76288 0\n" - "sys S52_tmp .32 76320 0\n" - "sys S53_tmp .32 76352 0\n" - "sys S54_tmp .32 76384 0\n" - "sys S55_tmp .32 76416 0\n" - "sys S56_tmp .32 76448 0\n" - "sys S57_tmp .32 76480 0\n" - "sys S58_tmp .32 76512 0\n" - "sys S59_tmp .32 76544 0\n" - "sys S60_tmp .32 76576 0\n" - "sys S61_tmp .32 76608 0\n" - "sys S62_tmp .32 76640 0\n" - "sys S63_tmp .32 76672 0\n" - "sys S64_tmp .32 76704 0\n" - "sys S65_tmp .32 76736 0\n" - "sys S66_tmp .32 76768 0\n" - "sys S67_tmp .32 76800 0\n" - "sys S68_tmp .32 76832 0\n" - "sys S69_tmp .32 76864 0\n" - "sys S70_tmp .32 76896 0\n" - "sys S71_tmp .32 76928 0\n" - "sys S72_tmp .32 76960 0\n" - "sys S73_tmp .32 76992 0\n" - "sys S74_tmp .32 77024 0\n" - "sys S75_tmp .32 77056 0\n" - "sys S76_tmp .32 77088 0\n" - "sys S77_tmp .32 77120 0\n" - "sys S78_tmp .32 77152 0\n" - "sys S79_tmp .32 77184 0\n" - "sys S80_tmp .32 
77216 0\n" - "sys S1:0_tmp .64 74656 0\n" - "sys S3:2_tmp .64 74720 0\n" - "sys S5:4_tmp .64 74784 0\n" - "sys S7:6_tmp .64 74848 0\n" - "sys S9:8_tmp .64 74912 0\n" - "sys S11:10_tmp .64 74976 0\n" - "sys S13:12_tmp .64 75040 0\n" - "sys S15:14_tmp .64 75104 0\n" - "sys S17:16_tmp .64 75168 0\n" - "sys S19:18_tmp .64 75232 0\n" - "sys S21:20_tmp .64 75296 0\n" - "sys S23:22_tmp .64 75360 0\n" - "sys S25:24_tmp .64 75424 0\n" - "sys S27:26_tmp .64 75488 0\n" - "sys S29:28_tmp .64 75552 0\n" - "sys S31:30_tmp .64 75616 0\n" - "sys S33:32_tmp .64 75680 0\n" - "sys S35:34_tmp .64 75744 0\n" - "sys S37:36_tmp .64 75808 0\n" - "sys S39:38_tmp .64 75872 0\n" - "sys S41:40_tmp .64 75936 0\n" - "sys S43:42_tmp .64 76000 0\n" - "sys S45:44_tmp .64 76064 0\n" - "sys S47:46_tmp .64 76128 0\n" - "sys S49:48_tmp .64 76192 0\n" - "sys S51:50_tmp .64 76256 0\n" - "sys S53:52_tmp .64 76320 0\n" - "sys S55:54_tmp .64 76384 0\n" - "sys S57:56_tmp .64 76448 0\n" - "sys S59:58_tmp .64 76512 0\n" - "sys S61:60_tmp .64 76576 0\n" - "sys S63:62_tmp .64 76640 0\n" - "sys S65:64_tmp .64 76704 0\n" - "sys S67:66_tmp .64 76768 0\n" - "sys S69:68_tmp .64 76832 0\n" - "sys S71:70_tmp .64 76896 0\n" - "sys S73:72_tmp .64 76960 0\n" - "sys S75:74_tmp .64 77024 0\n" - "sys S77:76_tmp .64 77088 0\n"; + "sys S1:0 .64 215168 0\n" + "sys S1:0_tmp .64 215232 0\n" + "sys S3:2 .64 215296 0\n" + "sys S3:2_tmp .64 215360 0\n" + "sys S5:4 .64 215424 0\n" + "sys S5:4_tmp .64 215488 0\n" + "sys S7:6 .64 215552 0\n" + "sys S7:6_tmp .64 215616 0\n" + "sys S9:8 .64 215680 0\n" + "sys S9:8_tmp .64 215744 0\n" + "sys S11:10 .64 215808 0\n" + "sys S11:10_tmp .64 215872 0\n" + "sys S13:12 .64 215936 0\n" + "sys S13:12_tmp .64 216000 0\n" + "sys S15:14 .64 216064 0\n" + "sys S15:14_tmp .64 216128 0\n" + "sys S17:16 .64 216192 0\n" + "sys S17:16_tmp .64 216256 0\n" + "sys S19:18 .64 216320 0\n" + "sys S19:18_tmp .64 216384 0\n" + "sys S21:20 .64 216448 0\n" + "sys S21:20_tmp .64 216512 0\n" + "sys S23:22 .64 216576 
0\n" + "sys S23:22_tmp .64 216640 0\n" + "sys S25:24 .64 216704 0\n" + "sys S25:24_tmp .64 216768 0\n" + "sys S27:26 .64 216832 0\n" + "sys S27:26_tmp .64 216896 0\n" + "sys S29:28 .64 216960 0\n" + "sys S29:28_tmp .64 217024 0\n" + "sys S31:30 .64 217088 0\n" + "sys S31:30_tmp .64 217152 0\n" + "sys S33:32 .64 217216 0\n" + "sys S33:32_tmp .64 217280 0\n" + "sys S35:34 .64 217344 0\n" + "sys S35:34_tmp .64 217408 0\n" + "sys S37:36 .64 217472 0\n" + "sys S37:36_tmp .64 217536 0\n" + "sys S39:38 .64 217600 0\n" + "sys S39:38_tmp .64 217664 0\n" + "sys S41:40 .64 217728 0\n" + "sys S41:40_tmp .64 217792 0\n" + "sys S43:42 .64 217856 0\n" + "sys S43:42_tmp .64 217920 0\n" + "sys S45:44 .64 217984 0\n" + "sys S45:44_tmp .64 218048 0\n" + "sys S47:46 .64 218112 0\n" + "sys S47:46_tmp .64 218176 0\n" + "sys S49:48 .64 218240 0\n" + "sys S49:48_tmp .64 218304 0\n" + "sys S51:50 .64 218368 0\n" + "sys S51:50_tmp .64 218432 0\n" + "sys S53:52 .64 218496 0\n" + "sys S53:52_tmp .64 218560 0\n" + "sys S55:54 .64 218624 0\n" + "sys S55:54_tmp .64 218688 0\n" + "sys S57:56 .64 218752 0\n" + "sys S57:56_tmp .64 218816 0\n" + "sys S59:58 .64 218880 0\n" + "sys S59:58_tmp .64 218944 0\n" + "sys S61:60 .64 219008 0\n" + "sys S61:60_tmp .64 219072 0\n" + "sys S63:62 .64 219136 0\n" + "sys S63:62_tmp .64 219200 0\n" + "sys S65:64 .64 219264 0\n" + "sys S65:64_tmp .64 219328 0\n" + "sys S67:66 .64 219392 0\n" + "sys S67:66_tmp .64 219456 0\n" + "sys S69:68 .64 219520 0\n" + "sys S69:68_tmp .64 219584 0\n" + "sys S71:70 .64 219648 0\n" + "sys S71:70_tmp .64 219712 0\n" + "sys S73:72 .64 219776 0\n" + "sys S73:72_tmp .64 219840 0\n" + "sys S75:74 .64 219904 0\n" + "sys S75:74_tmp .64 219968 0\n" + "sys S77:76 .64 220032 0\n" + "sys S77:76_tmp .64 220096 0\n" + "sys S79:78 .64 220160 0\n" + "sys S79:78_tmp .64 220224 0\n"; return strdup(p); } + RzAnalysisPlugin rz_analysis_plugin_hexagon = { .name = "hexagon", .desc = "Qualcomm Hexagon (QDSP6) V6", @@ -702,4 +757,5 @@ RzAnalysisPlugin 
rz_analysis_plugin_hexagon = { .op = hexagon_v6_op, .esil = false, .get_reg_profile = get_reg_profile, + .il_config = rz_hexagon_il_config, }; diff --git a/librz/arch/p/asm/asm_hexagon.c b/librz/arch/p/asm/asm_hexagon.c index 35c41864c9d..9567f70d013 100644 --- a/librz/arch/p/asm/asm_hexagon.c +++ b/librz/arch/p/asm/asm_hexagon.c @@ -1,9 +1,9 @@ // SPDX-FileCopyrightText: 2021 Rot127 // SPDX-License-Identifier: LGPL-3.0-only -// LLVM commit: 96e220e6886868d6663d966ecc396befffc355e7 -// LLVM commit date: 2022-01-05 11:01:52 +0000 (ISO 8601 format) -// Date of code generation: 2022-09-12 14:26:04-04:00 +// LLVM commit: b6f51787f6c8e77143f0aef6b58ddc7c55741d5c +// LLVM commit date: 2023-11-15 07:10:59 -0800 (ISO 8601 format) +// Date of code generation: 2024-03-16 06:22:39-05:00 //======================================== // The following code is generated. // Do not edit. Repository of code generator: @@ -19,13 +19,8 @@ #include #include -static RZ_OWN RzPVector /**/ *get_token_patterns(HexState *state) { - RzPVector *pvec = state->token_patterns; - if (pvec) { - return pvec; - } - - pvec = rz_pvector_new(rz_asm_token_pattern_free); +static RZ_OWN RzPVector /**/ *get_token_patterns() { + RzPVector *pvec = rz_pvector_new(rz_asm_token_pattern_free); RzAsmTokenPattern *pat = RZ_NEW0(RzAsmTokenPattern); pat->type = RZ_ASM_TOKEN_META; @@ -121,7 +116,7 @@ static RZ_OWN RzPVector /**/ *get_token_patterns(HexState * */ static bool hex_cfg_set(void *user, void *data) { rz_return_val_if_fail(user && data, false); - HexState *state = hexagon_get_state(); + HexState *state = hexagon_state(false); if (!state) { return false; } @@ -140,11 +135,26 @@ static bool hex_cfg_set(void *user, void *data) { return false; } +RZ_IPI void hexagon_state_fini(HexState *state) { + if (!state) { + return; + } + rz_config_free(state->cfg); + rz_pvector_free(state->token_patterns); + rz_list_free(state->const_ext_l); + return; +} + +static bool hexagon_fini(void *user) { + 
hexagon_state_fini(hexagon_state(false)); + hexagon_state(true); + return true; +} + static bool hexagon_init(void **user) { - HexState *state = hexagon_get_state(); + HexState *state = hexagon_state(false); rz_return_val_if_fail(state, false); - *user = state; // user = RzAsm.plugin_data state->cfg = rz_config_new(state); rz_return_val_if_fail(state->cfg, false); @@ -155,26 +165,16 @@ static bool hexagon_init(void **user) { SETCB("plugins.hexagon.sdk", "false", &hex_cfg_set, "Print packet syntax in objdump style."); SETCB("plugins.hexagon.reg.alias", "true", &hex_cfg_set, "Print the alias of registers (Alias from C0 = SA0)."); - state->token_patterns = get_token_patterns(state); - rz_asm_compile_token_patterns(state->token_patterns); - - return true; -} - -static bool hexagon_fini(void *user) { - HexState *state = (HexState *)user; - rz_return_val_if_fail(state, false); - - if (state->token_patterns) { - rz_pvector_free(state->token_patterns); - state->token_patterns = NULL; + if (!state->token_patterns) { + state->token_patterns = get_token_patterns(); } + rz_asm_compile_token_patterns(state->token_patterns); return true; } -RZ_API RZ_BORROW RzConfig *hexagon_get_config(void) { - HexState *state = hexagon_get_state(); +RZ_API RZ_BORROW RzConfig *hexagon_get_config() { + HexState *state = hexagon_state(false); rz_return_val_if_fail(state, NULL); return state->cfg; } @@ -190,14 +190,27 @@ RZ_API RZ_BORROW RzConfig *hexagon_get_config(void) { */ static int disassemble(RzAsm *a, RzAsmOp *op, const ut8 *buf, int l) { rz_return_val_if_fail(a && op && buf, -1); - if (l < 4) { + if (l < HEX_INSN_SIZE) { return -1; } ut32 addr = (ut32)a->pc; + // Disassemble as many instructions as possible from the buffer. 
+ ut32 buf_offset = 0; + while (buf_offset + HEX_INSN_SIZE <= l && buf_offset <= HEX_INSN_SIZE * HEX_MAX_INSN_PER_PKT) { + const ut32 buf_ptr = rz_read_at_le32(buf, buf_offset); + if (buf_offset > 0 && (buf_ptr == HEX_INVALID_INSN_0 || buf_ptr == HEX_INVALID_INSN_F)) { + // Do not disassemble invalid instructions, if we already have a valid one. + break; + } + + HexReversedOpcode rev = { .action = HEXAGON_DISAS, .ana_op = NULL, .asm_op = op }; + hexagon_reverse_opcode(a, &rev, buf + buf_offset, addr + buf_offset, false); + buf_offset += HEX_INSN_SIZE; + } + // Copy operation actually requested. HexReversedOpcode rev = { .action = HEXAGON_DISAS, .ana_op = NULL, .asm_op = op }; - - hexagon_reverse_opcode(a, &rev, buf, addr); - return op->size; + hexagon_reverse_opcode(a, &rev, buf, addr, true); + return HEX_INSN_SIZE; } RzAsmPlugin rz_asm_plugin_hexagon = { @@ -208,7 +221,7 @@ RzAsmPlugin rz_asm_plugin_hexagon = { .bits = 32, .desc = "Qualcomm Hexagon (QDSP6) V6", .init = &hexagon_init, - .fini = hexagon_fini, + .fini = &hexagon_fini, .disassemble = &disassemble, .get_config = &hexagon_get_config, }; diff --git a/librz/arch/types/cc-hexagon-32.sdb.txt b/librz/arch/types/cc-hexagon-32.sdb.txt index 353794bfc13..0aea012f130 100644 --- a/librz/arch/types/cc-hexagon-32.sdb.txt +++ b/librz/arch/types/cc-hexagon-32.sdb.txt @@ -1,33 +1,33 @@ default.cc=hexagon hexagon=cc -cc.hexagon.arg0=r0 -cc.hexagon.arg1=r1 -cc.hexagon.arg2=r2 -cc.hexagon.arg3=r3 -cc.hexagon.arg4=r4 -cc.hexagon.arg5=r5 -cc.hexagon.argn=stack_rev cc.hexagon.maxargs=6 -cc.hexagon.ret=r0 +cc.hexagon.arg0=R0 +cc.hexagon.arg1=R1 +cc.hexagon.arg2=R2 +cc.hexagon.arg3=R3 +cc.hexagon.arg4=R4 +cc.hexagon.arg5=R5 +cc.hexagon.argn=stack_rev +cc.hexagon.ret=R0 hvx=cc cc.hvx.name=hvx -cc.hvx.arg0=v0 -cc.hvx.arg1=v1 -cc.hvx.arg2=v2 -cc.hvx.arg3=v3 -cc.hvx.arg4=v4 -cc.hvx.arg5=v5 -cc.hvx.arg6=v6 -cc.hvx.arg7=v7 -cc.hvx.arg8=v8 -cc.hvx.arg9=v9 -cc.hvx.arg10=v10 -cc.hvx.arg11=v11 -cc.hvx.arg12=v12 -cc.hvx.arg13=v13 
-cc.hvx.arg14=v14 -cc.hvx.arg15=v15 cc.hvx.maxargs=16 -cc.hvx.ret=v0 +cc.hvx.arg0=V0 +cc.hvx.arg1=V1 +cc.hvx.arg2=V2 +cc.hvx.arg3=V3 +cc.hvx.arg4=V4 +cc.hvx.arg5=V5 +cc.hvx.arg6=V6 +cc.hvx.arg7=V7 +cc.hvx.arg8=V8 +cc.hvx.arg9=V9 +cc.hvx.arg10=V10 +cc.hvx.arg11=V11 +cc.hvx.arg12=V12 +cc.hvx.arg13=V13 +cc.hvx.arg14=V14 +cc.hvx.arg15=V15 +cc.hvx.ret=V0 diff --git a/librz/include/rz_util/rz_bits.h b/librz/include/rz_util/rz_bits.h index 30ca07ed9ed..113f88ae7a9 100644 --- a/librz/include/rz_util/rz_bits.h +++ b/librz/include/rz_util/rz_bits.h @@ -8,6 +8,9 @@ extern "C" { #endif +#include +#include + /** * \brief Get the number of leading zeros of a 64-bit integer in binary representation. * \param x the 64-bit integer diff --git a/librz/include/rz_vector.h b/librz/include/rz_vector.h index d091c66a92b..469942b5695 100644 --- a/librz/include/rz_vector.h +++ b/librz/include/rz_vector.h @@ -312,6 +312,8 @@ RZ_API RZ_BORROW void **rz_pvector_find(RZ_NONNULL const RzPVector *vec, RZ_NONN // join two pvector into one, pvec1 should free the joined element in pvec2 RZ_API bool rz_pvector_join(RZ_NONNULL RzPVector *pvec1, RZ_NONNULL RzPVector *pvec2); +RZ_API void *rz_pvector_assign_at(RZ_BORROW RZ_NONNULL RzPVector *vec, size_t index, RZ_OWN RZ_NONNULL void *ptr); + // removes and returns the pointer at the given index. Does not call free. RZ_API void *rz_pvector_remove_at(RzPVector *vec, size_t index); diff --git a/librz/util/vector.c b/librz/util/vector.c index 59ce9d8832a..addc707075c 100644 --- a/librz/util/vector.c +++ b/librz/util/vector.c @@ -471,6 +471,26 @@ RZ_API bool rz_pvector_join(RZ_NONNULL RzPVector *pvec1, RZ_NONNULL RzPVector *p return true; } +/** + * \brief Assign the pointer \p ptr at \p index in the pvector. + * + * \param vec The pvector to assign to. + * \param index The index to assign the pointer to. + * \param ptr The pointer to assign. + * + * \return The pointer stored at \p index before. Or NULL in case of failure. 
+ */ +RZ_API void *rz_pvector_assign_at(RZ_BORROW RZ_NONNULL RzPVector *vec, size_t index, RZ_OWN RZ_NONNULL void *ptr) { + rz_return_val_if_fail(vec && ptr, NULL); + void **p = rz_vector_index_ptr(&vec->v, index); + if (!p) { + return NULL; + } + void *prev = *p; + rz_vector_assign_at(&vec->v, index, ptr); + return prev; +} + RZ_API void *rz_pvector_remove_at(RzPVector *vec, size_t index) { rz_return_val_if_fail(vec, NULL); void *r = rz_pvector_at(vec, index); diff --git a/test/db/analysis/hexagon b/test/db/analysis/hexagon index 4dff545335d..0d0ea55a067 100644 --- a/test/db/analysis/hexagon +++ b/test/db/analysis/hexagon @@ -9,13 +9,13 @@ afx EOF EXPECT=< 0x00005128 ? jump 0x5128 -c 0x00005130 -> 0x00005134 ? if (P0) jump:nt 0x5154 -c 0x00005130 -> 0x00005154 ? if (P0) jump:nt 0x5154 +c 0x00005130 -> 0x00005134 [ if (P0) jump:nt 0x5154 +c 0x00005130 -> 0x00005154 [ if (P0) jump:nt 0x5154 c 0x00005134 -> 0x00005138 [ jump 0x5138 C 0x00005138 -> 0x000050e0 [ call sym.pHello C 0x0000513c -> 0x000050f8 [ call sym.pWorld c 0x00005140 -> 0x00005144 [ jump 0x5144 -c 0x00005150 -> 0x00005128 ? 
jump 0x5128 +c 0x00005150 -> 0x00005128 [ jump 0x5128 EOF RUN @@ -451,7 +451,7 @@ EOF EXPECT=<.new,#0x7)) jump:nt 0x2a4 - RzAsmToken tokens[21] = { - { .start = 0, .len = 1, .type = RZ_ASM_TOKEN_META, .val.number = 0 }, // │ + const ut8 buf[] = "\x08\x48\x00\x5c"; // \ if (P0.new) jump:nt 0x18 + RzAsmToken tokens[13] = { + { .start = 0, .len = 1, .type = RZ_ASM_TOKEN_META, .val.number = 0 }, // backslash { .start = 1, .len = 3, .type = RZ_ASM_TOKEN_SEPARATOR, .val.number = 0 }, // \s\s\s { .start = 4, .len = 2, .type = RZ_ASM_TOKEN_MNEMONIC, .val.number = 0 }, // if { .start = 6, .len = 1, .type = RZ_ASM_TOKEN_SEPARATOR, .val.number = 0 }, // \s { .start = 7, .len = 1, .type = RZ_ASM_TOKEN_SEPARATOR, .val.number = 0 }, // ( - { .start = 8, .len = 3, .type = RZ_ASM_TOKEN_MNEMONIC, .val.number = 0 }, // cmp - { .start = 11, .len = 1, .type = RZ_ASM_TOKEN_SEPARATOR, .val.number = 0 }, // . - { .start = 12, .len = 2, .type = RZ_ASM_TOKEN_MNEMONIC, .val.number = 0 }, // eq - { .start = 14, .len = 1, .type = RZ_ASM_TOKEN_SEPARATOR, .val.number = 0 }, // ( - { .start = 15, .len = 5, .type = RZ_ASM_TOKEN_META, .val.number = 0 }, // - { .start = 20, .len = 4, .type = RZ_ASM_TOKEN_META, .val.number = 0 }, // .new - { .start = 24, .len = 1, .type = RZ_ASM_TOKEN_SEPARATOR, .val.number = 0 }, // , - { .start = 25, .len = 1, .type = RZ_ASM_TOKEN_META, .val.number = 0 }, // # - { .start = 26, .len = 3, .type = RZ_ASM_TOKEN_NUMBER, .val.number = 7 }, // 0x7 - { .start = 29, .len = 1, .type = RZ_ASM_TOKEN_SEPARATOR, .val.number = 0 }, // ) - { .start = 30, .len = 1, .type = RZ_ASM_TOKEN_SEPARATOR, .val.number = 0 }, // ) - { .start = 31, .len = 1, .type = RZ_ASM_TOKEN_SEPARATOR, .val.number = 0 }, // \s - { .start = 32, .len = 4, .type = RZ_ASM_TOKEN_MNEMONIC, .val.number = 0 }, // jump - { .start = 36, .len = 3, .type = RZ_ASM_TOKEN_META, .val.number = 0 }, // :nt - { .start = 39, .len = 1, .type = RZ_ASM_TOKEN_SEPARATOR, .val.number = 0 }, // \s - { .start = 40, .len = 5, .type = 
RZ_ASM_TOKEN_NUMBER, .val.number = 0x2a8 } // 0x2a8 + { .start = 8, .len = 2, .type = RZ_ASM_TOKEN_REGISTER, .val.number = 0 }, // P0 + { .start = 10, .len = 4, .type = RZ_ASM_TOKEN_META, .val.number = 0 }, // .new + { .start = 14, .len = 1, .type = RZ_ASM_TOKEN_SEPARATOR, .val.number = 0 }, // ) + { .start = 15, .len = 1, .type = RZ_ASM_TOKEN_SEPARATOR, .val.number = 0 }, // \s + { .start = 16, .len = 4, .type = RZ_ASM_TOKEN_MNEMONIC, .val.number = 0 }, // jump + { .start = 20, .len = 3, .type = RZ_ASM_TOKEN_META, .val.number = 0 }, // :nt + { .start = 23, .len = 1, .type = RZ_ASM_TOKEN_SEPARATOR, .val.number = 0 }, // \s + { .start = 24, .len = 4, .type = RZ_ASM_TOKEN_NUMBER, .val.number = 0x18 } // 0x18 }; RzAsmOp *op = RZ_NEW0(RzAsmOp); @@ -355,7 +359,7 @@ static bool test_rz_tokenize_custom_hexagon_1(void) { if (!op->asm_toks) { mu_fail("NULL check failed.\n"); } - mu_assert_eq(rz_vector_len(op->asm_toks->tokens), 21, "Number of generated tokens."); + mu_assert_eq(rz_vector_len(op->asm_toks->tokens), 13, "Number of generated tokens."); int i = 0; RzAsmToken *it; rz_vector_foreach(op->asm_toks->tokens, it) { @@ -366,6 +370,7 @@ static bool test_rz_tokenize_custom_hexagon_1(void) { ++i; }; + rz_asm_op_fini(op); mu_end; } @@ -381,14 +386,24 @@ static bool test_rz_colorize_generic_0(void) { rz_asm_disassemble(d, asmop, buf, sizeof(buf)); rz_analysis_op(a, anaop, 0x0, buf, sizeof(buf), RZ_ANALYSIS_OP_MASK_ALL); + RzAsmParseParam *param = rz_asm_get_parse_param(a->reg, anaop->type); RzStrBuf *colored_asm = rz_asm_colorize_asm_str(&asmop->buf_asm, p, - rz_asm_get_parse_param(a->reg, anaop->type), asmop->asm_toks); + param, asmop->asm_toks); RzStrBuf *expected = rz_strbuf_new("\x1b[35mldur\x1b[0m\x1b[37m \x1b[0m\x1b[36mx4\x1b[0m\x1b[37m, [\x1b[0m\x1b[36mx6\x1b[0m\x1b[37m, \x1b[0m\x1b[33m0x14\x1b[0m\x1b[37m]\x1b[0m"); char err_msg[2048]; snprintf(err_msg, sizeof(err_msg), "Colors of \"%s\" are incorrect. 
Should be \"%s\"\n.", rz_strbuf_get(colored_asm), rz_strbuf_get(expected)); mu_assert_true(rz_strbuf_equals(colored_asm, expected), err_msg); + rz_asm_parse_param_free(param); + rz_asm_op_fini(asmop); + rz_analysis_op_free(anaop); + rz_asm_free(d); + rz_analysis_free(a); + rz_cons_context_free(p->cons->context); + rz_print_free(p); + rz_strbuf_free(expected); + rz_strbuf_free(colored_asm); mu_end; } @@ -412,6 +427,14 @@ static bool test_rz_colorize_generic_1(void) { snprintf(err_msg, sizeof(err_msg), "Colors of \"%s\" are incorrect. Should be \"%s\"\n.", rz_strbuf_get(colored_asm), rz_strbuf_get(expected)); mu_assert_true(rz_strbuf_equals(colored_asm, expected), err_msg); + rz_asm_op_fini(asmop); + rz_analysis_op_free(anaop); + rz_asm_free(d); + rz_analysis_free(a); + rz_cons_context_free(p->cons->context); + rz_print_free(p); + rz_strbuf_free(expected); + rz_strbuf_free(colored_asm); mu_end; } @@ -435,6 +458,14 @@ static bool test_rz_colorize_generic_2(void) { snprintf(err_msg, sizeof(err_msg), "Colors of \"%s\" are incorrect. Should be \"%s\"\n.", rz_strbuf_get(colored_asm), rz_strbuf_get(expected)); mu_assert_true(rz_strbuf_equals(colored_asm, expected), err_msg); + rz_asm_op_fini(asmop); + rz_analysis_op_free(anaop); + rz_asm_free(d); + rz_analysis_free(a); + rz_cons_context_free(p->cons->context); + rz_print_free(p); + rz_strbuf_free(expected); + rz_strbuf_free(colored_asm); mu_end; } @@ -458,6 +489,14 @@ static bool test_rz_colorize_generic_3(void) { snprintf(err_msg, sizeof(err_msg), "Colors of \"%s\" are incorrect. 
Should be \"%s\"\n.", rz_strbuf_get(colored_asm), rz_strbuf_get(expected)); mu_assert_true(rz_strbuf_equals(colored_asm, expected), err_msg); + rz_asm_op_fini(asmop); + rz_analysis_op_free(anaop); + rz_asm_free(d); + rz_analysis_free(a); + rz_cons_context_free(p->cons->context); + rz_print_free(p); + rz_strbuf_free(expected); + rz_strbuf_free(colored_asm); mu_end; } @@ -481,6 +520,14 @@ static bool test_rz_colorize_generic_4(void) { snprintf(err_msg, sizeof(err_msg), "Colors of \"%s\" are incorrect. Should be \"%s\"\n.", rz_strbuf_get(colored_asm), rz_strbuf_get(expected)); mu_assert_true(rz_strbuf_equals(colored_asm, expected), err_msg); + rz_asm_op_fini(asmop); + rz_analysis_op_free(anaop); + rz_asm_free(d); + rz_analysis_free(a); + rz_cons_context_free(p->cons->context); + rz_print_free(p); + rz_strbuf_free(expected); + rz_strbuf_free(colored_asm); mu_end; } @@ -492,19 +539,25 @@ static bool test_rz_colorize_custom_hexagon_0(void) { RzPrint *p = setup_print(); RzAsmOp *asmop = rz_asm_op_new(); RzAnalysisOp *anaop = rz_analysis_op_new(); - // "? 
if (cmp.eq(.new,#0x0)) jump:nt 0x2ac" 20c00224 - ut8 buf[] = "\x20\xc0\x02\x24"; + // "\ if (P0.new) jump:nt 0x18 + ut8 buf[] = "\x08\xe8\x00\x5c"; rz_asm_disassemble(d, asmop, buf, sizeof(buf)); rz_analysis_op(a, anaop, pc, buf, sizeof(buf), RZ_ANALYSIS_OP_MASK_ALL); RzStrBuf *colored_asm = rz_print_colorize_asm_str(p, asmop->asm_toks); - RzStrBuf *expected = rz_strbuf_new("\x1b[90m[\x1b[0m\x1b[37m \x1b[0m\x1b[32mif\x1b[0m\x1b[37m \x1b[0m\x1b[37m(\x1b[0m\x1b[32mcmp\x1b[0m\x1b[37m.\x1b[0m\x1b[32meq\x1b[0m\x1b[37m(\x1b[0m\x1b[90m\x1b[0m\x1b[90m.new\x1b[0m\x1b[37m,\x1b[0m\x1b[90m#\x1b[0m\x1b[33m0x0\x1b[0m\x1b[37m)\x1b[0m\x1b[37m)\x1b[0m\x1b[37m \x1b[0m\x1b[32mjump\x1b[0m\x1b[90m:nt\x1b[0m\x1b[37m \x1b[0m\x1b[33m0x4c\x1b[0m"); + RzStrBuf *expected = rz_strbuf_new("\x1b[90m\\\x1b[0m\x1b[37m \x1b[0m\x1b[32mif\x1b[0m\x1b[37m \x1b[0m\x1b[37m(\x1b[0m\x1b[36mP0\x1b[0m\x1b[90m.new\x1b[0m\x1b[37m)\x1b[0m\x1b[37m \x1b[0m\x1b[32mjump\x1b[0m\x1b[90m:nt\x1b[0m\x1b[37m \x1b[0m\x1b[33m0x218\x1b[0m"); char err_msg[2048]; snprintf(err_msg, sizeof(err_msg), "Colors of \"%s\" are incorrect. Should be \"%s\"\n.", rz_strbuf_get(colored_asm), rz_strbuf_get(expected)); mu_assert_true(rz_strbuf_equals(colored_asm, expected), err_msg); + rz_asm_op_fini(asmop); + rz_analysis_op_free(anaop); + rz_cons_context_free(p->cons->context); + rz_print_free(p); + rz_strbuf_free(expected); + rz_strbuf_free(colored_asm); mu_end; } @@ -529,6 +582,12 @@ static bool test_rz_colorize_custom_hexagon_1(void) { snprintf(err_msg, sizeof(err_msg), "Colors of \"%s\" are incorrect. 
Should be \"%s\"\n.", rz_strbuf_get(colored_asm), rz_strbuf_get(expected)); mu_assert_true(rz_strbuf_equals(colored_asm, expected), err_msg); + rz_asm_op_fini(asmop); + rz_analysis_op_free(anaop); + rz_cons_context_free(p->cons->context); + rz_print_free(p); + rz_strbuf_free(expected); + rz_strbuf_free(colored_asm); mu_end; } @@ -567,8 +626,14 @@ static bool test_rz_colorize_custom_hexagon_2(void) { expected = rz_strbuf_new(expected_str[i / 4]); snprintf(err_msg, sizeof(err_msg), "Colors of \"%s\" are incorrect. Should be \"%s\"\n.", rz_strbuf_get(colored_asm), rz_strbuf_get(expected)); mu_assert_true(rz_strbuf_equals(colored_asm, expected), err_msg); + rz_strbuf_free(colored_asm); + rz_strbuf_free(expected); } + rz_asm_op_fini(asmop); + rz_analysis_op_free(anaop); + rz_cons_context_free(p->cons->context); + rz_print_free(p); mu_end; } @@ -651,8 +716,14 @@ static bool test_rz_tokenize_custom_bf_0(void) { RzStrBuf *expected = rz_strbuf_new(expected_str[i]); snprintf(err_msg, sizeof(err_msg), "Colors of \"%s\" are incorrect. 
Should be \"%s\"\n.", rz_strbuf_get(colored_asm), rz_strbuf_get(expected)); mu_assert_true(rz_strbuf_equals(colored_asm, expected), err_msg); + rz_asm_op_fini(asmop); + rz_strbuf_free(expected); + rz_strbuf_free(colored_asm); } + rz_asm_free(a); + rz_cons_context_free(p->cons->context); + rz_print_free(p); mu_end; } diff --git a/test/unit/test_vector.c b/test/unit/test_vector.c index 63f01aa3c12..d89b69f34fc 100644 --- a/test/unit/test_vector.c +++ b/test/unit/test_vector.c @@ -944,6 +944,24 @@ static bool test_pvector_contains(void) { mu_end; } +static bool test_pvector_assign_at(void) { + RzPVector v; + init_test_pvector(&v, 5, 0); + ut32 *x = malloc(sizeof(ut32)); + *x = 123467890; + ut32 *e = rz_pvector_assign_at(&v, 3, &x); + mu_assert_eq(*e, 3, "assign_at ret"); + free(e); + mu_assert_eq(v.v.len, 5UL, "assign_at => len"); + mu_assert_eq(*((ut32 **)v.v.a)[0], 0, "assign_at => content at 0"); + mu_assert_eq(*((ut32 **)v.v.a)[1], 1, "assign_at => content at 1"); + mu_assert_eq(*((ut32 **)v.v.a)[2], 2, "assign_at => content at 2"); + mu_assert_eq(*((ut32 **)v.v.a)[3], 123467890, "assign_at => content at 3"); + mu_assert_eq(*((ut32 **)v.v.a)[4], 4, "assign_at => content at 4"); + rz_pvector_clear(&v); + mu_end; +} + static bool test_pvector_remove_at(void) { RzPVector v; init_test_pvector(&v, 5, 0); @@ -1438,6 +1456,7 @@ static int all_tests(void) { mu_run_test(test_pvector_join); mu_run_test(test_pvector_contains); mu_run_test(test_pvector_remove_at); + mu_run_test(test_pvector_assign_at); mu_run_test(test_pvector_insert); mu_run_test(test_pvector_insert_range); mu_run_test(test_pvector_pop);